diff --git a/Makefile b/Makefile
index 347f4d604a..7ee0460e50 100644
--- a/Makefile
+++ b/Makefile
@@ -35,6 +35,10 @@ docker:
tarball: $(ARCHIVE)
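+# Populate $(GOPATH)/src from the checked-in vendor/ tree so dependencies
+# resolve from the vendored copies rather than being fetched over the network.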
+$(GOPATH):
+ mkdir -p $(GOPATH)
+ cp -a $(MAKEFILE_DIR)/vendor/ $(GOPATH)/src
+
$(ARCHIVE): build
mkdir -p $(ARCHIVEDIR)
cp -a prometheus promtool consoles console_libraries $(ARCHIVEDIR)
@@ -65,6 +69,7 @@ clean:
-find . -type f -name '.#*' -exec rm '{}' ';'
$(SELFLINK): $(GOPATH)
+ mkdir -p `dirname $@`
ln -s $(MAKEFILE_DIR) $@
dependencies: $(GOCC) | $(SELFLINK)
@@ -73,7 +78,7 @@ documentation: search_index
godoc -http=:6060 -index -index_files='search_index'
format: dependencies
- find . -iname '*.go' | egrep -v "^\./\.build/" | xargs -n1 $(GOFMT) -w -s=true
+ find . -iname '*.go' | egrep -v "^\./(\.build|vendor)/" | xargs -n1 $(GOFMT) -w -s=true
race_condition_binary: build
$(GO) build -race -o prometheus.race $(BUILDFLAGS) github.com/prometheus/prometheus/cmd/prometheus
diff --git a/vendor.json b/vendor.json
new file mode 100755
index 0000000000..4edef18771
--- /dev/null
+++ b/vendor.json
@@ -0,0 +1,230 @@
+{
+ "comment": "",
+ "ignore": "test",
+ "package": [
+ {
+ "canonical": "bitbucket.org/ww/goautoneg",
+ "comment": "",
+ "local": "vendor/bitbucket.org/ww/goautoneg",
+ "revision": "75cd24fc2f2c",
+ "revisionTime": "2012-07-07T21:04:53+10:00"
+ },
+ {
+ "canonical": "github.com/Sirupsen/logrus",
+ "comment": "",
+ "local": "vendor/github.com/Sirupsen/logrus",
+ "revision": "418b41d23a1bf978c06faea5313ba194650ac088",
+ "revisionTime": "2015-09-08T20:46:18Z"
+ },
+ {
+ "canonical": "github.com/beorn7/perks/quantile",
+ "comment": "",
+ "local": "vendor/github.com/beorn7/perks/quantile",
+ "revision": "b965b613227fddccbfffe13eae360ed3fa822f8d",
+ "revisionTime": "2015-02-23T14:51:52+01:00"
+ },
+ {
+ "canonical": "github.com/golang/protobuf/proto",
+ "comment": "",
+ "local": "vendor/github.com/golang/protobuf/proto",
+ "revision": "1dceb1a2654bdc74ca97ad91f71f500eecc96269",
+ "revisionTime": "2015-09-03T11:38:02+10:00"
+ },
+ {
+ "canonical": "github.com/golang/snappy",
+ "comment": "",
+ "local": "vendor/github.com/golang/snappy",
+ "revision": "723cc1e459b8eea2dea4583200fd60757d40097a",
+ "revisionTime": "2015-07-30T13:18:44+10:00"
+ },
+ {
+ "canonical": "github.com/hashicorp/consul/api",
+ "comment": "",
+ "local": "vendor/github.com/hashicorp/consul/api",
+ "revision": "34f98f7bdf2eec7517e3aac44691566963152721",
+ "revisionTime": "2015-09-08T11:01:49-04:00"
+ },
+ {
+ "canonical": "github.com/julienschmidt/httprouter",
+ "comment": "",
+ "local": "vendor/github.com/julienschmidt/httprouter",
+ "revision": "109e267447e95ad1bb48b758e40dd7453eb7b039",
+ "revisionTime": "2015-09-05T19:25:33+02:00"
+ },
+ {
+ "canonical": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+ "comment": "",
+ "local": "vendor/github.com/matttproud/golang_protobuf_extensions/pbutil",
+ "revision": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a",
+ "revisionTime": "2015-04-06T19:39:34+02:00"
+ },
+ {
+ "canonical": "github.com/miekg/dns",
+ "comment": "",
+ "local": "vendor/github.com/miekg/dns",
+ "revision": "8395762c3490507cf5a27405fcd0e3d3dc547109",
+ "revisionTime": "2015-09-05T08:12:15+01:00"
+ },
+ {
+ "canonical": "github.com/prometheus/client_golang/prometheus",
+ "comment": "",
+ "local": "vendor/github.com/prometheus/client_golang/prometheus",
+ "revision": "25e1a6571b75377d2f49354012d002b024eb44ca",
+ "revisionTime": "2015-08-24T10:54:08+02:00"
+ },
+ {
+ "canonical": "github.com/prometheus/client_golang/text",
+ "comment": "",
+ "local": "vendor/github.com/prometheus/client_golang/text",
+ "revision": "25e1a6571b75377d2f49354012d002b024eb44ca",
+ "revisionTime": "2015-08-24T10:54:08+02:00"
+ },
+ {
+ "canonical": "github.com/prometheus/client_model/go",
+ "comment": "",
+ "local": "vendor/github.com/prometheus/client_model/go",
+ "revision": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6",
+ "revisionTime": "2015-02-12T10:17:44Z"
+ },
+ {
+ "canonical": "github.com/prometheus/common/expfmt",
+ "comment": "",
+ "local": "vendor/github.com/prometheus/common/expfmt",
+ "revision": "c33395bbc758c8d25735ec7036d66b342084ae35",
+ "revisionTime": "2015-08-25T14:37:19+02:00"
+ },
+ {
+ "canonical": "github.com/prometheus/common/model",
+ "comment": "",
+ "local": "vendor/github.com/prometheus/common/model",
+ "revision": "c33395bbc758c8d25735ec7036d66b342084ae35",
+ "revisionTime": "2015-08-25T14:37:19+02:00"
+ },
+ {
+ "canonical": "github.com/prometheus/log",
+ "comment": "",
+ "local": "vendor/github.com/prometheus/log",
+ "revision": "439e5db48fbb50ebbaf2c816030473a62f505f55",
+ "revisionTime": "2015-05-29T14:22:02+02:00"
+ },
+ {
+ "canonical": "github.com/prometheus/procfs",
+ "comment": "",
+ "local": "vendor/github.com/prometheus/procfs",
+ "revision": "c91d8eefde16bd047416409eb56353ea84a186e4",
+ "revisionTime": "2015-06-16T16:46:31+02:00"
+ },
+ {
+ "canonical": "github.com/samuel/go-zookeeper/zk",
+ "comment": "",
+ "local": "vendor/github.com/samuel/go-zookeeper/zk",
+ "revision": "177002e16a0061912f02377e2dd8951a8b3551bc",
+ "revisionTime": "2015-08-17T10:50:50-07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/cache",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/cache",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/comparer",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/comparer",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/errors",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/errors",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/filter",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/filter",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/iterator",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/iterator",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/journal",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/journal",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/memdb",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/memdb",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/opt",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/opt",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/storage",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/storage",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/table",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/table",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "github.com/syndtr/goleveldb/leveldb/util",
+ "comment": "",
+ "local": "vendor/github.com/syndtr/goleveldb/leveldb/util",
+ "revision": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0",
+ "revisionTime": "2015-08-19T12:16:22+07:00"
+ },
+ {
+ "canonical": "golang.org/x/net/context",
+ "comment": "",
+ "local": "vendor/golang.org/x/net/context",
+ "revision": "db8e4de5b2d6653f66aea53094624468caad15d2",
+ "revisionTime": "2015-08-24T18:07:02+02:00"
+ },
+ {
+ "canonical": "gopkg.in/fsnotify.v1",
+ "comment": "",
+ "local": "vendor/gopkg.in/fsnotify.v1",
+ "revision": "96c060f6a6b7e0d6f75fddd10efeaca3e5d1bcb0",
+ "revisionTime": "2015-02-08T13:29:58-07:00"
+ },
+ {
+ "canonical": "gopkg.in/yaml.v2",
+ "comment": "",
+ "local": "vendor/gopkg.in/yaml.v2",
+ "revision": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40",
+ "revisionTime": "2015-06-24T11:29:02+01:00"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/vendor/bitbucket.org/ww/goautoneg/Makefile b/vendor/bitbucket.org/ww/goautoneg/Makefile
new file mode 100644
index 0000000000..e33ee17303
--- /dev/null
+++ b/vendor/bitbucket.org/ww/goautoneg/Makefile
@@ -0,0 +1,13 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=bitbucket.org/ww/goautoneg
+GOFILES=autoneg.go
+
+include $(GOROOT)/src/Make.pkg
+
+format:
+ gofmt -w *.go
+
+docs:
+ gomake clean
+ godoc ${TARG} > README.txt
diff --git a/vendor/bitbucket.org/ww/goautoneg/README.txt b/vendor/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 0000000000..7723656d58
--- /dev/null
+++ b/vendor/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
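+
+EXAMPLE
+
+A minimal usage sketch (illustrative; `req` stands in for an *http.Request):
+
+    alternatives := []string{"text/html", "application/json"}
+    contentType := goautoneg.Negotiate(req.Header.Get("Accept"), alternatives)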
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 0000000000..648b38cb65
--- /dev/null
+++ b/vendor/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+// Less orders clauses for sorting: higher q values first, then clauses with
+// concrete (non-wildcard) types and subtypes ahead of wildcard ones.
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000000..78f98959bb
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,47 @@
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000000..f090cb42f3
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000000..6fa6e2062a
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,357 @@
+# Logrus
[](https://travis-ci.org/Sirupsen/logrus) [][godoc]
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = &log.TextFormatter{}` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
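+
+For example, both of the following are valid; the second just carries more
+structure (`queue` and `pct` are placeholder variables for this sketch):
+
+```go
+log.Warnf("Queue %s is %d%% full", queue, pct)
+log.WithFields(log.Fields{"queue": queue, "percent": pct}).Warn("Queue is filling up")
+```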
+
+#### Hooks
+
+You can add hooks for logging levels. For example to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+ log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
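+
+For example, `log.WithField("animal", "walrus").Info("A walrus appears")` with
+the default text formatter would yield roughly:
+
+```text
+time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus
+```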
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY, set the
+ `DisableColors` field to `true`
+* `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
+
+ ```go
+ logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
+ ```
+
+Third party logging formatters:
+
+* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+// Register the custom formatter, e.g. from init() or main():
+log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000000..dddd5f877b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/Sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000000..9ae900bc5e
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,264 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := Fields{}
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000000..9a0120ac1d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000000..104d689f18
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,48 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there, doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// Would just silently drop the user provided level. Instead with this code
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000000..3f151cdc39
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
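+
+// An illustrative (hypothetical) implementation, shown only as a sketch:
+//
+//	type StderrHook struct{}
+//
+//	func (StderrHook) Levels() []Level { return []Level{ErrorLevel, FatalLevel, PanicLevel} }
+//
+//	func (StderrHook) Fire(e *Entry) error {
+//		fmt.Fprintln(os.Stderr, "alert:", e.Message) // assumes "fmt" and "os" are imported
+//		return nil
+//	}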
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000000..2ad6dc5cf4
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,41 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ data["time"] = entry.Time.Format(timestampFormat)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000000..fd9804c64c
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,206 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stderr`. You can also set this to
+ // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in debug or verbose environments.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugf(format, args...)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infof(format, args...)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorf(format, args...)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalf(format, args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicf(format, args...)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debug(args...)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Info(args...)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Error(args...)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatal(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panic(args...)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugln(args...)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infoln(args...)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorln(args...)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalln(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicln(args...)
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000000..0c09fbc264
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,98 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch lvl {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
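+
+// Typical use, sketched (`levelFlag` is a hypothetical command-line value):
+//
+//	if lvl, err := ParseLevel(levelFlag); err == nil {
+//		logger.Level = lvl
+//	}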
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way it'll
+// accept both a stdlib logger and a logrus logger. There's no standard
+// interface, so this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
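`ParseLevel` and `Level.String()` are inverses, which makes wiring a string-valued flag into the logger straightforward. A sketch under the assumption that the package-level `Fatal` helper and the `Infof` variant exist elsewhere in this vendored package (the flag name is illustrative):

```go
package main

import (
	"flag"

	"github.com/Sirupsen/logrus"
)

func main() {
	levelName := flag.String("log.level", "info", "debug|info|warn|error|fatal|panic")
	flag.Parse()

	level, err := logrus.ParseLevel(*levelName) // accepts "warn" and "warning" alike
	if err != nil {
		logrus.Fatal(err) // assumed package-level helper, not shown in this diff
	}

	logger := logrus.New()
	logger.Level = level
	logger.Infof("log level set to %s", level) // Level.String() renders e.g. "info"
}
```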
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000000..71f8d67a55
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,9 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000000..a2c0b40db6
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000000..4bb5376028
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stdout's file descriptor refers to a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000000..2e09f6f7e3
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stdout's file descriptor refers to a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stdout
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
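The three `terminal_*.go` files give one portable `IsTerminal` check: a `TCGETS`/`TIOCGETA` ioctl against stdout on Unix, `GetConsoleMode` on Windows; pipes and regular files report false either way. A small sketch of the check, mirroring the color decision the text formatter below makes:

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/Sirupsen/logrus"
)

func main() {
	// Colored output is only safe on a real tty, and the formatter below
	// additionally excludes Windows consoles.
	if logrus.IsTerminal() && runtime.GOOS != "windows" {
		fmt.Println("stdout is a color-capable terminal")
	} else {
		fmt.Println("no tty (or Windows console): plain output is safer")
	}
}
```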
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000000..17cc298484
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,159 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Disable timestamp logging. Useful when output is redirected to a logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time elapsed since the beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+ isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, "msg", entry.Message)
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+ }
+}
+
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+ b.WriteString(key)
+ b.WriteByte('=')
+
+ switch value := value.(type) {
+ case string:
+ if needsQuoting(value) {
+ fmt.Fprintf(b, "%q", value)
+ } else {
+ b.WriteString(value)
+ }
+ case error:
+ errmsg := value.Error()
+ if needsQuoting(errmsg) {
+ fmt.Fprintf(b, "%q", value)
+ } else {
+ b.WriteString(errmsg)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+
+ b.WriteByte(' ')
+}
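A configuration sketch for `TextFormatter`, assuming the `Formatter` field and `WithFields` helper defined on `Logger` outside this excerpt; the option names are the ones declared above:

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	logger := logrus.New()
	logger.Formatter = &logrus.TextFormatter{
		FullTimestamp:   true,                  // on a tty, print the time, not seconds since start
		TimestampFormat: "2006-01-02 15:04:05", // consulted by both branches of Format
	}
	logger.WithFields(logrus.Fields{"component": "scrape"}).Info("started")
	// On a non-tty this emits roughly:
	//   time="2015-09-08 12:00:00" level=info msg=started component=scrape
}
```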
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000000..1e30b1c753
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,31 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ go logger.writerScanner(reader)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ logger.Print(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
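`Writer` turns the logger into an `io.Writer` by pumping each scanned line through `logger.Print` on a goroutine. The canonical use is handing logrus to APIs that only accept a stdlib `*log.Logger`; a sketch:

```go
package main

import (
	"log"
	"net/http"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	w := logger.Writer() // every line written to w is re-logged via logger.Print
	defer w.Close()      // closing the pipe lets the scanner goroutine finish

	srv := &http.Server{
		Addr:     ":8080",
		ErrorLog: log.New(w, "", 0), // the stdlib logger now feeds into logrus
	}
	logger.Fatal(srv.ListenAndServe())
}
```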
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000000..1602287d7c
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000000..587b1fc5ba
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.5, 0.9, 0.99) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(float64(l) * q)
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the sample buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
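This stream implementation is what the Prometheus Go client builds its Summary quantiles on. A short sketch of the targeted API defined above, where the map gives each tracked quantile its own absolute error bound:

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	})
	for i := 1; i <= 10000; i++ {
		q.Insert(float64(i))
	}
	// The true rank of the returned value is within 0.90±0.01, so roughly 9000.
	fmt.Println(q.Query(0.9))
	// Count reports all observations; the compressed summary inside keeps far fewer.
	fmt.Println(q.Count()) // 10000
}
```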
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 0000000000..f1f06564a1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
+ make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000000..84bbaf4335
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,223 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: MessageSet and RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extendableProto); ok {
+ emOut := out.Addr().Interface().(extendableProto)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
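A sketch of `Clone` and `Merge` semantics. `Example` is a hypothetical hand-rolled message standing in for protoc-generated code; it satisfies `Message` via the usual three methods:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is hypothetical; protoc would normally generate it.
type Example struct {
	Name *string  `protobuf:"bytes,1,opt,name=name"`
	Tags []string `protobuf:"bytes,2,rep,name=tags"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	src := &Example{Name: proto.String("src"), Tags: []string{"a"}}

	dst := proto.Clone(src).(*Example)              // deep copy: merge into a fresh value
	proto.Merge(dst, &Example{Tags: []string{"b"}}) // set fields overwrite, repeated append

	fmt.Println(*dst.Name, dst.Tags) // src [a b]
}
```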
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000000..84866356cb
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,863 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check whether we can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ ext := e.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ e.ExtensionMap()[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+
+ y := *v
+ for i := 0; i < nb; i++ {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() || !valelem.IsValid() {
+ // We did not decode the key or the value in the map entry.
+ // Either way, it's an invalid map entry.
+ return fmt.Errorf("proto: bad map data: missing key/val")
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000000..fe48cc743d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1336 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
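+
+// A worked example: EncodeVarint(300) returns []byte{0xAC, 0x02}. 300 is
+// 0b100101100; the low seven bits (0101100 = 0x2C) are emitted first with
+// the continuation bit 0x80 set (giving 0xAC), then the remaining bits
+// (0b10 = 0x02) as the final byte.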
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
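+
+// The eight bytes are emitted in little-endian order; for example,
+// EncodeFixed64(1) appends 0x01 followed by seven 0x00 bytes.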
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
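+
+// Zigzag encoding maps signed integers with small magnitudes to small
+// unsigned values, keeping their varint encodings short:
+//
+//	 0 -> 0    -1 -> 1    1 -> 2    -2 -> 3    2 -> 4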
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
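+
+// Usage sketch (pb.Test and its Label field are hypothetical stand-ins for
+// any generated message type):
+//
+//	msg := &pb.Test{Label: proto.String("hello")}
+//	data, err := proto.Marshal(msg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = data // wire-format bytes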
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// isNil reports whether v is nil. All protocol buffer fields are nillable,
+// but only interfaces, maps, pointers, and slices support IsNil; for any
+// other kind this returns false.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
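+
+ // For illustration, a map<string, int32> field with tag 3 holding the
+ // single entry {"a": 1} encodes that entry as:
+ //
+ //	0x1a 0x05      // field 3, wire type 2 (the entry); payload length 5
+ //	0x0a 0x01 0x61 // key: field 1, length-delimited string "a"
+ //	0x10 0x01      // value: field 2, varint 1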
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ keys := v.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns new addressable reflect.Values for the map's key
+// and value types, together with structPointers for each that are suitable
+// for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ // TODO: This could be faster and use less reflection.
+ if prop.oneofMarshaler != nil {
+ sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem()
+ for i := 0; i < prop.stype.NumField(); i++ {
+ fv := sv.Field(i)
+ if fv.Kind() != reflect.Interface || fv.IsNil() {
+ continue
+ }
+ if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" {
+ continue
+ }
+ spv := fv.Elem() // interface -> *T
+ sv := spv.Elem() // *T -> T
+ sf := sv.Type().Field(0) // StructField inside T
+ var prop Properties
+ prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf)
+ n += prop.size(&prop, toStructPointer(spv))
+ }
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any possible varint encoding (at most 10 bytes)
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
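+
+// For example, when the nested encoding turns out to be 3 bytes long:
+// lMsg = 3, lLen = sizeVarint(3) = 1, and x = lLen - 4 = -3, so the message
+// is shifted three bytes left, the buffer is shrunk by three bytes, and the
+// one-byte length 0x03 is written into the reserved space.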
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000000..5475c3d959
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,267 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+// TODO: MessageSet.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN.
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field).
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
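+
+// Note the floating-point caveat above: NaN compares unequal to itself, so
+// two messages whose corresponding float fields are both NaN are not Equal.
+// Sketch (pb.Test and its Value field are hypothetical):
+//
+//	a := &pb.Test{Value: proto.Float64(math.NaN())}
+//	b := &pb.Test{Value: proto.Float64(math.NaN())}
+//	proto.Equal(a, b) // false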
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+func equalAny(v1, v2 reflect.Value) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem())
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000000..e591ccef79
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,400 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ base.ExtensionMap()[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ _, ok := pb.ExtensionMap()[extension.Field]
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ delete(pb.ExtensionMap(), extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ emap := pb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 directly into a non-int32 reflect.Value,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ rep := extension.repeated()
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if !rep || o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
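+
+// Usage sketch (pb.E_Ext is a hypothetical generated descriptor for an
+// optional int32 extension of msg's type):
+//
+//	if err := proto.SetExtension(msg, pb.E_Ext, proto.Int32(42)); err != nil {
+//		log.Fatal(err)
+//	}
+//	v, err := proto.GetExtension(msg, pb.E_Ext)
+//	// on success, v.(*int32) holds the value 42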
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000000..abbf7583ed
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,883 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from snake_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // write point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
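+// A Buffer can be reused across many messages to amortize allocations;
+// the Marshal and Unmarshal methods on Buffer are defined in encode.go
+// and decode.go. A sketch, with msgs and w as placeholders:
+//
+//	var b proto.Buffer
+//	for _, m := range msgs {
+//		b.Reset()
+//		if err := b.Marshal(m); err != nil {
+//			log.Fatal(err)
+//		}
+//		w.Write(b.Bytes()) // consume before the next Reset
+//	}
+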
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
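+
+// For example, with the FOO enum from the package documentation, both JSON
+// forms decode to the same value:
+//
+//	v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic
+//	v, err = proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")   // numeric
+//	// on success, v == 17 in both cases, i.e. FOO_X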
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
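+
+// For example, with the Test message from the package documentation, whose
+// type field declares [default=77]:
+//
+//	t := &pb.Test{Label: proto.String("hello")}
+//	proto.SetDefaults(t)
+//	// t.Type now points at a freshly allocated int32 holding 77.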
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to the default information
+ // for its fields: scalar fields with their proto-declared defaults, and
+ // the indices of nested message fields. Access is guarded by defaultMu.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
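+
+// Callers sort a message's map keys like so, where mv is a reflect.Value of
+// map kind:
+//
+//	keys := mv.MapKeys()
+//	sort.Sort(mapKeys(keys))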
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000000..9d912bce19
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,287 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and MessageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't generate these types from a
+// .proto file because that would introduce a circular dependency between the
+// generated code and this package.
+//
+// When a proto1 proto has a field that looks like:
+// optional message info = 3;
+// the protocol compiler produces a field in the generated struct that looks like:
+// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"`
+// The package is automatically inserted so there is no need for that proto file to
+// import this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type MessageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure MessageSet is a Message.
+var _ Message = (*MessageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *MessageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *MessageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *MessageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return ErrNoMessageTypeId
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *MessageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return ErrNoMessageTypeId
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
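+
+// A round trip, assuming a hypothetical generated message type pb.Info for
+// which the protocol compiler emits a MessageTypeId method:
+//
+//	ms := new(proto.MessageSet)
+//	if err := ms.Marshal(info); err != nil {
+//		log.Fatal(err)
+//	}
+//	got := new(pb.Info)
+//	if err := ms.Unmarshal(got); err != nil {
+//		log.Fatal(err)
+//	}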
+
+func (ms *MessageSet) Reset() { *ms = MessageSet{} }
+func (ms *MessageSet) String() string { return CompactTextString(ms) }
+func (*MessageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(MessageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000000..749919d250
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the []*struct field in the struct as a structPointerSlice.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000000..e9be0fe92e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000000..692fe4310e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,799 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
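A sketch of the intended access pattern; since tagMap is unexported this assumes code inside the package, and the field indexes are illustrative:

func exampleTagMapUse() {
	var m tagMap
	m.put(3, 0)     // small tag: stored in the fastTags slice
	m.put(20000, 1) // beyond tagMapFastLimit: falls back to slowTags

	fi, ok := m.get(3) // 0, true: a bounds check plus a slice index, no hashing
	_, _ = fi, ok
	_, ok = m.get(2) // false: fastTags[2] holds -1, the "absent" sentinel
	_ = ok
}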
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ if p.OrigName != p.Name {
+ s += ",name=" + p.OrigName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
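Parse consumes the value of the protobuf struct tag emitted by protoc-gen-go. A small runnable illustration of the tag format and how it is read back via reflection (the Person message is hypothetical):

package main

import (
	"fmt"
	"reflect"
)

// Hypothetical generated message; the tag layout is wire type,
// tag number, cardinality, then name=/enum=/def= options.
type Person struct {
	Name *string `protobuf:"bytes,1,opt,name=name"`
	Id   *int32  `protobuf:"varint,2,opt,name=id"`
}

func main() {
	f, _ := reflect.TypeOf(Person{}).FieldByName("Id")
	fmt.Println(f.Tag.Get("protobuf")) // varint,2,opt,name=id
	// Parse("varint,2,opt,name=id") sets Wire="varint", Tag=2,
	// Optional=true, and OrigName="id" on a Properties value.
}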
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
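The tagcode loop at the end of setEncAndDec caches the varint encoding of the field key, (Tag<<3 | WireType), so encoding never recomputes it. A standalone sketch of that computation:

package main

import "fmt"

// tagcode mirrors the precalculation above: varint-encode Tag<<3 | WireType.
func tagcode(tag, wire uint32) []byte {
	x := tag<<3 | wire
	var buf [8]byte
	i := 0
	for ; x > 127; i++ {
		buf[i] = 0x80 | uint8(x&0x7F)
		x >>= 7
	}
	buf[i] = uint8(x)
	return buf[:i+1]
}

func main() {
	fmt.Printf("% x\n", tagcode(1, 0))  // 08: field 1, varint
	fmt.Printf("% x\n", tagcode(16, 2)) // 82 01: field 16, length-delimited
}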
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
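GetProperties is the classic read-mostly cache: an RLock fast path, with the exclusive lock (and a re-check inside it, done here by getPropertiesLocked) only on a miss. A generic standalone sketch of the same pattern:

package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.RWMutex
	cache = make(map[string]int)
)

func get(key string, compute func() int) int {
	mu.RLock()
	v, ok := cache[key]
	mu.RUnlock()
	if ok {
		return v // fast path: shared lock only
	}
	mu.Lock()
	defer mu.Unlock()
	if v, ok := cache[key]; ok {
		return v // another goroutine filled it in between the locks
	}
	v = compute()
	cache[key] = v
	return v
}

func main() {
	fmt.Println(get("a", func() int { return 1 }))
	fmt.Println(get("a", func() int { panic("cached; never recomputed") }))
}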
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
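Generated code registers each enum from an init function. A hypothetical example of the call RegisterEnum expects, with made-up names that mirror protoc-gen-go output of this era:

package example

import "github.com/golang/protobuf/proto"

type Color int32

var Color_name = map[int32]string{0: "RED", 1: "BLUE"}
var Color_value = map[string]int32{"RED": 0, "BLUE": 1}

func init() {
	// The name map is the "unusedNameMap" parameter above; only the
	// value map is retained, to aid parsing the text format.
	proto.RegisterEnum("example.Color", Color_name, Color_value)
}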
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000000..dd69110f80
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,796 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+var (
+ messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()
+)
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ if sv.Type() == messageSetType {
+ return writeMessageSet(w, sv.Addr().Interface().(*MessageSet))
+ }
+
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props.Parse(tag) // Overwrite the outer props.
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if pv.Type().Implements(extendableProtoType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Interface().([]byte))); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
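A simplified standalone sketch of those escaping rules (it drops the \r and \t cases for brevity): the string is walked byte by byte, not rune by rune, so multi-byte UTF-8 sequences come out as one octal escape per byte, which keeps the output parseable by the C++ and Java text-format implementations.

package main

import "fmt"

func escape(s string) string {
	out := ""
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c == '\n':
			out += `\n`
		case c == '"':
			out += `\"`
		case c == '\\':
			out += `\\`
		case c >= 0x20 && c < 0x7f: // isprint
			out += string(c)
		default:
			out += fmt.Sprintf(`\%03o`, c)
		}
	}
	return `"` + out + `"`
}

func main() {
	fmt.Println(escape("héllo\n")) // "h\303\251llo\n": each UTF-8 byte escaped
}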
+
+func writeMessageSet(w *textWriter, ms *MessageSet) error {
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ if msd, ok := messageSetMap[id]; ok {
+ // Known message set type.
+ if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil {
+ return err
+ }
+ w.indent()
+
+ pb := reflect.New(msd.t.Elem())
+ if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {
+ if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil {
+ return err
+ }
+ } else {
+ if err := writeStruct(w, pb.Elem()); err != nil {
+ return err
+ }
+ }
+ } else {
+ // Unknown type.
+ if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil {
+ return err
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, item.Message); err != nil {
+ return err
+ }
+ }
+ w.unindent()
+ if _, err := w.Write(gtNewline); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m := ep.ExtensionMap()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+func marshalText(w io.Writer, pb Message, compact bool) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: compact,
+ }
+
+ if tm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+}
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, false)
+ return buf.String()
+}
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, true)
+ return buf.String()
+}
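For callers, the exported entry points pair up as pretty and compact forms. A hedged usage fragment, with pb.Person standing in for any generated message type:

msg := &pb.Person{Name: proto.String("ada"), Id: proto.Int32(1)}

fmt.Print(proto.MarshalTextString(msg))
// name: "ada"
// id: 1

fmt.Println(proto.CompactTextString(msg))
// name:"ada" id:1  (newlines become single spaces)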
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000000..8bce369204
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,784 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || p.s[0] != '"' {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
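One consequence of the splicing in next(): adjacent quoted strings concatenate, C-style. Assuming a generated message msg with a string field name, both of these inputs, fed to the package's UnmarshalText entry point, yield the same result:

_ = proto.UnmarshalText(`name: "ab"`, msg)
_ = proto.UnmarshalText(`name: "a" "b"`, msg) // also sets name to "ab"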
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
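In practice these rules mean the colon is mandatory for scalars and strings but optional before an embedded message. A hedged fragment, with msg standing in for a hypothetical generated message that has an int32 field id and a message field inner:

_ = proto.UnmarshalText(`id: 1 inner { flag: true }`, msg)  // accepted
_ = proto.UnmarshalText(`id: 1 inner: { flag: true }`, msg) // also accepted
_ = proto.UnmarshalText(`id 1`, msg)                        // error: expected ':'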
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]".
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == tok.value {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", tok.value)
+ }
+ // Check the extension terminator.
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != "]" {
+ return p.errorf("unrecognized extension terminator %q", tok.value)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field. May already exist.
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(at, flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
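For orientation, a minimal sketch of driving `UnmarshalText` from application code. The generated package `example.com/myproto` and its `MyMessage` type are hypothetical stand-ins for any generated `proto.Message` implementation:

```go
package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/myproto" // hypothetical generated package
)

func main() {
	msg := new(pb.MyMessage) // stand-in for any generated message type
	if err := proto.UnmarshalText(`label: "hello" count: 3`, msg); err != nil {
		// When only required fields are missing, err is a
		// *proto.RequiredNotSetError and the parsed fields are still set.
		log.Fatal(err)
	}
	log.Printf("parsed: %+v", msg)
}
```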
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000000..824bf2e148
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,14 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000000..9f54f21ff7
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,36 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000000..6050c10f4c
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000000..5074bbab8d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,7 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000000..e7f1259a34
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,294 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if len(dst) < dLen {
+ dst = make([]byte, dLen)
+ }
+
+ var d, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-1])
+ case x == 61:
+ s += 3
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-2]) | uint(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
+ }
+ length = int(x + 1)
+ if length <= 0 {
+ return nil, errors.New("snappy: unsupported literal length")
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return nil, ErrCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
+
+ case tagCopy2:
+ s += 3
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(src[s-2]) | int(src[s-1])<<8
+
+ case tagCopy4:
+ return nil, errors.New("snappy: unsupported COPY_4 tag")
+ }
+
+ end := d + length
+ if offset > d || end > len(dst) {
+ return nil, ErrCorrupt
+ }
+ for ; d < end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != dLen {
+ return nil, ErrCorrupt
+ }
+ return dst[:d], nil
+}
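A quick block-format round trip, assuming the `Encode` function added later in this change (encode.go); the input string is illustrative:

```go
package main

import (
	"bytes"
	"log"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello hello hello hello") // repetition compresses well
	enc := snappy.Encode(nil, src)           // nil dst: a slice is allocated
	dec, err := snappy.Decode(nil, enc)      // nil dst: a slice is allocated
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(src, dec) {
		log.Fatal("round trip mismatch")
	}
	log.Printf("%d bytes -> %d encoded bytes", len(src), len(enc))
}
```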
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxUncompressedChunkLen),
+ buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4]) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if !r.readFull(r.decoded[:n]) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)]) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen]) {
+ return 0, r.err
+ }
+ }
+}
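A sketch of streaming decompression with the `Reader` above; the file name is illustrative:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/golang/snappy"
)

func main() {
	// Assumes "data.sz" holds a stream in the snappy framing format.
	f, err := os.Open("data.sz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Reader validates chunk CRCs as it goes and surfaces ErrCorrupt or
	// ErrUnsupported through the usual io.Reader error return.
	if _, err := io.Copy(os.Stdout, snappy.NewReader(f)); err != nil {
		log.Fatal(err)
	}
}
```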
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000000..f3b5484bc7
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,254 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+// We limit how far copy back-references can go, the same as the C++ code.
+const maxOffset = 1 << 15
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ case n < 1<<16:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ case n < 1<<24:
+ dst[0] = 62<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ dst[3] = uint8(n >> 16)
+ i = 4
+ case int64(n) < 1<<32:
+ dst[0] = 63<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ dst[3] = uint8(n >> 16)
+ dst[4] = uint8(n >> 24)
+ i = 5
+ default:
+ panic("snappy: source buffer is too long")
+ }
+ if copy(dst[i:], lit) != len(lit) {
+ panic("snappy: destination buffer is too short")
+ }
+ return i + len(lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ for length > 0 {
+ x := length - 4
+ if 0 <= x && x < 1<<3 && offset < 1<<11 {
+ dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ i += 2
+ break
+ }
+
+ x = length
+ if x > 1<<6 {
+ x = 1 << 6
+ }
+ dst[i+0] = uint8(x-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= x
+ }
+ return i
+}
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ // Return early if src is short.
+ if len(src) <= 4 {
+ if len(src) != 0 {
+ d += emitLiteral(dst[d:], src)
+ }
+ return dst[:d]
+ }
+
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ const maxTableSize = 1 << 14
+ shift, tableSize := uint(32-8), 1<<8
+ for tableSize < maxTableSize && tableSize < len(src) {
+ shift--
+ tableSize *= 2
+ }
+ var table [maxTableSize]int
+
+ // Iterate over the source bytes.
+ var (
+ s int // The iterator position.
+ t int // The last position with the same hash as s.
+ lit int // The start position of any pending literal bytes.
+ )
+ for s+3 < len(src) {
+ // Update the hash table.
+ b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
+ h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
+ p := &table[(h*0x1e35a7bd)>>shift]
+ // We need to store values in [-1, inf) in table. To save
+ // some initialization time, (re)use the table's zero value
+ // and shift the values against this zero: add 1 on writes,
+ // subtract 1 on reads.
+ t, *p = *p-1, s+1
+ // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
+ if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
+ s++
+ continue
+ }
+ // Otherwise, we have a match. First, emit any pending literal bytes.
+ if lit != s {
+ d += emitLiteral(dst[d:], src[lit:s])
+ }
+ // Extend the match to be as long as possible.
+ s0 := s
+ s, t = s+4, t+4
+ for s < len(src) && src[s] == src[t] {
+ s++
+ t++
+ }
+ // Emit the copied bytes.
+ d += emitCopy(dst[d:], s-t, s-s0)
+ lit = s
+ }
+
+ // Emit any final pending literal bytes and return.
+ if lit != len(src) {
+ d += emitLiteral(dst[d:], src[lit:])
+ }
+ return dst[:d]
+}
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+func MaxEncodedLen(srcLen int) int {
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ return 32 + srcLen + srcLen/6
+}
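To make the bound above concrete, a small sketch that restates the formula locally (`maxEncodedLen` is simply `32 + srcLen + srcLen/6`):

```go
package main

import "fmt"

func maxEncodedLen(srcLen int) int { return 32 + srcLen + srcLen/6 }

func main() {
	// The dominant term is srcLen/6: six input bytes can become seven
	// output bytes (a one-byte literal followed by a five-byte copy).
	for _, n := range []int{0, 6, 60, 65536} {
		fmt.Printf("MaxEncodedLen(%d) = %d\n", n, maxEncodedLen(n))
	}
	// e.g. MaxEncodedLen(65536) = 32 + 65536 + 10922 = 76490
}
```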
+
+// NewWriter returns a new Writer that compresses to w, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+ enc []byte
+ buf [checksumSize + chunkHeaderSize]byte
+ wroteHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ w.wroteHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (n int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ if !w.wroteHeader {
+ copy(w.enc, magicChunk)
+ if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
+ w.err = err
+ return n, err
+ }
+ w.wroteHeader = true
+ }
+ for len(p) > 0 {
+ var uncompressed []byte
+ if len(p) > maxUncompressedChunkLen {
+ uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkBody := Encode(w.enc, uncompressed)
+ if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
+ }
+
+ chunkLen := 4 + len(chunkBody)
+ w.buf[0] = chunkType
+ w.buf[1] = uint8(chunkLen >> 0)
+ w.buf[2] = uint8(chunkLen >> 8)
+ w.buf[3] = uint8(chunkLen >> 16)
+ w.buf[4] = uint8(checksum >> 0)
+ w.buf[5] = uint8(checksum >> 8)
+ w.buf[6] = uint8(checksum >> 16)
+ w.buf[7] = uint8(checksum >> 24)
+ if _, err := w.w.Write(w.buf[:]); err != nil {
+ w.err = err
+ return n, err
+ }
+ if _, err := w.w.Write(chunkBody); err != nil {
+ w.err = err
+ return n, err
+ }
+ n += len(uncompressed)
+ }
+ return n, nil
+}
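A sketch of the framing-format writer in use; note that this `Writer` emits each chunk as soon as `Write` is called, so no explicit flush or close step is needed:

```go
package main

import (
	"log"
	"os"

	"github.com/golang/snappy"
)

func main() {
	// Writes a stream in the snappy framing format to stdout. Each Write
	// is split into chunks of at most 65536 uncompressed bytes; a chunk
	// is stored uncompressed unless compression saves at least 12.5%.
	w := snappy.NewWriter(os.Stdout)
	if _, err := w.Write([]byte("an illustrative payload, an illustrative payload")); err != nil {
		log.Fatal(err)
	}
}
```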
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000000..15af18d0a5
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,68 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer supported.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
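To make the copy-tag encoding above concrete, here is a hand-decoded two-byte `tagCopy1` header using the same bit arithmetic as `Decode`; the byte values are illustrative:

```go
package main

import "fmt"

func main() {
	// 0xa9 = 0b101010_01: the low two bits (01) select tagCopy1,
	// so m = 0b101010.
	b0, b1 := byte(0xa9), byte(0x34)

	tag := b0 & 0x03                    // 0x01 (tagCopy1)
	length := 4 + int(b0>>2)&0x07       // 4 + low 3 bits of m = 6
	offset := int(b0)&0xe0<<3 | int(b1) // high 3 bits of m are offset bits 8-10: 1332

	fmt.Printf("tag=%#02x length=%d offset=%d\n", tag, length, offset)
}
```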
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536 bytes".
+ maxUncompressedChunkLen = 65536
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md
new file mode 100644
index 0000000000..bce2ebb516
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/README.md
@@ -0,0 +1,39 @@
+Consul API client
+=================
+
+This package attempts to provide programmatic access to the
+full Consul API.
+
+Currently, all of the Consul APIs included in version 0.3 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api)
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+// Get a new client, with KV endpoints
+client, _ := api.NewClient(api.DefaultConfig())
+kv := client.KV()
+
+// PUT a new KV pair
+p := &api.KVPair{Key: "foo", Value: []byte("test")}
+_, err := kv.Put(p, nil)
+if err != nil {
+ panic(err)
+}
+
+// Lookup the pair
+pair, _, err := kv.Get("foo", nil)
+if err != nil {
+ panic(err)
+}
+fmt.Printf("KV: %v", pair)
+
+```
+
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
new file mode 100644
index 0000000000..c3fb0d53aa
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/acl.go
@@ -0,0 +1,140 @@
+package api
+
+const (
+ // ACLClientType is the client type token
+ ACLClientType = "client"
+
+ // ACLManagementType is the management type token
+ ACLManagementType = "management"
+)
+
+// ACLEntry is used to represent an ACL entry
+type ACLEntry struct {
+ CreateIndex uint64
+ ModifyIndex uint64
+ ID string
+ Name string
+ Type string
+ Rules string
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+ c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+ return &ACL{c}
+}
+
+// Create is used to generate a new token with the given parameters
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/create")
+ r.setWriteOptions(q)
+ r.obj = acl
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/update")
+ r.setWriteOptions(q)
+ r.obj = acl
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// Destroy is used to destroy a given ACL token ID
+func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// Clone is used to return a new token cloned from an existing one
+func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Info is used to query for information about an ACL token
+func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/info/"+id)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// List is used to get all the ACL tokens
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/list")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go
new file mode 100644
index 0000000000..2b950d0a3e
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/agent.go
@@ -0,0 +1,335 @@
+package api
+
+import (
+ "fmt"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+ Node string
+ CheckID string
+ Name string
+ Status string
+ Notes string
+ Output string
+ ServiceID string
+ ServiceName string
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+ ID string
+ Service string
+ Tags []string
+ Port int
+ Address string
+}
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+ Name string
+ Addr string
+ Port uint16
+ Tags map[string]string
+ Status int
+ ProtocolMin uint8
+ ProtocolMax uint8
+ ProtocolCur uint8
+ DelegateMin uint8
+ DelegateMax uint8
+ DelegateCur uint8
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+ ID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Tags []string `json:",omitempty"`
+ Port int `json:",omitempty"`
+ Address string `json:",omitempty"`
+ Check *AgentServiceCheck
+ Checks AgentServiceChecks
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+ ID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Notes string `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ AgentServiceCheck
+}
+
+// AgentServiceCheck is used to create an associated
+// check for a service
+type AgentServiceCheck struct {
+ Script string `json:",omitempty"`
+ Interval string `json:",omitempty"`
+ Timeout string `json:",omitempty"`
+ TTL string `json:",omitempty"`
+ HTTP string `json:",omitempty"`
+ TCP string `json:",omitempty"`
+ Status string `json:",omitempty"`
+}
+type AgentServiceChecks []*AgentServiceCheck
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+ c *Client
+
+ // cache the node name
+ nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+ return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+ r := a.c.newRequest("GET", "/v1/agent/self")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]map[string]interface{}
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// NodeName is used to get the node name of the agent
+func (a *Agent) NodeName() (string, error) {
+ if a.nodeName != "" {
+ return a.nodeName, nil
+ }
+ info, err := a.Self()
+ if err != nil {
+ return "", err
+ }
+ name := info["Config"]["NodeName"].(string)
+ a.nodeName = name
+ return name, nil
+}
+
+// Checks returns the locally registered checks
+func (a *Agent) Checks() (map[string]*AgentCheck, error) {
+ r := a.c.newRequest("GET", "/v1/agent/checks")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]*AgentCheck
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Services returns the locally registered services
+func (a *Agent) Services() (map[string]*AgentService, error) {
+ r := a.c.newRequest("GET", "/v1/agent/services")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]*AgentService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+ r := a.c.newRequest("GET", "/v1/agent/members")
+ if wan {
+ r.params.Set("wan", "1")
+ }
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []*AgentMember
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ServiceRegister is used to register a new service with
+// the local agent
+func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/register")
+ r.obj = service
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ServiceDeregister is used to deregister a service with
+// the local agent
+func (a *Agent) ServiceDeregister(serviceID string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// PassTTL is used to set a TTL check to the passing state
+func (a *Agent) PassTTL(checkID, note string) error {
+ return a.UpdateTTL(checkID, note, "pass")
+}
+
+// WarnTTL is used to set a TTL check to the warning state
+func (a *Agent) WarnTTL(checkID, note string) error {
+ return a.UpdateTTL(checkID, note, "warn")
+}
+
+// FailTTL is used to set a TTL check to the failing state
+func (a *Agent) FailTTL(checkID, note string) error {
+ return a.UpdateTTL(checkID, note, "fail")
+}
+
+// UpdateTTL is used to update the TTL of a check
+func (a *Agent) UpdateTTL(checkID, note, status string) error {
+ switch status {
+ case "pass":
+ case "warn":
+ case "fail":
+ default:
+ return fmt.Errorf("Invalid status: %s", status)
+ }
+ endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
+ r := a.c.newRequest("PUT", endpoint)
+ r.params.Set("note", note)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// CheckRegister is used to register a new check with
+// the local agent
+func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
+ r := a.c.newRequest("PUT", "/v1/agent/check/register")
+ r.obj = check
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// CheckDeregister is used to deregister a check with
+// the local agent
+func (a *Agent) CheckDeregister(checkID string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// Join is used to instruct the agent to attempt a join to
+// another cluster member
+func (a *Agent) Join(addr string, wan bool) error {
+ r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
+ if wan {
+ r.params.Set("wan", "1")
+ }
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ForceLeave is used to have the agent eject a failed node
+func (a *Agent) ForceLeave(node string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// EnableServiceMaintenance toggles service maintenance mode on
+// for the given service ID.
+func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+ r.params.Set("enable", "true")
+ r.params.Set("reason", reason)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// DisableServiceMaintenance toggles service maintenance mode off
+// for the given service ID.
+func (a *Agent) DisableServiceMaintenance(serviceID string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+ r.params.Set("enable", "false")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// EnableNodeMaintenance toggles node maintenance mode on for the
+// agent we are connected to.
+func (a *Agent) EnableNodeMaintenance(reason string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+ r.params.Set("enable", "true")
+ r.params.Set("reason", reason)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// DisableNodeMaintenance toggles node maintenance mode off for the
+// agent we are connected to.
+func (a *Agent) DisableNodeMaintenance() error {
+ r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+ r.params.Set("enable", "false")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
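A sketch of typical agent usage, registering a service with a TTL check and refreshing it. The service name, port, and the check ID convention (`service:<service id>`) are illustrative assumptions:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	agent := client.Agent()

	// Register a service with an associated TTL check.
	reg := &api.AgentServiceRegistration{
		ID:    "web-1",
		Name:  "web",
		Port:  8080,
		Check: &api.AgentServiceCheck{TTL: "10s"},
	}
	if err := agent.ServiceRegister(reg); err != nil {
		log.Fatal(err)
	}

	// The TTL check must be refreshed within its window; service checks
	// are conventionally identified as "service:<service id>".
	if err := agent.PassTTL("service:web-1", "all good"); err != nil {
		log.Fatal(err)
	}
}
```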
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
new file mode 100644
index 0000000000..8fe2ead048
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -0,0 +1,442 @@
+package api
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// QueryOptions are used to parameterize a query
+type QueryOptions struct {
+ // Providing a datacenter overwrites the DC provided
+ // by the Config
+ Datacenter string
+
+ // AllowStale allows any Consul server (non-leader) to service
+ // a read. This allows for lower latency and higher throughput
+ AllowStale bool
+
+ // RequireConsistent forces the read to be fully consistent.
+ // This is more expensive but prevents ever performing a stale
+ // read.
+ RequireConsistent bool
+
+ // WaitIndex is used to enable a blocking query. Waits
+ // until the timeout or the next index is reached
+ WaitIndex uint64
+
+ // WaitTime is used to bound the duration of a wait.
+ // Defaults to that of the Config, but can be overridden.
+ WaitTime time.Duration
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+}
+
+// WriteOptions are used to parameterize a write
+type WriteOptions struct {
+ // Providing a datacenter overwrites the DC provided
+ // by the Config
+ Datacenter string
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+}
+
+// QueryMeta is used to return meta data about a query
+type QueryMeta struct {
+ // LastIndex. This can be used as a WaitIndex to perform
+ // a blocking query
+ LastIndex uint64
+
+ // Time of last contact from the leader for the
+ // server servicing the request
+ LastContact time.Duration
+
+ // Is there a known leader
+ KnownLeader bool
+
+ // How long did the request take
+ RequestTime time.Duration
+}
+
+// WriteMeta is used to return meta data about a write
+type WriteMeta struct {
+ // How long did the request take
+ RequestTime time.Duration
+}
+
+// HttpBasicAuth is used to authenticate an HTTP client with HTTP Basic Authentication
+type HttpBasicAuth struct {
+ // Username to use for HTTP Basic Authentication
+ Username string
+
+ // Password to use for HTTP Basic Authentication
+ Password string
+}
+
+// Config is used to configure the creation of a client
+type Config struct {
+ // Address is the address of the Consul server
+ Address string
+
+ // Scheme is the URI scheme for the Consul server
+ Scheme string
+
+ // Datacenter to use. If not provided, the default agent datacenter is used.
+ Datacenter string
+
+ // HttpClient is the client to use. Default will be
+ // used if not provided.
+ HttpClient *http.Client
+
+ // HttpAuth is the auth info to use for http access.
+ HttpAuth *HttpBasicAuth
+
+ // WaitTime limits how long a Watch will block. If not provided,
+ // the agent default values will be used.
+ WaitTime time.Duration
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+}
+
+// DefaultConfig returns a default configuration for the client
+func DefaultConfig() *Config {
+ config := &Config{
+ Address: "127.0.0.1:8500",
+ Scheme: "http",
+ HttpClient: http.DefaultClient,
+ }
+
+ if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" {
+ config.Address = addr
+ }
+
+ if token := os.Getenv("CONSUL_HTTP_TOKEN"); token != "" {
+ config.Token = token
+ }
+
+ if auth := os.Getenv("CONSUL_HTTP_AUTH"); auth != "" {
+ var username, password string
+ if strings.Contains(auth, ":") {
+ split := strings.SplitN(auth, ":", 2)
+ username = split[0]
+ password = split[1]
+ } else {
+ username = auth
+ }
+
+ config.HttpAuth = &HttpBasicAuth{
+ Username: username,
+ Password: password,
+ }
+ }
+
+ if ssl := os.Getenv("CONSUL_HTTP_SSL"); ssl != "" {
+ enabled, err := strconv.ParseBool(ssl)
+ if err != nil {
+ log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL: %s", err)
+ }
+
+ if enabled {
+ config.Scheme = "https"
+ }
+ }
+
+ if verify := os.Getenv("CONSUL_HTTP_SSL_VERIFY"); verify != "" {
+ doVerify, err := strconv.ParseBool(verify)
+ if err != nil {
+ log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL_VERIFY: %s", err)
+ }
+
+ if !doVerify {
+ config.HttpClient.Transport = &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: true,
+ },
+ }
+ }
+ }
+
+ return config
+}
+
+// Client provides a client to the Consul API
+type Client struct {
+ config Config
+}
+
+// NewClient returns a new client
+func NewClient(config *Config) (*Client, error) {
+ // bootstrap the config
+ defConfig := DefaultConfig()
+
+ if len(config.Address) == 0 {
+ config.Address = defConfig.Address
+ }
+
+ if len(config.Scheme) == 0 {
+ config.Scheme = defConfig.Scheme
+ }
+
+ if config.HttpClient == nil {
+ config.HttpClient = defConfig.HttpClient
+ }
+
+ if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 {
+ config.HttpClient = &http.Client{
+ Transport: &http.Transport{
+ Dial: func(_, _ string) (net.Conn, error) {
+ return net.Dial("unix", parts[1])
+ },
+ },
+ }
+ config.Address = parts[1]
+ }
+
+ client := &Client{
+ config: *config,
+ }
+ return client, nil
+}
+
+// request is used to help build up a request
+type request struct {
+ config *Config
+ method string
+ url *url.URL
+ params url.Values
+ body io.Reader
+ obj interface{}
+}
+
+// setQueryOptions is used to annotate the request with
+// additional query options
+func (r *request) setQueryOptions(q *QueryOptions) {
+ if q == nil {
+ return
+ }
+ if q.Datacenter != "" {
+ r.params.Set("dc", q.Datacenter)
+ }
+ if q.AllowStale {
+ r.params.Set("stale", "")
+ }
+ if q.RequireConsistent {
+ r.params.Set("consistent", "")
+ }
+ if q.WaitIndex != 0 {
+ r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
+ }
+ if q.WaitTime != 0 {
+ r.params.Set("wait", durToMsec(q.WaitTime))
+ }
+ if q.Token != "" {
+ r.params.Set("token", q.Token)
+ }
+}
+
+// durToMsec converts a duration to a string specified in milliseconds
+func durToMsec(dur time.Duration) string {
+ return fmt.Sprintf("%dms", dur/time.Millisecond)
+}
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+ if q == nil {
+ return
+ }
+ if q.Datacenter != "" {
+ r.params.Set("dc", q.Datacenter)
+ }
+ if q.Token != "" {
+ r.params.Set("token", q.Token)
+ }
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+ // Encode the query parameters
+ r.url.RawQuery = r.params.Encode()
+
+ // Check if we should encode the body
+ if r.body == nil && r.obj != nil {
+ if b, err := encodeBody(r.obj); err != nil {
+ return nil, err
+ } else {
+ r.body = b
+ }
+ }
+
+ // Create the HTTP request
+ req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.URL.Host = r.url.Host
+ req.URL.Scheme = r.url.Scheme
+ req.Host = r.url.Host
+
+ // Setup auth
+ if r.config.HttpAuth != nil {
+ req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
+ }
+
+ return req, nil
+}
+
+// newRequest is used to create a new request
+func (c *Client) newRequest(method, path string) *request {
+ r := &request{
+ config: &c.config,
+ method: method,
+ url: &url.URL{
+ Scheme: c.config.Scheme,
+ Host: c.config.Address,
+ Path: path,
+ },
+ params: make(map[string][]string),
+ }
+ if c.config.Datacenter != "" {
+ r.params.Set("dc", c.config.Datacenter)
+ }
+ if c.config.WaitTime != 0 {
+ r.params.Set("wait", durToMsec(r.config.WaitTime))
+ }
+ if c.config.Token != "" {
+ r.params.Set("token", r.config.Token)
+ }
+ return r
+}
+
+// doRequest runs a request with our client
+func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
+ req, err := r.toHTTP()
+ if err != nil {
+ return 0, nil, err
+ }
+ start := time.Now()
+ resp, err := c.config.HttpClient.Do(req)
+ diff := time.Now().Sub(start)
+ return diff, resp, err
+}
+
+// Query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+ r := c.newRequest("GET", endpoint)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if err := decodeBody(resp, out); err != nil {
+ return nil, err
+ }
+ return qm, nil
+}
+
+// write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+ r := c.newRequest("PUT", endpoint)
+ r.setWriteOptions(q)
+ r.obj = in
+ rtt, resp, err := requireOK(c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ if out != nil {
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ }
+ return wm, nil
+}
+
+// parseQueryMeta is used to help parse query meta-data
+func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
+ header := resp.Header
+
+ // Parse the X-Consul-Index
+ index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
+ }
+ q.LastIndex = index
+
+ // Parse the X-Consul-LastContact
+ last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
+ }
+ q.LastContact = time.Duration(last) * time.Millisecond
+
+ // Parse the X-Consul-KnownLeader
+ switch header.Get("X-Consul-KnownLeader") {
+ case "true":
+ q.KnownLeader = true
+ default:
+ q.KnownLeader = false
+ }
+ return nil
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+ dec := json.NewDecoder(resp.Body)
+ return dec.Decode(out)
+}
+
+// encodeBody is used to encode a request body
+func encodeBody(obj interface{}) (io.Reader, error) {
+ buf := bytes.NewBuffer(nil)
+ enc := json.NewEncoder(buf)
+ if err := enc.Encode(obj); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+// requireOK is used to wrap doRequest and check for a 200
+func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
+ if e != nil {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ return d, nil, e
+ }
+ if resp.StatusCode != 200 {
+ var buf bytes.Buffer
+ io.Copy(&buf, resp.Body)
+ resp.Body.Close()
+ return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+ }
+ return d, resp, nil
+}
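The `WaitIndex` and `WaitTime` fields above are what enable blocking queries. A sketch of the usual long-poll loop, using the Catalog endpoints defined in the next file:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	catalog := client.Catalog()

	var lastIndex uint64
	for {
		// Blocks server-side until the index advances past WaitIndex
		// (or the wait time elapses), then returns the new index.
		services, meta, err := catalog.Services(&api.QueryOptions{
			WaitIndex:  lastIndex,
			AllowStale: true, // any server may answer; lower latency
		})
		if err != nil {
			log.Fatal(err)
		}
		lastIndex = meta.LastIndex
		log.Printf("%d services at index %d", len(services), lastIndex)
	}
}
```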
diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go
new file mode 100644
index 0000000000..cf64bd9091
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/catalog.go
@@ -0,0 +1,182 @@
+package api
+
+type Node struct {
+ Node string
+ Address string
+}
+
+type CatalogService struct {
+ Node string
+ Address string
+ ServiceID string
+ ServiceName string
+ ServiceAddress string
+ ServiceTags []string
+ ServicePort int
+}
+
+type CatalogNode struct {
+ Node *Node
+ Services map[string]*AgentService
+}
+
+type CatalogRegistration struct {
+ Node string
+ Address string
+ Datacenter string
+ Service *AgentService
+ Check *AgentCheck
+}
+
+type CatalogDeregistration struct {
+ Node string
+ Address string
+ Datacenter string
+ ServiceID string
+ CheckID string
+}
+
+// Catalog can be used to query the Catalog endpoints
+type Catalog struct {
+ c *Client
+}
+
+// Catalog returns a handle to the catalog endpoints
+func (c *Client) Catalog() *Catalog {
+ return &Catalog{c}
+}
+
+func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
+ r := c.c.newRequest("PUT", "/v1/catalog/register")
+ r.setWriteOptions(q)
+ r.obj = reg
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ return wm, nil
+}
+
+func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
+ r := c.c.newRequest("PUT", "/v1/catalog/deregister")
+ r.setWriteOptions(q)
+ r.obj = dereg
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ return wm, nil
+}
+
+// Datacenters is used to query for all the known datacenters
+func (c *Catalog) Datacenters() ([]string, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/datacenters")
+ _, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []string
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Nodes is used to query all the known nodes
+func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/nodes")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*Node
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Services is used to query for all known services
+func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/services")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out map[string][]string
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Service is used to query catalog entries for a given service
+func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
+ r.setQueryOptions(q)
+ if tag != "" {
+ r.params.Set("tag", tag)
+ }
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*CatalogService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Node is used to query for service information about a single node
+func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out *CatalogNode
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
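A minimal service-discovery sketch against the catalog; the service name and tag are illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Look up all instances of a service, optionally filtered by tag.
	entries, _, err := client.Catalog().Service("web", "v1", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range entries {
		log.Printf("%s -> %s:%d", s.ServiceID, s.Address, s.ServicePort)
	}
}
```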
diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go
new file mode 100644
index 0000000000..85b5b069b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/event.go
@@ -0,0 +1,104 @@
+package api
+
+import (
+ "bytes"
+ "strconv"
+)
+
+// Event can be used to query the Event endpoints
+type Event struct {
+ c *Client
+}
+
+// UserEvent represents an event that was fired by the user
+type UserEvent struct {
+ ID string
+ Name string
+ Payload []byte
+ NodeFilter string
+ ServiceFilter string
+ TagFilter string
+ Version int
+ LTime uint64
+}
+
+// Event returns a handle to the event endpoints
+func (c *Client) Event() *Event {
+ return &Event{c}
+}
+
+// Fire is used to fire a new user event. Only the Name, Payload and Filters
+// are respected. This returns the ID or an associated error. Cross DC requests
+// are supported.
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+ r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+ r.setWriteOptions(q)
+ if params.NodeFilter != "" {
+ r.params.Set("node", params.NodeFilter)
+ }
+ if params.ServiceFilter != "" {
+ r.params.Set("service", params.ServiceFilter)
+ }
+ if params.TagFilter != "" {
+ r.params.Set("tag", params.TagFilter)
+ }
+ if params.Payload != nil {
+ r.body = bytes.NewReader(params.Payload)
+ }
+
+ rtt, resp, err := requireOK(e.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out UserEvent
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+ r := e.c.newRequest("GET", "/v1/event/list")
+ r.setQueryOptions(q)
+ if name != "" {
+ r.params.Set("name", name)
+ }
+ rtt, resp, err := requireOK(e.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*UserEvent
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+ lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+ upper := uuid[19:23] + uuid[24:36]
+ lowVal, err := strconv.ParseUint(lower, 16, 64)
+ if err != nil {
+ panic("Failed to convert " + lower)
+ }
+ highVal, err := strconv.ParseUint(upper, 16, 64)
+ if err != nil {
+ panic("Failed to convert " + upper)
+ }
+ return lowVal ^ highVal
+}
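+
+// A minimal usage sketch (assumption: a `client` built with this package's
+// NewClient(DefaultConfig())): fire an event, then turn its ID into a wait
+// index for quasi-blocking List calls.
+//
+//    event := client.Event()
+//    id, _, err := event.Fire(&UserEvent{Name: "deploy", Payload: []byte("v2")}, nil)
+//    if err != nil {
+//        panic(err)
+//    }
+//    q := &QueryOptions{WaitIndex: event.IDToIndex(id)}
+//    entries, _, err := event.List("deploy", q)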
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
new file mode 100644
index 0000000000..02b161e28e
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/health.go
@@ -0,0 +1,136 @@
+package api
+
+import (
+ "fmt"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+ Node string
+ CheckID string
+ Name string
+ Status string
+ Notes string
+ Output string
+ ServiceID string
+ ServiceName string
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+ Node *Node
+ Service *AgentService
+ Checks []*HealthCheck
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+ c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+ return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/health/node/"+node)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*HealthCheck
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*HealthCheck
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag,
+// or return only nodes with passing health checks.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/health/service/"+service)
+ r.setQueryOptions(q)
+ if tag != "" {
+ r.params.Set("tag", tag)
+ }
+ if passingOnly {
+ r.params.Set("passing", "1")
+ }
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*ServiceEntry
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+ switch state {
+ case "any":
+ case "warning":
+ case "critical":
+ case "passing":
+ case "unknown":
+ default:
+ return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+ }
+ r := h.c.newRequest("GET", "/v1/health/state/"+state)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*HealthCheck
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
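+
+// A minimal usage sketch (assumption: a `client` built with this package's
+// NewClient(DefaultConfig())): list only the healthy instances of a service,
+// optionally filtered by tag.
+//
+//    entries, _, err := client.Health().Service("web", "primary", true, nil)
+//    if err != nil {
+//        panic(err)
+//    }
+//    for _, entry := range entries {
+//        fmt.Println(entry.Node.Node, entry.Service.Port)
+//    }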
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go
new file mode 100644
index 0000000000..c1a8923bef
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/kv.go
@@ -0,0 +1,240 @@
+package api
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+ Key string
+ CreateIndex uint64
+ ModifyIndex uint64
+ LockIndex uint64
+ Flags uint64
+ Value []byte
+ Session string
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KV is used to manipulate the K/V API
+type KV struct {
+ c *Client
+}
+
+// KV is used to return a handle to the K/V APIs
+func (c *Client) KV() *KV {
+ return &KV{c}
+}
+
+// Get is used to lookup a single key
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+ resp, qm, err := k.getInternal(key, nil, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []*KVPair
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// List is used to lookup all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+ resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []*KVPair
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+ params := map[string]string{"keys": ""}
+ if separator != "" {
+ params["separator"] = separator
+ }
+ resp, qm, err := k.getInternal(prefix, params, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []string
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+ r := k.c.newRequest("GET", "/v1/kv/"+key)
+ r.setQueryOptions(q)
+ for param, val := range params {
+ r.params.Set(param, val)
+ }
+ rtt, resp, err := k.c.doRequest(r)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if resp.StatusCode == 404 {
+ resp.Body.Close()
+ return nil, qm, nil
+ } else if resp.StatusCode != 200 {
+ resp.Body.Close()
+ return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+ }
+ return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+ params := make(map[string]string, 1)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ _, wm, err := k.put(p.Key, params, p.Value, q)
+ return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failures.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+ return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["acquire"] = p.Session
+ return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["release"] = p.Session
+ return k.put(p.Key, params, p.Value, q)
+}
+
+func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
+ if len(key) > 0 && key[0] == '/' {
+ return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key)
+ }
+
+ r := k.c.newRequest("PUT", "/v1/kv/"+key)
+ r.setWriteOptions(q)
+ for param, val := range params {
+ r.params.Set(param, val)
+ }
+ r.body = bytes.NewReader(body)
+ rtt, resp, err := requireOK(k.c.doRequest(r))
+ if err != nil {
+ return false, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &WriteMeta{}
+ qm.RequestTime = rtt
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, resp.Body); err != nil {
+ return false, nil, fmt.Errorf("Failed to read response: %v", err)
+ }
+ res := strings.Contains(string(buf.Bytes()), "true")
+ return res, qm, nil
+}
+
+// Delete is used to delete a single key
+func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
+ _, qm, err := k.deleteInternal(key, nil, w)
+ return qm, err
+}
+
+// DeleteCAS is used for a Delete Check-And-Set operation. The Key
+// and ModifyIndex are respected. Returns true on success or false on failures.
+func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := map[string]string{
+ "cas": strconv.FormatUint(p.ModifyIndex, 10),
+ }
+ return k.deleteInternal(p.Key, params, q)
+}
+
+// DeleteTree is used to delete all keys under a prefix
+func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
+ _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
+ return qm, err
+}
+
+func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
+ r := k.c.newRequest("DELETE", "/v1/kv/"+key)
+ r.setWriteOptions(q)
+ for param, val := range params {
+ r.params.Set(param, val)
+ }
+ rtt, resp, err := requireOK(k.c.doRequest(r))
+ if err != nil {
+ return false, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &WriteMeta{}
+ qm.RequestTime = rtt
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, resp.Body); err != nil {
+ return false, nil, fmt.Errorf("Failed to read response: %v", err)
+ }
+ res := strings.Contains(string(buf.Bytes()), "true")
+ return res, qm, nil
+}
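+
+// A minimal usage sketch (assumption: a `client` built with this package's
+// NewClient(DefaultConfig())): write a key, read it back, then update it
+// with check-and-set semantics.
+//
+//    kv := client.KV()
+//    if _, err := kv.Put(&KVPair{Key: "app/config", Value: []byte("v1")}, nil); err != nil {
+//        panic(err)
+//    }
+//    pair, _, err := kv.Get("app/config", nil)
+//    if err != nil || pair == nil {
+//        panic("key not found")
+//    }
+//    pair.Value = []byte("v2")
+//    ok, _, err := kv.CAS(pair, nil) // false if ModifyIndex changed underneath us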
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 0000000000..a76685f04c
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,338 @@
+package api
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+const (
+ // DefaultLockSessionName is the Session Name we assign if none is provided
+ DefaultLockSessionName = "Consul API Lock"
+
+ // DefaultLockSessionTTL is the default session TTL if no Session is provided
+// when creating a new Lock. This is used because we do not have any
+// other check to depend upon.
+ DefaultLockSessionTTL = "15s"
+
+ // DefaultLockWaitTime is how long we block for at a time to check if lock
+ // acquisition is possible. This affects the minimum time it takes to cancel
+ // a Lock acquisition.
+ DefaultLockWaitTime = 15 * time.Second
+
+ // DefaultLockRetryTime is how long we wait after a failed lock acquisition
+ // before attempting to do the lock again. This is so that once a lock-delay
+// is in effect, we do not hot loop retrying the acquisition.
+ DefaultLockRetryTime = 5 * time.Second
+
+ // LockFlagValue is a magic flag we set to indicate a key
+ // is being used for a lock. It is used to detect a potential
+ // conflict with a semaphore.
+ LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+ // ErrLockHeld is returned if we attempt to double lock
+ ErrLockHeld = fmt.Errorf("Lock already held")
+
+ // ErrLockNotHeld is returned if we attempt to unlock a lock
+ // that we do not hold.
+ ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+ // ErrLockInUse is returned if we attempt to destroy a lock
+ // that is in use.
+ ErrLockInUse = fmt.Errorf("Lock in use")
+
+ // ErrLockConflict is returned if the flags on a key
+ // used for a lock do not match expectation
+ ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm as described here: https://consul.io/docs/guides/leader-election.html.
+type Lock struct {
+ c *Client
+ opts *LockOptions
+
+ isHeld bool
+ sessionRenew chan struct{}
+ lockSession string
+ l sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
+type LockOptions struct {
+ Key string // Must be set and have write permissions
+ Value []byte // Optional, value to associate with the lock
+ Session string // Optional, created if not specified
+ SessionName string // Optional, defaults to DefaultLockSessionName
+ SessionTTL string // Optional, defaults to DefaultLockSessionTTL
+}
+
+// LockKey returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockKey(key string) (*Lock, error) {
+ opts := &LockOptions{
+ Key: key,
+ }
+ return c.LockOpts(opts)
+}
+
+// LockOpts returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
+ if opts.Key == "" {
+ return nil, fmt.Errorf("missing key")
+ }
+ if opts.SessionName == "" {
+ opts.SessionName = DefaultLockSessionName
+ }
+ if opts.SessionTTL == "" {
+ opts.SessionTTL = DefaultLockSessionTTL
+ } else {
+ if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+ return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+ }
+ }
+ l := &Lock{
+ c: c,
+ opts: opts,
+ }
+ return l, nil
+}
+
+// Lock attempts to acquire the lock and blocks while doing so.
+// Providing a non-nil stopCh can be used to abort the lock attempt.
+// Returns a channel that is closed if our lock is lost, or an error.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the lock is held until Unlock() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the lock being lost.
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+ // Hold the lock as we try to acquire
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ // Check if we already hold the lock
+ if l.isHeld {
+ return nil, ErrLockHeld
+ }
+
+ // Check if we need to create a session first
+ l.lockSession = l.opts.Session
+ if l.lockSession == "" {
+ if s, err := l.createSession(); err != nil {
+ return nil, fmt.Errorf("failed to create session: %v", err)
+ } else {
+ l.sessionRenew = make(chan struct{})
+ l.lockSession = s
+ session := l.c.Session()
+ go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
+
+ // If we fail to acquire the lock, cleanup the session
+ defer func() {
+ if !l.isHeld {
+ close(l.sessionRenew)
+ l.sessionRenew = nil
+ }
+ }()
+ }
+ }
+
+ // Setup the query options
+ kv := l.c.KV()
+ qOpts := &QueryOptions{
+ WaitTime: DefaultLockWaitTime,
+ }
+
+WAIT:
+ // Check if we should quit
+ select {
+ case <-stopCh:
+ return nil, nil
+ default:
+ }
+
+ // Look for an existing lock, blocking until not taken
+ pair, meta, err := kv.Get(l.opts.Key, qOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read lock: %v", err)
+ }
+ if pair != nil && pair.Flags != LockFlagValue {
+ return nil, ErrLockConflict
+ }
+ locked := false
+ if pair != nil && pair.Session == l.lockSession {
+ goto HELD
+ }
+ if pair != nil && pair.Session != "" {
+ qOpts.WaitIndex = meta.LastIndex
+ goto WAIT
+ }
+
+ // Try to acquire the lock
+ pair = l.lockEntry(l.lockSession)
+ locked, _, err = kv.Acquire(pair, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to acquire lock: %v", err)
+ }
+
+ // Handle the case of not getting the lock
+ if !locked {
+ // Determine why the lock failed
+ qOpts.WaitIndex = 0
+ pair, meta, err = kv.Get(l.opts.Key, qOpts)
+ if pair != nil && pair.Session != "" {
+ // If the session is not empty, this means that a wait can safely happen
+ // using a long poll
+ qOpts.WaitIndex = meta.LastIndex
+ goto WAIT
+ } else {
+ // If the session is empty and the lock failed to acquire, then it means
+ // a lock-delay is in effect and a timed wait must be used
+ select {
+ case <-time.After(DefaultLockRetryTime):
+ goto WAIT
+ case <-stopCh:
+ return nil, nil
+ }
+ }
+ }
+
+HELD:
+ // Watch to ensure we maintain leadership
+ leaderCh := make(chan struct{})
+ go l.monitorLock(l.lockSession, leaderCh)
+
+ // Set that we own the lock
+ l.isHeld = true
+
+ // Locked! All done
+ return leaderCh, nil
+}
+
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error {
+ // Hold the lock as we try to release
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ // Ensure the lock is actually held
+ if !l.isHeld {
+ return ErrLockNotHeld
+ }
+
+ // Set that we no longer own the lock
+ l.isHeld = false
+
+ // Stop the session renew
+ if l.sessionRenew != nil {
+ defer func() {
+ close(l.sessionRenew)
+ l.sessionRenew = nil
+ }()
+ }
+
+ // Get the lock entry, and clear the lock session
+ lockEnt := l.lockEntry(l.lockSession)
+ l.lockSession = ""
+
+ // Release the lock explicitly
+ kv := l.c.KV()
+ _, _, err := kv.Release(lockEnt, nil)
+ if err != nil {
+ return fmt.Errorf("failed to release lock: %v", err)
+ }
+ return nil
+}
+
+// Destroy is used to clean up the lock entry. It is not necessary
+// to invoke it. It will fail if the lock is in use.
+func (l *Lock) Destroy() error {
+ // Hold the lock as we try to release
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ // Check if we already hold the lock
+ if l.isHeld {
+ return ErrLockHeld
+ }
+
+ // Look for an existing lock
+ kv := l.c.KV()
+ pair, _, err := kv.Get(l.opts.Key, nil)
+ if err != nil {
+ return fmt.Errorf("failed to read lock: %v", err)
+ }
+
+ // Nothing to do if the lock does not exist
+ if pair == nil {
+ return nil
+ }
+
+ // Check for possible flag conflict
+ if pair.Flags != LockFlagValue {
+ return ErrLockConflict
+ }
+
+ // Check if it is in use
+ if pair.Session != "" {
+ return ErrLockInUse
+ }
+
+ // Attempt the delete
+ didRemove, _, err := kv.DeleteCAS(pair, nil)
+ if err != nil {
+ return fmt.Errorf("failed to remove lock: %v", err)
+ }
+ if !didRemove {
+ return ErrLockInUse
+ }
+ return nil
+}
+
+// createSession is used to create a new managed session
+func (l *Lock) createSession() (string, error) {
+ session := l.c.Session()
+ se := &SessionEntry{
+ Name: l.opts.SessionName,
+ TTL: l.opts.SessionTTL,
+ }
+ id, _, err := session.Create(se, nil)
+ if err != nil {
+ return "", err
+ }
+ return id, nil
+}
+
+// lockEntry returns a formatted KVPair for the lock
+func (l *Lock) lockEntry(session string) *KVPair {
+ return &KVPair{
+ Key: l.opts.Key,
+ Value: l.opts.Value,
+ Session: session,
+ Flags: LockFlagValue,
+ }
+}
+
+// monitorLock is a long-running routine to monitor lock ownership.
+// It closes the stopCh if we lose our leadership.
+func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
+ defer close(stopCh)
+ kv := l.c.KV()
+ opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+ pair, meta, err := kv.Get(l.opts.Key, opts)
+ if err != nil {
+ return
+ }
+ if pair != nil && pair.Session == session {
+ opts.WaitIndex = meta.LastIndex
+ goto WAIT
+ }
+}
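+
+// A minimal usage sketch (assumption: a `client` built with this package's
+// NewClient(DefaultConfig())). The returned channel is closed if leadership
+// is lost, so callers should watch it rather than assume the lock is held
+// until Unlock.
+//
+//    lock, err := client.LockKey("service/leader")
+//    if err != nil {
+//        panic(err)
+//    }
+//    leaderCh, err := lock.Lock(nil) // blocks until acquired
+//    if err != nil {
+//        panic(err)
+//    }
+//    defer lock.Unlock()
+//    <-leaderCh // returns when leadership is lost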
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
new file mode 100644
index 0000000000..745a208c99
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/raw.go
@@ -0,0 +1,24 @@
+package api
+
+// Raw can be used to do raw queries against custom endpoints
+type Raw struct {
+ c *Client
+}
+
+// Raw returns a handle to query endpoints
+func (c *Client) Raw() *Raw {
+ return &Raw{c}
+}
+
+// Query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+ return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialized using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+ return raw.c.write(endpoint, in, out, q)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 0000000000..ff4c2058ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,477 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+ "sync"
+ "time"
+)
+
+const (
+ // DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+ DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+ // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+// when creating a new Semaphore. This is used because we do not have any
+// other check to depend upon.
+ DefaultSemaphoreSessionTTL = "15s"
+
+ // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+ // acquisition is possible. This affects the minimum time it takes to cancel
+ // a Semaphore acquisition.
+ DefaultSemaphoreWaitTime = 15 * time.Second
+
+ // DefaultSemaphoreKey is the key used within the prefix to
+ // use for coordination between all the contenders.
+ DefaultSemaphoreKey = ".lock"
+
+ // SemaphoreFlagValue is a magic flag we set to indicate a key
+ // is being used for a semaphore. It is used to detect a potential
+ // conflict with a lock.
+ SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+ // ErrSemaphoreHeld is returned if we attempt to double lock
+ ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+ // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+ // that we do not hold.
+ ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+ // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+ // that is in use.
+ ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+ // ErrSemaphoreConflict is returned if the flags on a key
+ // used for a semaphore do not match expectation
+ ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+ c *Client
+ opts *SemaphoreOptions
+
+ isHeld bool
+ sessionRenew chan struct{}
+ lockSession string
+ l sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+ Prefix string // Must be set and have write permissions
+ Limit int // Must be set, and be positive
+ Value []byte // Optional, value to associate with the contender entry
+ Session string // Optional, created if not specified
+ SessionName string // Optional, defaults to DefaultSemaphoreSessionName
+ SessionTTL string // Optional, defaults to DefaultSemaphoreSessionTTL
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+ // Limit is the integer limit of holders. This is used to
+ // verify that all the holders agree on the value.
+ Limit int
+
+ // Holders is a list of all the semaphore holders.
+ // It maps the session ID to true. It is used as a set effectively.
+ Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and uses the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+ opts := &SemaphoreOptions{
+ Prefix: prefix,
+ Limit: limit,
+ }
+ return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+ if opts.Prefix == "" {
+ return nil, fmt.Errorf("missing prefix")
+ }
+ if opts.Limit <= 0 {
+ return nil, fmt.Errorf("semaphore limit must be positive")
+ }
+ if opts.SessionName == "" {
+ opts.SessionName = DefaultSemaphoreSessionName
+ }
+ if opts.SessionTTL == "" {
+ opts.SessionTTL = DefaultSemaphoreSessionTTL
+ } else {
+ if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+ return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+ }
+ }
+ s := &Semaphore{
+ c: c,
+ opts: opts,
+ }
+ return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until
+// success, interruption via the stopCh, or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the session being lost.
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
+ // Hold the lock as we try to acquire
+ s.l.Lock()
+ defer s.l.Unlock()
+
+ // Check if we already hold the semaphore
+ if s.isHeld {
+ return nil, ErrSemaphoreHeld
+ }
+
+ // Check if we need to create a session first
+ s.lockSession = s.opts.Session
+ if s.lockSession == "" {
+ if sess, err := s.createSession(); err != nil {
+ return nil, fmt.Errorf("failed to create session: %v", err)
+ } else {
+ s.sessionRenew = make(chan struct{})
+ s.lockSession = sess
+ session := s.c.Session()
+ go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
+
+ // If we fail to acquire the lock, cleanup the session
+ defer func() {
+ if !s.isHeld {
+ close(s.sessionRenew)
+ s.sessionRenew = nil
+ }
+ }()
+ }
+ }
+
+ // Create the contender entry
+ kv := s.c.KV()
+ made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
+ if err != nil || !made {
+ return nil, fmt.Errorf("failed to make contender entry: %v", err)
+ }
+
+ // Setup the query options
+ qOpts := &QueryOptions{
+ WaitTime: DefaultSemaphoreWaitTime,
+ }
+
+WAIT:
+ // Check if we should quit
+ select {
+ case <-stopCh:
+ return nil, nil
+ default:
+ }
+
+ // Read the prefix
+ pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read prefix: %v", err)
+ }
+
+ // Decode the lock
+ lockPair := s.findLock(pairs)
+ if lockPair.Flags != SemaphoreFlagValue {
+ return nil, ErrSemaphoreConflict
+ }
+ lock, err := s.decodeLock(lockPair)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify we agree with the limit
+ if lock.Limit != s.opts.Limit {
+ return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
+ lock.Limit, s.opts.Limit)
+ }
+
+ // Prune the dead holders
+ s.pruneDeadHolders(lock, pairs)
+
+ // Check if the lock is held
+ if len(lock.Holders) >= lock.Limit {
+ qOpts.WaitIndex = meta.LastIndex
+ goto WAIT
+ }
+
+ // Create a new lock with us as a holder
+ lock.Holders[s.lockSession] = true
+ newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ // Attempt the acquisition
+ didSet, _, err := kv.CAS(newLock, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to update lock: %v", err)
+ }
+ if !didSet {
+ // Update failed, could have been a race with another contender,
+ // retry the operation
+ goto WAIT
+ }
+
+ // Watch to ensure we maintain ownership of the slot
+ lockCh := make(chan struct{})
+ go s.monitorLock(s.lockSession, lockCh)
+
+ // Set that we own the lock
+ s.isHeld = true
+
+ // Acquired! All done
+ return lockCh, nil
+}
+
+// Release is used to voluntarily give up our semaphore slot. It is
+// an error to call this if the semaphore has not been acquired.
+func (s *Semaphore) Release() error {
+ // Hold the lock as we try to release
+ s.l.Lock()
+ defer s.l.Unlock()
+
+ // Ensure the lock is actually held
+ if !s.isHeld {
+ return ErrSemaphoreNotHeld
+ }
+
+ // Set that we no longer own the lock
+ s.isHeld = false
+
+ // Stop the session renew
+ if s.sessionRenew != nil {
+ defer func() {
+ close(s.sessionRenew)
+ s.sessionRenew = nil
+ }()
+ }
+
+ // Get and clear the lock session
+ lockSession := s.lockSession
+ s.lockSession = ""
+
+ // Remove ourselves as a lock holder
+ kv := s.c.KV()
+ key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+READ:
+ pair, _, err := kv.Get(key, nil)
+ if err != nil {
+ return err
+ }
+ if pair == nil {
+ pair = &KVPair{}
+ }
+ lock, err := s.decodeLock(pair)
+ if err != nil {
+ return err
+ }
+
+ // Create a new lock without us as a holder
+ if _, ok := lock.Holders[lockSession]; ok {
+ delete(lock.Holders, lockSession)
+ newLock, err := s.encodeLock(lock, pair.ModifyIndex)
+ if err != nil {
+ return err
+ }
+
+ // Swap the locks
+ didSet, _, err := kv.CAS(newLock, nil)
+ if err != nil {
+ return fmt.Errorf("failed to update lock: %v", err)
+ }
+ if !didSet {
+ goto READ
+ }
+ }
+
+ // Destroy the contender entry
+ contenderKey := path.Join(s.opts.Prefix, lockSession)
+ if _, err := kv.Delete(contenderKey, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Destroy is used to clean up the semaphore entry. It is not necessary
+// to invoke it. It will fail if the semaphore is in use.
+func (s *Semaphore) Destroy() error {
+ // Hold the lock as we try to acquire
+ s.l.Lock()
+ defer s.l.Unlock()
+
+ // Check if we already hold the semaphore
+ if s.isHeld {
+ return ErrSemaphoreHeld
+ }
+
+ // List the keys under the semaphore prefix
+ kv := s.c.KV()
+ pairs, _, err := kv.List(s.opts.Prefix, nil)
+ if err != nil {
+ return fmt.Errorf("failed to read prefix: %v", err)
+ }
+
+ // Find the lock pair, bail if it doesn't exist
+ lockPair := s.findLock(pairs)
+ if lockPair.ModifyIndex == 0 {
+ return nil
+ }
+ if lockPair.Flags != SemaphoreFlagValue {
+ return ErrSemaphoreConflict
+ }
+
+ // Decode the lock
+ lock, err := s.decodeLock(lockPair)
+ if err != nil {
+ return err
+ }
+
+ // Prune the dead holders
+ s.pruneDeadHolders(lock, pairs)
+
+ // Check if there are any holders
+ if len(lock.Holders) > 0 {
+ return ErrSemaphoreInUse
+ }
+
+ // Attempt the delete
+ didRemove, _, err := kv.DeleteCAS(lockPair, nil)
+ if err != nil {
+ return fmt.Errorf("failed to remove semaphore: %v", err)
+ }
+ if !didRemove {
+ return ErrSemaphoreInUse
+ }
+ return nil
+}
+
+// createSession is used to create a new managed session
+func (s *Semaphore) createSession() (string, error) {
+ session := s.c.Session()
+ se := &SessionEntry{
+ Name: s.opts.SessionName,
+ TTL: s.opts.SessionTTL,
+ Behavior: SessionBehaviorDelete,
+ }
+ id, _, err := session.Create(se, nil)
+ if err != nil {
+ return "", err
+ }
+ return id, nil
+}
+
+// contenderEntry returns a formatted KVPair for the contender
+func (s *Semaphore) contenderEntry(session string) *KVPair {
+ return &KVPair{
+ Key: path.Join(s.opts.Prefix, session),
+ Value: s.opts.Value,
+ Session: session,
+ Flags: SemaphoreFlagValue,
+ }
+}
+
+// findLock is used to find the KV Pair which is used for coordination
+func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
+ key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+ for _, pair := range pairs {
+ if pair.Key == key {
+ return pair
+ }
+ }
+ return &KVPair{Flags: SemaphoreFlagValue}
+}
+
+// decodeLock is used to decode a semaphoreLock from an
+// entry in Consul
+func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
+ // Handle if there is no lock
+ if pair == nil || pair.Value == nil {
+ return &semaphoreLock{
+ Limit: s.opts.Limit,
+ Holders: make(map[string]bool),
+ }, nil
+ }
+
+ l := &semaphoreLock{}
+ if err := json.Unmarshal(pair.Value, l); err != nil {
+ return nil, fmt.Errorf("lock decoding failed: %v", err)
+ }
+ return l, nil
+}
+
+// encodeLock is used to encode a semaphoreLock into a KVPair
+// that can be PUT
+func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
+ enc, err := json.Marshal(l)
+ if err != nil {
+ return nil, fmt.Errorf("lock encoding failed: %v", err)
+ }
+ pair := &KVPair{
+ Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey),
+ Value: enc,
+ Flags: SemaphoreFlagValue,
+ ModifyIndex: oldIndex,
+ }
+ return pair, nil
+}
+
+// pruneDeadHolders is used to remove all the dead lock holders
+func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
+ // Gather all the live holders
+ alive := make(map[string]struct{}, len(pairs))
+ for _, pair := range pairs {
+ if pair.Session != "" {
+ alive[pair.Session] = struct{}{}
+ }
+ }
+
+ // Remove any holders that are dead
+ for holder := range lock.Holders {
+ if _, ok := alive[holder]; !ok {
+ delete(lock.Holders, holder)
+ }
+ }
+}
+
+// monitorLock is a long-running routine to monitor semaphore ownership.
+// It closes the stopCh if we lose our slot.
+func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
+ defer close(stopCh)
+ kv := s.c.KV()
+ opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+ pairs, meta, err := kv.List(s.opts.Prefix, opts)
+ if err != nil {
+ return
+ }
+ lockPair := s.findLock(pairs)
+ lock, err := s.decodeLock(lockPair)
+ if err != nil {
+ return
+ }
+ s.pruneDeadHolders(lock, pairs)
+ if _, ok := lock.Holders[session]; ok {
+ opts.WaitIndex = meta.LastIndex
+ goto WAIT
+ }
+}
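+
+// A minimal usage sketch (assumption: a `client` built with this package's
+// NewClient(DefaultConfig())). Every contender must use the same prefix and
+// agree on the same limit.
+//
+//    sema, err := client.SemaphorePrefix("service/workers", 3)
+//    if err != nil {
+//        panic(err)
+//    }
+//    slotCh, err := sema.Acquire(nil) // blocks until a slot is free
+//    if err != nil {
+//        panic(err)
+//    }
+//    defer sema.Release()
+//    _ = slotCh // closed if the slot is lost (e.g. session invalidation)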
diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go
new file mode 100644
index 0000000000..a99da511d6
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/session.go
@@ -0,0 +1,201 @@
+package api
+
+import (
+ "fmt"
+ "time"
+)
+
+const (
+ // SessionBehaviorRelease is the default behavior and causes
+ // all associated locks to be released on session invalidation.
+ SessionBehaviorRelease = "release"
+
+ // SessionBehaviorDelete is new in Consul 0.5 and changes the
+ // behavior to delete all associated locks on session invalidation.
+ // It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
+ SessionBehaviorDelete = "delete"
+)
+
+// SessionEntry represents a session in Consul
+type SessionEntry struct {
+ CreateIndex uint64
+ ID string
+ Name string
+ Node string
+ Checks []string
+ LockDelay time.Duration
+ Behavior string
+ TTL string
+}
+
+// Session can be used to query the Session endpoints
+type Session struct {
+ c *Client
+}
+
+// Session returns a handle to the session endpoints
+func (c *Client) Session() *Session {
+ return &Session{c}
+}
+
+// CreateNoChecks is like Create but is used specifically to create
+// a session with no associated health checks.
+func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ body := make(map[string]interface{})
+ body["Checks"] = []string{}
+ if se != nil {
+ if se.Name != "" {
+ body["Name"] = se.Name
+ }
+ if se.Node != "" {
+ body["Node"] = se.Node
+ }
+ if se.LockDelay != 0 {
+ body["LockDelay"] = durToMsec(se.LockDelay)
+ }
+ if se.Behavior != "" {
+ body["Behavior"] = se.Behavior
+ }
+ if se.TTL != "" {
+ body["TTL"] = se.TTL
+ }
+ }
+ return s.create(body, q)
+}
+
+// Create makes a new session. Providing a session entry can
+// customize the session. It can also be nil to use defaults.
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ var obj interface{}
+ if se != nil {
+ body := make(map[string]interface{})
+ obj = body
+ if se.Name != "" {
+ body["Name"] = se.Name
+ }
+ if se.Node != "" {
+ body["Node"] = se.Node
+ }
+ if se.LockDelay != 0 {
+ body["LockDelay"] = durToMsec(se.LockDelay)
+ }
+ if len(se.Checks) > 0 {
+ body["Checks"] = se.Checks
+ }
+ if se.Behavior != "" {
+ body["Behavior"] = se.Behavior
+ }
+ if se.TTL != "" {
+ body["TTL"] = se.TTL
+ }
+ }
+ return s.create(obj, q)
+}
+
+func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
+ var out struct{ ID string }
+ wm, err := s.c.write("/v1/session/create", obj, &out, q)
+ if err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Destroy invalidates a given session
+func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+ wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
+ if err != nil {
+ return nil, err
+ }
+ return wm, nil
+}
+
+// Renew renews the TTL on a given session
+func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
+ var entries []*SessionEntry
+ wm, err := s.c.write("/v1/session/renew/"+id, nil, &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], wm, nil
+ }
+ return nil, wm, nil
+}
+
+// RenewPeriodic is used to periodically invoke Session.Renew on a
+// session until a doneCh is closed. This is meant to be used in a long running
+// goroutine to ensure a session stays valid.
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error {
+ ttl, err := time.ParseDuration(initialTTL)
+ if err != nil {
+ return err
+ }
+
+ waitDur := ttl / 2
+ lastRenewTime := time.Now()
+ var lastErr error
+ for {
+ if time.Since(lastRenewTime) > ttl {
+ return lastErr
+ }
+ select {
+ case <-time.After(waitDur):
+ entry, _, err := s.Renew(id, q)
+ if err != nil {
+ waitDur = time.Second
+ lastErr = err
+ continue
+ }
+ if entry == nil {
+ waitDur = time.Second
+ lastErr = fmt.Errorf("No SessionEntry returned")
+ continue
+ }
+
+ // Handle the server updating the TTL
+ ttl, _ = time.ParseDuration(entry.TTL)
+ waitDur = ttl / 2
+ lastRenewTime = time.Now()
+
+ case <-doneCh:
+ // Attempt a session destroy
+ s.Destroy(id, q)
+ return nil
+ }
+ }
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// Node gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/list", &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
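+
+// A minimal usage sketch (assumption: a `client` built with this package's
+// NewClient(DefaultConfig())): create a TTL session and keep it renewed
+// from a background goroutine; closing doneCh stops renewal and destroys
+// the session.
+//
+//    session := client.Session()
+//    id, _, err := session.Create(&SessionEntry{Name: "worker", TTL: "15s"}, nil)
+//    if err != nil {
+//        panic(err)
+//    }
+//    doneCh := make(chan struct{})
+//    go session.RenewPeriodic("15s", id, nil, doneCh)
+//    // ... use the session for locks or ephemeral keys ...
+//    close(doneCh)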
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 0000000000..74ef61a678
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+ c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+ return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+ r := s.c.newRequest("GET", "/v1/status/leader")
+ _, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ var leader string
+ if err := decodeBody(resp, &leader); err != nil {
+ return "", err
+ }
+ return leader, nil
+}
+
+// Peers is used to query for known raft peers
+func (s *Status) Peers() ([]string, error) {
+ r := s.c.newRequest("GET", "/v1/status/peers")
+ _, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var peers []string
+ if err := decodeBody(resp, &peers); err != nil {
+ return nil, err
+ }
+ return peers, nil
+}
diff --git a/vendor/github.com/julienschmidt/httprouter/LICENSE b/vendor/github.com/julienschmidt/httprouter/LICENSE
new file mode 100644
index 0000000000..b829abc8a1
--- /dev/null
+++ b/vendor/github.com/julienschmidt/httprouter/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2013 Julien Schmidt. All rights reserved.
+
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * The names of the contributors may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL JULIEN SCHMIDT BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/github.com/julienschmidt/httprouter/README.md b/vendor/github.com/julienschmidt/httprouter/README.md
new file mode 100644
index 0000000000..785a108cae
--- /dev/null
+++ b/vendor/github.com/julienschmidt/httprouter/README.md
@@ -0,0 +1,323 @@
+# HttpRouter [Build Status](https://travis-ci.org/julienschmidt/httprouter) [Coverage](http://gocover.io/github.com/julienschmidt/httprouter) [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter)
+
+HttpRouter is a lightweight high performance HTTP request router
+(also called *multiplexer* or just *mux* for short) for [Go](http://golang.org/).
+
+In contrast to the [default mux](http://golang.org/pkg/net/http/#ServeMux) of Go's net/http package, this router supports
+variables in the routing pattern and matches against the request method.
+It also scales better.
+
+The router is optimized for high performance and a small memory footprint.
+It scales well even with very long paths and a large number of routes.
+A compressing dynamic trie (radix tree) structure is used for efficient matching.
+
+## Features
+**Only explicit matches:** With other routers, like [http.ServeMux](http://golang.org/pkg/net/http/#ServeMux),
+a requested URL path could match multiple patterns. Therefore they have some
+awkward pattern priority rules, like *longest match* or *first registered,
+first matched*. By design of this router, a request can only match exactly one
+or no route. As a result, there are also no unintended matches, which makes it
+great for SEO and improves the user experience.
+
+**Stop caring about trailing slashes:** Choose the URL style you like, the
+router automatically redirects the client if a trailing slash is missing or if
+there is one extra. Of course it only does so if the new path has a handler.
+If you don't like it, you can [turn off this behavior](http://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash).
+
+**Path auto-correction:** Besides detecting the missing or additional trailing
+slash at no extra cost, the router can also fix wrong cases and remove
+superfluous path elements (like `../` or `//`).
+Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users?
+HttpRouter can help him by making a case-insensitive look-up and redirecting him
+to the correct URL.
+
+**Parameters in your routing pattern:** Stop parsing the requested URL path,
+just give the path segment a name and the router delivers the dynamic value to
+you. Because of the design of the router, path parameters are very cheap.
+
+**Zero Garbage:** The matching and dispatching process generates zero bytes of
+garbage. In fact, the only heap allocations that are made are for building the
+slice of key-value pairs for path parameters. If the request path contains
+no parameters, not a single heap allocation is necessary.
+
+**Best Performance:** [Benchmarks speak for themselves](https://github.com/julienschmidt/go-http-routing-benchmark).
+See below for technical details of the implementation.
+
+**No more server crashes:** You can set a [Panic handler](http://godoc.org/github.com/julienschmidt/httprouter#Router.PanicHandler) to deal with panics
+occurring while handling an HTTP request. The router then recovers and lets the
+PanicHandler log what happened and deliver a nice error page.
+
+Of course you can also set **custom [NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) and [MethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.MethodNotAllowed) handlers** and [**serve static files**](http://godoc.org/github.com/julienschmidt/httprouter#Router.ServeFiles).
+
+## Usage
+This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details.
+
+Let's start with a trivial example:
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/julienschmidt/httprouter"
+ "net/http"
+ "log"
+)
+
+func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+ fmt.Fprint(w, "Welcome!\n")
+}
+
+func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+ fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
+}
+
+func main() {
+ router := httprouter.New()
+ router.GET("/", Index)
+ router.GET("/hello/:name", Hello)
+
+ log.Fatal(http.ListenAndServe(":8080", router))
+}
+```
+
+### Named parameters
+As you can see, `:name` is a *named parameter*.
+The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s.
+You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method:
+`:name` can be retrieved via `ByName("name")`, as sketched after the pattern below.
+
+Named parameters only match a single path segment:
+```
+Pattern: /user/:user
+
+ /user/gordon match
+ /user/you match
+ /user/gordon/profile no match
+ /user/ no match
+```
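+
+For illustration, a handler can read the parameter either way; this hypothetical `HelloBoth` handler assumes the `/hello/:name` route from the example above:
+```go
+func HelloBoth(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+ // Look the parameter up by name...
+ name := ps.ByName("name")
+ // ...or by its position in the Params slice.
+ first := ps[0].Value
+ fmt.Fprintf(w, "hello, %s (%s)!\n", name, first)
+}
+```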
+
+**Note:** Since this router has only explicit matches, you can not register static routes and parameters for the same path segment. For example you can not register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent from each other.
+
+### Catch-All parameters
+The second type are *catch-all* parameters and have the form `*name`.
+Like the name suggests, they match everything.
+Therefore they must always be at the **end** of the pattern:
+```
+Pattern: /src/*filepath
+
+ /src/ match
+ /src/somefile.go match
+ /src/subdir/somefile.go match
+```
+
+## How does it work?
+The router relies on a tree structure which makes heavy use of *common prefixes*,
+it is basically a *compact* [*prefix tree*](http://en.wikipedia.org/wiki/Trie)
+(or just [*Radix tree*](http://en.wikipedia.org/wiki/Radix_tree)).
+Nodes with a common prefix also share a common parent. Here is a short example
+what the routing tree for the `GET` request method could look like:
+
+```
+Priority Path Handle
+9 \ *<1>
+3 ├s nil
+2 |├earch\ *<2>
+1 |└upport\ *<3>
+2 ├blog\ *<4>
+1 | └:post nil
+1 | └\ *<5>
+2 ├about-us\ *<6>
+1 | └team\ *<7>
+1 └contact\ *<8>
+```
+Every `*` represents the memory address of a handler function (a pointer).
+If you follow a path through the tree from the root to the leaf, you get the
+complete route path, e.g. `\blog\:post\`, where `:post` is just a placeholder
+([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a
+tree structure also allows us to use dynamic parts like the `:post` parameter,
+since we actually match against the routing patterns instead of just comparing
+hashes. [As benchmarks show](https://github.com/julienschmidt/go-http-routing-benchmark),
+this works very well and efficiently.
+
+Since URL paths have a hierarchical structure and make use only of a limited set
+of characters (byte values), it is very likely that there are a lot of common
+prefixes. This allows us to easily reduce the routing into ever smaller problems.
+Moreover the router manages a separate tree for every request method.
+For one thing it is more space efficient than holding a method->handle map in
+every single node, for another it also allows us to greatly reduce the
+routing problem before even starting the look-up in the prefix-tree.
+
+For even better scalability, the child nodes on each tree level are ordered by
+priority, where the priority is just the number of handles registered in sub
+nodes (children, grandchildren, and so on..).
+This helps in two ways:
+
+1. Nodes which are part of the most routing paths are evaluated first. This
+helps to make as many routes as possible reachable as fast as possible.
+2. It is some sort of cost compensation. The longest reachable path (highest
+cost) can always be evaluated first. The following scheme visualizes the tree
+structure. Nodes are evaluated from top to bottom and from left to right.
+
+```
+├------------
+├---------
+├-----
+├----
+├--
+├--
+└-
+```
+
+
+## Why doesn't this work with http.Handler?
+**It does!** The router itself implements the http.Handler interface.
+Moreover the router provides convenient [adapters for http.Handler](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handler)s and [http.HandlerFunc](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandlerFunc)s
+which allows them to be used as a [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) when registering a route.
+The only disadvantage is that no parameter values can be retrieved when an
+http.Handler or http.HandlerFunc is used, since there is no efficient way to
+pass the values with the existing function parameters.
+Therefore [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) has a third function parameter.
+
+Just try it out for yourself; the usage of HttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up.
+
+
+## Where can I find Middleware *X*?
+This package just provides a very efficient request router with a few extra
+features. The router is just a [http.Handler](http://golang.org/pkg/net/http/#Handler),
+you can chain any http.Handler compatible middleware before the router,
+for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers).
+Or you could [just write your own](http://justinas.org/writing-http-middleware-in-go/),
+it's very easy!
+
+Alternatively, you could try [a web framework based on HttpRouter](#web-frameworks-based-on-httprouter).
+
+### Multi-domain / Sub-domains
+Here is a quick example: Does your server serve multiple domains / hosts?
+You want to use sub-domains?
+Define a router per host!
+```go
+// We need an object that implements the http.Handler interface.
+// Therefore we need a type for which we implement the ServeHTTP method.
+// We just use a map here, in which we map host names (with port) to http.Handlers
+type HostSwitch map[string]http.Handler
+
+// Implement the ServeHTTP method on our new type
+func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Check if a http.Handler is registered for the given host.
+ // If yes, use it to handle the request.
+ if handler := hs[r.Host]; handler != nil {
+ handler.ServeHTTP(w, r)
+ } else {
+ // Handle host names for which no handler is registered
+ http.Error(w, "Forbidden", 403) // Or Redirect?
+ }
+}
+
+func main() {
+ // Initialize a router as usual
+ router := httprouter.New()
+ router.GET("/", Index)
+ router.GET("/hello/:name", Hello)
+
+ // Make a new HostSwitch and insert the router (our http handler)
+ // for example.com and port 12345
+ hs := make(HostSwitch)
+ hs["example.com:12345"] = router
+
+ // Use the HostSwitch to listen and serve on port 12345
+ log.Fatal(http.ListenAndServe(":12345", hs))
+}
+```
+
+### Basic Authentication
+Another quick example: Basic Authentication (RFC 2617) for handles:
+
+```go
+package main
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "github.com/julienschmidt/httprouter"
+ "net/http"
+ "log"
+ "strings"
+)
+
+func BasicAuth(h httprouter.Handle, user, pass []byte) httprouter.Handle {
+ return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+ const basicAuthPrefix string = "Basic "
+
+ // Get the Basic Authentication credentials
+ auth := r.Header.Get("Authorization")
+ if strings.HasPrefix(auth, basicAuthPrefix) {
+ // Check credentials
+ payload, err := base64.StdEncoding.DecodeString(auth[len(basicAuthPrefix):])
+ if err == nil {
+ pair := bytes.SplitN(payload, []byte(":"), 2)
+ if len(pair) == 2 &&
+ bytes.Equal(pair[0], user) &&
+ bytes.Equal(pair[1], pass) {
+
+ // Delegate request to the given handle
+ h(w, r, ps)
+ return
+ }
+ }
+ }
+
+ // Request Basic Authentication otherwise
+ w.Header().Set("WWW-Authenticate", "Basic realm=Restricted")
+ http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+ }
+}
+
+func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+ fmt.Fprint(w, "Not protected!\n")
+}
+
+func Protected(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+ fmt.Fprint(w, "Protected!\n")
+}
+
+func main() {
+ user := []byte("gordon")
+ pass := []byte("secret!")
+
+ router := httprouter.New()
+ router.GET("/", Index)
+ router.GET("/protected/", BasicAuth(Protected, user, pass))
+
+ log.Fatal(http.ListenAndServe(":8080", router))
+}
+```
+
+## Chaining with the NotFound handler
+
+**NOTE: It might be required to set [Router.HandleMethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.**
+
+You can use another [http.Handler](http://golang.org/pkg/net/http/#Handler), for example another router, to handle requests which could not be matched by this router by using the [Router.NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining.
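+
+As an illustrative sketch, unmatched requests on one router can fall through to a second one (both routers here are placeholders):
+```go
+primary := httprouter.New()
+fallback := httprouter.New()
+// Requests that primary cannot match are delegated to fallback,
+// which is possible because *Router itself implements http.Handler.
+primary.NotFound = fallback
+```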
+
+### Static files
+The `NotFound` handler can for example be used to serve static files from the root path `/` (like an index.html file along with other assets):
+```go
+// Serve static files from the ./public directory
+router.NotFound = http.FileServer(http.Dir("public"))
+```
+
+But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`.
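+
+For instance, using the router's own ServeFiles helper (a short sketch, assuming the assets live in ./public):
+```go
+// The registered path must end with /*filepath.
+router.ServeFiles("/static/*filepath", http.Dir("public"))
+```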
+
+## Web Frameworks based on HttpRouter
+If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package:
+* [Ace](https://github.com/plimble/ace): Blazing fast Go Web Framework
+* [api2go](https://github.com/univedo/api2go): A JSON API Implementation for Go
+* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance
+* [Goat](https://github.com/bahlo/goat): A minimalistic REST API server in Go
+* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine
+* [Hitch](https://github.com/nbio/hitch): Hitch ties httprouter, [httpcontext](https://github.com/nbio/httpcontext), and middleware up in a bow
+* [kami](https://github.com/guregu/kami): A tiny web framework using x/net/context
+* [Medeina](https://github.com/imdario/medeina): Inspired by Ruby's Roda and Cuba
+* [Neko](https://github.com/rocwong/neko): A lightweight web application framework for Golang
+* [Roxanna](https://github.com/iamthemuffinman/Roxanna): An amalgamation of httprouter, better logging, and hot reload
+* [siesta](https://github.com/VividCortex/siesta): Composable HTTP handlers with contexts
diff --git a/vendor/github.com/julienschmidt/httprouter/path.go b/vendor/github.com/julienschmidt/httprouter/path.go
new file mode 100644
index 0000000000..486134db37
--- /dev/null
+++ b/vendor/github.com/julienschmidt/httprouter/path.go
@@ -0,0 +1,123 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Based on the path package, Copyright 2009 The Go Authors.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package httprouter
+
+// CleanPath is the URL version of path.Clean; it returns a canonical URL path
+// for p, eliminating . and .. elements.
+//
+// The following rules are applied iteratively until no further processing can
+// be done:
+// 1. Replace multiple slashes with a single slash.
+// 2. Eliminate each . path name element (the current directory).
+// 3. Eliminate each inner .. path name element (the parent directory)
+// along with the non-.. element that precedes it.
+// 4. Eliminate .. elements that begin a rooted path:
+// that is, replace "/.." by "/" at the beginning of a path.
+//
+// If the result of this process is an empty string, "/" is returned.
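+//
+// For example, CleanPath("/a//b/../c/") returns "/a/c/".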
+func CleanPath(p string) string {
+ // Turn empty string into "/"
+ if p == "" {
+ return "/"
+ }
+
+ n := len(p)
+ var buf []byte
+
+ // Invariants:
+ // reading from path; r is index of next byte to process.
+ // writing to buf; w is index of next byte to write.
+
+ // path must start with '/'
+ r := 1
+ w := 1
+
+ if p[0] != '/' {
+ r = 0
+ buf = make([]byte, n+1)
+ buf[0] = '/'
+ }
+
+ trailing := n > 2 && p[n-1] == '/'
+
+ // A bit more clunky without a 'lazybuf' like the path package, but the loop
+ // gets completely inlined (bufApp). So in contrast to the path package this
+ // loop has no expensive function calls (except 1x make)
+
+ for r < n {
+ switch {
+ case p[r] == '/':
+ // empty path element, trailing slash is added after the end
+ r++
+
+ case p[r] == '.' && r+1 == n:
+ trailing = true
+ r++
+
+ case p[r] == '.' && p[r+1] == '/':
+ // . element
+ r++
+
+ case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'):
+ // .. element: remove to last /
+ r += 2
+
+ if w > 1 {
+ // can backtrack
+ w--
+
+ if buf == nil {
+ for w > 1 && p[w] != '/' {
+ w--
+ }
+ } else {
+ for w > 1 && buf[w] != '/' {
+ w--
+ }
+ }
+ }
+
+ default:
+ // real path element.
+ // add slash if needed
+ if w > 1 {
+ bufApp(&buf, p, w, '/')
+ w++
+ }
+
+ // copy element
+ for r < n && p[r] != '/' {
+ bufApp(&buf, p, w, p[r])
+ w++
+ r++
+ }
+ }
+ }
+
+ // re-append trailing slash
+ if trailing && w > 1 {
+ bufApp(&buf, p, w, '/')
+ w++
+ }
+
+ if buf == nil {
+ return p[:w]
+ }
+ return string(buf[:w])
+}
+
+// internal helper to lazily create a buffer if necessary
+func bufApp(buf *[]byte, s string, w int, c byte) {
+ if *buf == nil {
+ if s[w] == c {
+ return
+ }
+
+ *buf = make([]byte, len(s))
+ copy(*buf, s[:w])
+ }
+ (*buf)[w] = c
+}
diff --git a/vendor/github.com/julienschmidt/httprouter/router.go b/vendor/github.com/julienschmidt/httprouter/router.go
new file mode 100644
index 0000000000..8b5ff34687
--- /dev/null
+++ b/vendor/github.com/julienschmidt/httprouter/router.go
@@ -0,0 +1,363 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package httprouter is a trie based high performance HTTP request router.
+//
+// A trivial example is:
+//
+// package main
+//
+// import (
+// "fmt"
+// "github.com/julienschmidt/httprouter"
+// "net/http"
+// "log"
+// )
+//
+// func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+// fmt.Fprint(w, "Welcome!\n")
+// }
+//
+// func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+// fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
+// }
+//
+// func main() {
+// router := httprouter.New()
+// router.GET("/", Index)
+// router.GET("/hello/:name", Hello)
+//
+// log.Fatal(http.ListenAndServe(":8080", router))
+// }
+//
+// The router matches incoming requests by the request method and the path.
+// If a handle is registered for this path and method, the router delegates the
+// request to that function.
+// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to
+// register handles, for all other methods router.Handle can be used.
+//
+// The registered path, against which the router matches incoming requests, can
+// contain two types of parameters:
+// Syntax Type
+// :name named parameter
+// *name catch-all parameter
+//
+// Named parameters are dynamic path segments. They match anything until the
+// next '/' or the path end:
+// Path: /blog/:category/:post
+//
+// Requests:
+// /blog/go/request-routers match: category="go", post="request-routers"
+// /blog/go/request-routers/ no match, but the router would redirect
+// /blog/go/ no match
+// /blog/go/request-routers/comments no match
+//
+// Catch-all parameters match anything until the path end, including the
+// directory index (the '/' before the catch-all). Since they match anything
+// until the end, catch-all parameters must always be the final path element.
+// Path: /files/*filepath
+//
+// Requests:
+// /files/ match: filepath="/"
+// /files/LICENSE match: filepath="/LICENSE"
+// /files/templates/article.html match: filepath="/templates/article.html"
+// /files no match, but the router would redirect
+//
+// The value of parameters is saved as a slice of the Param struct, each
+// consisting of a key and a value. The slice is passed to the Handle func as a
+// third parameter.
+// There are two ways to retrieve the value of a parameter:
+// // by the name of the parameter
+// user := ps.ByName("user") // defined by :user or *user
+//
+// // by the index of the parameter. This way you can also get the name (key)
+// thirdKey := ps[2].Key // the name of the 3rd parameter
+// thirdValue := ps[2].Value // the value of the 3rd parameter
+package httprouter
+
+import (
+ "net/http"
+)
+
+// Handle is a function that can be registered to a route to handle HTTP
+// requests. Like http.HandlerFunc, but has a third parameter for the values of
+// wildcards (variables).
+type Handle func(http.ResponseWriter, *http.Request, Params)
+
+// Param is a single URL parameter, consisting of a key and a value.
+type Param struct {
+ Key string
+ Value string
+}
+
+// Params is a Param-slice, as returned by the router.
+// The slice is ordered, the first URL parameter is also the first slice value.
+// It is therefore safe to read values by the index.
+type Params []Param
+
+// ByName returns the value of the first Param whose key matches the given name.
+// If no matching Param is found, an empty string is returned.
+func (ps Params) ByName(name string) string {
+ for i := range ps {
+ if ps[i].Key == name {
+ return ps[i].Value
+ }
+ }
+ return ""
+}
+
+// Router is an http.Handler which can be used to dispatch requests to
+// different handler functions via configurable routes.
+type Router struct {
+ trees map[string]*node
+
+ // Enables automatic redirection if the current route can't be matched but a
+ // handler for the path with (without) the trailing slash exists.
+ // For example if /foo/ is requested but a route only exists for /foo, the
+ // client is redirected to /foo with http status code 301 for GET requests
+ // and 307 for all other request methods.
+ RedirectTrailingSlash bool
+
+ // If enabled, the router tries to fix the current request path, if no
+ // handle is registered for it.
+ // First superfluous path elements like ../ or // are removed.
+ // Afterwards the router does a case-insensitive lookup of the cleaned path.
+ // If a handle can be found for this route, the router makes a redirection
+ // to the corrected path with status code 301 for GET requests and 307 for
+ // all other request methods.
+ // For example /FOO and /..//Foo could be redirected to /foo.
+ // RedirectTrailingSlash is independent of this option.
+ RedirectFixedPath bool
+
+ // If enabled, the router checks if another method is allowed for the
+ // current route, if the current request can not be routed.
+ // If this is the case, the request is answered with 'Method Not Allowed'
+ // and HTTP status code 405.
+ // If no other Method is allowed, the request is delegated to the NotFound
+ // handler.
+ HandleMethodNotAllowed bool
+
+ // Configurable http.Handler which is called when no matching route is
+ // found. If it is not set, http.NotFound is used.
+ NotFound http.Handler
+
+ // Configurable http.Handler which is called when a request
+ // cannot be routed and HandleMethodNotAllowed is true.
+ // If it is not set, http.Error with http.StatusMethodNotAllowed is used.
+ MethodNotAllowed http.Handler
+
+ // Function to handle panics recovered from http handlers.
+ // It should be used to generate an error page and return the http error code
+ // 500 (Internal Server Error).
+ // The handler can be used to keep your server from crashing because of
+ // unrecovered panics.
+ PanicHandler func(http.ResponseWriter, *http.Request, interface{})
+}
+
+// Make sure the Router conforms with the http.Handler interface
+var _ http.Handler = New()
+
+// New returns a new initialized Router.
+// Path auto-correction, including trailing slashes, is enabled by default.
+func New() *Router {
+ return &Router{
+ RedirectTrailingSlash: true,
+ RedirectFixedPath: true,
+ HandleMethodNotAllowed: true,
+ }
+}
+
+// GET is a shortcut for router.Handle("GET", path, handle)
+func (r *Router) GET(path string, handle Handle) {
+ r.Handle("GET", path, handle)
+}
+
+// HEAD is a shortcut for router.Handle("HEAD", path, handle)
+func (r *Router) HEAD(path string, handle Handle) {
+ r.Handle("HEAD", path, handle)
+}
+
+// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle)
+func (r *Router) OPTIONS(path string, handle Handle) {
+ r.Handle("OPTIONS", path, handle)
+}
+
+// POST is a shortcut for router.Handle("POST", path, handle)
+func (r *Router) POST(path string, handle Handle) {
+ r.Handle("POST", path, handle)
+}
+
+// PUT is a shortcut for router.Handle("PUT", path, handle)
+func (r *Router) PUT(path string, handle Handle) {
+ r.Handle("PUT", path, handle)
+}
+
+// PATCH is a shortcut for router.Handle("PATCH", path, handle)
+func (r *Router) PATCH(path string, handle Handle) {
+ r.Handle("PATCH", path, handle)
+}
+
+// DELETE is a shortcut for router.Handle("DELETE", path, handle)
+func (r *Router) DELETE(path string, handle Handle) {
+ r.Handle("DELETE", path, handle)
+}
+
+// Handle registers a new request handle with the given path and method.
+//
+// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut
+// functions can be used.
+//
+// This function is intended for bulk loading and to allow the usage of less
+// frequently used, non-standardized or custom methods (e.g. for internal
+// communication with a proxy).
+func (r *Router) Handle(method, path string, handle Handle) {
+ if path[0] != '/' {
+ panic("path must begin with '/' in path '" + path + "'")
+ }
+
+ if r.trees == nil {
+ r.trees = make(map[string]*node)
+ }
+
+ root := r.trees[method]
+ if root == nil {
+ root = new(node)
+ r.trees[method] = root
+ }
+
+ root.addRoute(path, handle)
+}
+
+// Handler is an adapter which allows the usage of an http.Handler as a
+// request handle.
+func (r *Router) Handler(method, path string, handler http.Handler) {
+ r.Handle(method, path,
+ func(w http.ResponseWriter, req *http.Request, _ Params) {
+ handler.ServeHTTP(w, req)
+ },
+ )
+}
+
+// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a
+// request handle.
+func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) {
+ r.Handler(method, path, handler)
+}
+
+// ServeFiles serves files from the given file system root.
+// The path must end with "/*filepath", files are then served from the local
+// path /defined/root/dir/*filepath.
+// For example if root is "/etc" and *filepath is "passwd", the local file
+// "/etc/passwd" would be served.
+// Internally an http.FileServer is used; therefore http.NotFound is used instead
+// of the Router's NotFound handler.
+// To use the operating system's file system implementation,
+// use http.Dir:
+// router.ServeFiles("/src/*filepath", http.Dir("/var/www"))
+func (r *Router) ServeFiles(path string, root http.FileSystem) {
+ if len(path) < 10 || path[len(path)-10:] != "/*filepath" {
+ panic("path must end with /*filepath in path '" + path + "'")
+ }
+
+ fileServer := http.FileServer(root)
+
+ r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) {
+ req.URL.Path = ps.ByName("filepath")
+ fileServer.ServeHTTP(w, req)
+ })
+}
+
+func (r *Router) recv(w http.ResponseWriter, req *http.Request) {
+ if rcv := recover(); rcv != nil {
+ r.PanicHandler(w, req, rcv)
+ }
+}
+
+// Lookup allows the manual lookup of a method + path combo.
+// This is e.g. useful to build a framework around this router.
+// If the path was found, it returns the handle function and the path parameter
+// values. Otherwise the third return value indicates whether a redirection to
+// the same path with an extra or a missing trailing slash should be performed.
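+//
+// For example (assuming a *Router named router):
+//
+//	handle, ps, tsr := router.Lookup("GET", "/hello/gopher")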
+func (r *Router) Lookup(method, path string) (Handle, Params, bool) {
+ if root := r.trees[method]; root != nil {
+ return root.getValue(path)
+ }
+ return nil, nil, false
+}
+
+// ServeHTTP makes the router implement the http.Handler interface.
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if r.PanicHandler != nil {
+ defer r.recv(w, req)
+ }
+
+ if root := r.trees[req.Method]; root != nil {
+ path := req.URL.Path
+
+ if handle, ps, tsr := root.getValue(path); handle != nil {
+ handle(w, req, ps)
+ return
+ } else if req.Method != "CONNECT" && path != "/" {
+ code := 301 // Permanent redirect, request with GET method
+ if req.Method != "GET" {
+ // Temporary redirect, request with same method
+ // As of Go 1.3, Go does not support status code 308.
+ code = 307
+ }
+
+ if tsr && r.RedirectTrailingSlash {
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ req.URL.Path = path[:len(path)-1]
+ } else {
+ req.URL.Path = path + "/"
+ }
+ http.Redirect(w, req, req.URL.String(), code)
+ return
+ }
+
+ // Try to fix the request path
+ if r.RedirectFixedPath {
+ fixedPath, found := root.findCaseInsensitivePath(
+ CleanPath(path),
+ r.RedirectTrailingSlash,
+ )
+ if found {
+ req.URL.Path = string(fixedPath)
+ http.Redirect(w, req, req.URL.String(), code)
+ return
+ }
+ }
+ }
+ }
+
+ // Handle 405
+ if r.HandleMethodNotAllowed {
+ for method := range r.trees {
+ // Skip the requested method - we already tried this one
+ if method == req.Method {
+ continue
+ }
+
+ handle, _, _ := r.trees[method].getValue(req.URL.Path)
+ if handle != nil {
+ if r.MethodNotAllowed != nil {
+ r.MethodNotAllowed.ServeHTTP(w, req)
+ } else {
+ http.Error(w,
+ http.StatusText(http.StatusMethodNotAllowed),
+ http.StatusMethodNotAllowed,
+ )
+ }
+ return
+ }
+ }
+ }
+
+ // Handle 404
+ if r.NotFound != nil {
+ r.NotFound.ServeHTTP(w, req)
+ } else {
+ http.NotFound(w, req)
+ }
+}
diff --git a/vendor/github.com/julienschmidt/httprouter/tree.go b/vendor/github.com/julienschmidt/httprouter/tree.go
new file mode 100644
index 0000000000..a15bc2cbc3
--- /dev/null
+++ b/vendor/github.com/julienschmidt/httprouter/tree.go
@@ -0,0 +1,555 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package httprouter
+
+import (
+ "strings"
+ "unicode"
+)
+
+func min(a, b int) int {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+func countParams(path string) uint8 {
+ var n uint
+ for i := 0; i < len(path); i++ {
+ if path[i] != ':' && path[i] != '*' {
+ continue
+ }
+ n++
+ }
+ if n >= 255 {
+ return 255
+ }
+ return uint8(n)
+}
+
+type nodeType uint8
+
+const (
+ static nodeType = 0
+ param nodeType = 1
+ catchAll nodeType = 2
+)
+
+type node struct {
+ path string
+ wildChild bool
+ nType nodeType
+ maxParams uint8
+ indices string
+ children []*node
+ handle Handle
+ priority uint32
+}
+
+// increments priority of the given child and reorders if necessary
+func (n *node) incrementChildPrio(pos int) int {
+ n.children[pos].priority++
+ prio := n.children[pos].priority
+
+ // adjust position (move to front)
+ newPos := pos
+ for newPos > 0 && n.children[newPos-1].priority < prio {
+ // swap node positions
+ tmpN := n.children[newPos-1]
+ n.children[newPos-1] = n.children[newPos]
+ n.children[newPos] = tmpN
+
+ newPos--
+ }
+
+ // build new index char string
+ if newPos != pos {
+ n.indices = n.indices[:newPos] + // unchanged prefix, might be empty
+ n.indices[pos:pos+1] + // the index char we move
+ n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos'
+ }
+
+ return newPos
+}
+
+// addRoute adds a node with the given handle to the path.
+// Not concurrency-safe!
+func (n *node) addRoute(path string, handle Handle) {
+ fullPath := path
+ n.priority++
+ numParams := countParams(path)
+
+ // non-empty tree
+ if len(n.path) > 0 || len(n.children) > 0 {
+ walk:
+ for {
+ // Update maxParams of the current node
+ if numParams > n.maxParams {
+ n.maxParams = numParams
+ }
+
+ // Find the longest common prefix.
+ // This also implies that the common prefix contains no ':' or '*'
+ // since the existing key can't contain those chars.
+ i := 0
+ max := min(len(path), len(n.path))
+ for i < max && path[i] == n.path[i] {
+ i++
+ }
+
+ // Split edge
+ if i < len(n.path) {
+ child := node{
+ path: n.path[i:],
+ wildChild: n.wildChild,
+ indices: n.indices,
+ children: n.children,
+ handle: n.handle,
+ priority: n.priority - 1,
+ }
+
+ // Update maxParams (max of all children)
+ for i := range child.children {
+ if child.children[i].maxParams > child.maxParams {
+ child.maxParams = child.children[i].maxParams
+ }
+ }
+
+ n.children = []*node{&child}
+ // []byte for proper unicode char conversion, see #65
+ n.indices = string([]byte{n.path[i]})
+ n.path = path[:i]
+ n.handle = nil
+ n.wildChild = false
+ }
+
+ // Make new node a child of this node
+ if i < len(path) {
+ path = path[i:]
+
+ if n.wildChild {
+ n = n.children[0]
+ n.priority++
+
+ // Update maxParams of the child node
+ if numParams > n.maxParams {
+ n.maxParams = numParams
+ }
+ numParams--
+
+ // Check if the wildcard matches
+ if len(path) >= len(n.path) && n.path == path[:len(n.path)] {
+ // check for longer wildcard, e.g. :name and :names
+ if len(n.path) >= len(path) || path[len(n.path)] == '/' {
+ continue walk
+ }
+ }
+
+ panic("path segment '" + path +
+ "' conflicts with existing wildcard '" + n.path +
+ "' in path '" + fullPath + "'")
+ }
+
+ c := path[0]
+
+ // slash after param
+ if n.nType == param && c == '/' && len(n.children) == 1 {
+ n = n.children[0]
+ n.priority++
+ continue walk
+ }
+
+ // Check if a child with the next path byte exists
+ for i := 0; i < len(n.indices); i++ {
+ if c == n.indices[i] {
+ i = n.incrementChildPrio(i)
+ n = n.children[i]
+ continue walk
+ }
+ }
+
+ // Otherwise insert it
+ if c != ':' && c != '*' {
+ // []byte for proper unicode char conversion, see #65
+ n.indices += string([]byte{c})
+ child := &node{
+ maxParams: numParams,
+ }
+ n.children = append(n.children, child)
+ n.incrementChildPrio(len(n.indices) - 1)
+ n = child
+ }
+ n.insertChild(numParams, path, fullPath, handle)
+ return
+
+ } else if i == len(path) { // Make node a (in-path) leaf
+ if n.handle != nil {
+ panic("a handle is already registered for path ''" + fullPath + "'")
+ }
+ n.handle = handle
+ }
+ return
+ }
+ } else { // Empty tree
+ n.insertChild(numParams, path, fullPath, handle)
+ }
+}
+
+func (n *node) insertChild(numParams uint8, path, fullPath string, handle Handle) {
+ var offset int // already handled bytes of the path
+
+ // find prefix until first wildcard (beginning with ':' or '*')
+ for i, max := 0, len(path); numParams > 0; i++ {
+ c := path[i]
+ if c != ':' && c != '*' {
+ continue
+ }
+
+ // find wildcard end (either '/' or path end)
+ end := i + 1
+ for end < max && path[end] != '/' {
+ switch path[end] {
+ // the wildcard name must not contain ':' and '*'
+ case ':', '*':
+ panic("only one wildcard per path segment is allowed, has: '" +
+ path[i:] + "' in path '" + fullPath + "'")
+ default:
+ end++
+ }
+ }
+
+ // check if this node has existing children which would be
+ // unreachable if we insert the wildcard here
+ if len(n.children) > 0 {
+ panic("wildcard route '" + path[i:end] +
+ "' conflicts with existing children in path '" + fullPath + "'")
+ }
+
+ // check if the wildcard has a name
+ if end-i < 2 {
+ panic("wildcards must be named with a non-empty name in path '" + fullPath + "'")
+ }
+
+ if c == ':' { // param
+ // split path at the beginning of the wildcard
+ if i > 0 {
+ n.path = path[offset:i]
+ offset = i
+ }
+
+ child := &node{
+ nType: param,
+ maxParams: numParams,
+ }
+ n.children = []*node{child}
+ n.wildChild = true
+ n = child
+ n.priority++
+ numParams--
+
+ // if the path doesn't end with the wildcard, then there
+ // will be another non-wildcard subpath starting with '/'
+ if end < max {
+ n.path = path[offset:end]
+ offset = end
+
+ child := &node{
+ maxParams: numParams,
+ priority: 1,
+ }
+ n.children = []*node{child}
+ n = child
+ }
+
+ } else { // catchAll
+ if end != max || numParams > 1 {
+ panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'")
+ }
+
+ if len(n.path) > 0 && n.path[len(n.path)-1] == '/' {
+ panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'")
+ }
+
+ // currently fixed width 1 for '/'
+ i--
+ if path[i] != '/' {
+ panic("no / before catch-all in path '" + fullPath + "'")
+ }
+
+ n.path = path[offset:i]
+
+ // first node: catchAll node with empty path
+ child := &node{
+ wildChild: true,
+ nType: catchAll,
+ maxParams: 1,
+ }
+ n.children = []*node{child}
+ n.indices = string(path[i])
+ n = child
+ n.priority++
+
+ // second node: node holding the variable
+ child = &node{
+ path: path[i:],
+ nType: catchAll,
+ maxParams: 1,
+ handle: handle,
+ priority: 1,
+ }
+ n.children = []*node{child}
+
+ return
+ }
+ }
+
+ // insert remaining path part and handle to the leaf
+ n.path = path[offset:]
+ n.handle = handle
+}
+
+// Returns the handle registered with the given path (key). The values of
+// wildcards are saved in a Params slice.
+// If no handle can be found, a TSR (trailing slash redirect) recommendation is
+// made if a handle exists with an extra (without the) trailing slash for the
+// given path.
+func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) {
+walk: // Outer loop for walking the tree
+ for {
+ if len(path) > len(n.path) {
+ if path[:len(n.path)] == n.path {
+ path = path[len(n.path):]
+ // If this node does not have a wildcard (param or catchAll)
+ // child, we can just look up the next child node and continue
+ // to walk down the tree
+ if !n.wildChild {
+ c := path[0]
+ for i := 0; i < len(n.indices); i++ {
+ if c == n.indices[i] {
+ n = n.children[i]
+ continue walk
+ }
+ }
+
+ // Nothing found.
+ // We can recommend to redirect to the same URL without a
+ // trailing slash if a leaf exists for that path.
+ tsr = (path == "/" && n.handle != nil)
+ return
+
+ }
+
+ // handle wildcard child
+ n = n.children[0]
+ switch n.nType {
+ case param:
+ // find param end (either '/' or path end)
+ end := 0
+ for end < len(path) && path[end] != '/' {
+ end++
+ }
+
+ // save param value
+ if p == nil {
+ // lazy allocation
+ p = make(Params, 0, n.maxParams)
+ }
+ i := len(p)
+ p = p[:i+1] // expand slice within preallocated capacity
+ p[i].Key = n.path[1:]
+ p[i].Value = path[:end]
+
+ // we need to go deeper!
+ if end < len(path) {
+ if len(n.children) > 0 {
+ path = path[end:]
+ n = n.children[0]
+ continue walk
+ }
+
+ // ... but we can't
+ tsr = (len(path) == end+1)
+ return
+ }
+
+ if handle = n.handle; handle != nil {
+ return
+ } else if len(n.children) == 1 {
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists for TSR recommendation
+ n = n.children[0]
+ tsr = (n.path == "/" && n.handle != nil)
+ }
+
+ return
+
+ case catchAll:
+ // save param value
+ if p == nil {
+ // lazy allocation
+ p = make(Params, 0, n.maxParams)
+ }
+ i := len(p)
+ p = p[:i+1] // expand slice within preallocated capacity
+ p[i].Key = n.path[2:]
+ p[i].Value = path
+
+ handle = n.handle
+ return
+
+ default:
+ panic("invalid node type")
+ }
+ }
+ } else if path == n.path {
+ // We should have reached the node containing the handle.
+ // Check if this node has a handle registered.
+ if handle = n.handle; handle != nil {
+ return
+ }
+
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists for trailing slash recommendation
+ for i := 0; i < len(n.indices); i++ {
+ if n.indices[i] == '/' {
+ n = n.children[i]
+ tsr = (len(n.path) == 1 && n.handle != nil) ||
+ (n.nType == catchAll && n.children[0].handle != nil)
+ return
+ }
+ }
+
+ return
+ }
+
+ // Nothing found. We can recommend to redirect to the same URL with an
+ // extra trailing slash if a leaf exists for that path
+ tsr = (path == "/") ||
+ (len(n.path) == len(path)+1 && n.path[len(path)] == '/' &&
+ path == n.path[:len(n.path)-1] && n.handle != nil)
+ return
+ }
+}
+
+// Makes a case-insensitive lookup of the given path and tries to find a handler.
+// It can optionally also fix trailing slashes.
+// It returns the case-corrected path and a bool indicating whether the lookup
+// was successful.
+func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) {
+ ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory
+
+ // Outer loop for walking the tree
+ for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) {
+ path = path[len(n.path):]
+ ciPath = append(ciPath, n.path...)
+
+ if len(path) > 0 {
+ // If this node does not have a wildcard (param or catchAll) child,
+ // we can just look up the next child node and continue to walk down
+ // the tree
+ if !n.wildChild {
+ r := unicode.ToLower(rune(path[0]))
+ for i, index := range n.indices {
+ // must use recursive approach since both index and
+ // ToLower(index) could exist. We must check both.
+ if r == unicode.ToLower(index) {
+ out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash)
+ if found {
+ return append(ciPath, out...), true
+ }
+ }
+ }
+
+ // Nothing found. We can recommend to redirect to the same URL
+ // without a trailing slash if a leaf exists for that path
+ found = (fixTrailingSlash && path == "/" && n.handle != nil)
+ return
+ }
+
+ n = n.children[0]
+ switch n.nType {
+ case param:
+ // find param end (either '/' or path end)
+ k := 0
+ for k < len(path) && path[k] != '/' {
+ k++
+ }
+
+ // add param value to case insensitive path
+ ciPath = append(ciPath, path[:k]...)
+
+ // we need to go deeper!
+ if k < len(path) {
+ if len(n.children) > 0 {
+ path = path[k:]
+ n = n.children[0]
+ continue
+ }
+
+ // ... but we can't
+ if fixTrailingSlash && len(path) == k+1 {
+ return ciPath, true
+ }
+ return
+ }
+
+ if n.handle != nil {
+ return ciPath, true
+ } else if fixTrailingSlash && len(n.children) == 1 {
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists
+ n = n.children[0]
+ if n.path == "/" && n.handle != nil {
+ return append(ciPath, '/'), true
+ }
+ }
+ return
+
+ case catchAll:
+ return append(ciPath, path...), true
+
+ default:
+ panic("invalid node type")
+ }
+ } else {
+ // We should have reached the node containing the handle.
+ // Check if this node has a handle registered.
+ if n.handle != nil {
+ return ciPath, true
+ }
+
+ // No handle found.
+ // Try to fix the path by adding a trailing slash
+ if fixTrailingSlash {
+ for i := 0; i < len(n.indices); i++ {
+ if n.indices[i] == '/' {
+ n = n.children[i]
+ if (len(n.path) == 1 && n.handle != nil) ||
+ (n.nType == catchAll && n.children[0].handle != nil) {
+ return append(ciPath, '/'), true
+ }
+ return
+ }
+ }
+ }
+ return
+ }
+ }
+
+ // Nothing found.
+ // Try to fix the path by adding / removing a trailing slash
+ if fixTrailingSlash {
+ if path == "/" {
+ return ciPath, true
+ }
+ if len(path)+1 == len(n.path) && n.path[len(path)] == '/' &&
+ strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) &&
+ n.handle != nil {
+ return append(ciPath, n.path...), true
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 0000000000..66d9b5458f
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ headerBuf := make([]byte, binary.MaxVarintLen32)
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 0000000000..c318385cbe
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
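+//
+// An illustrative round trip (imports omitted; msg is any proto.Message value):
+//
+//	var buf bytes.Buffer
+//	if _, err := WriteDelimited(&buf, msg); err != nil {
+//		// handle write error
+//	}
+//	if _, err := ReadDelimited(&buf, msg); err != nil {
+//		// handle read error
+//	}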
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 0000000000..4b76ea9a1d
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type together in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ buf := make([]byte, binary.MaxVarintLen32)
+ encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS
new file mode 100644
index 0000000000..1965683525
--- /dev/null
+++ b/vendor/github.com/miekg/dns/AUTHORS
@@ -0,0 +1 @@
+Miek Gieben
diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS
new file mode 100644
index 0000000000..f77e8a895f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/CONTRIBUTORS
@@ -0,0 +1,9 @@
+Alex A. Skinner
+Andrew Tunnell-Jones
+Ask Bjørn Hansen
+Dave Cheney
+Dusty Wilson
+Marek Majkowski
+Peter van Dijk
+Omri Bahumi
+Alex Sergeyev
diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT
new file mode 100644
index 0000000000..35702b10e8
--- /dev/null
+++ b/vendor/github.com/miekg/dns/COPYRIGHT
@@ -0,0 +1,9 @@
+Copyright 2009 The Go Authors. All rights reserved. Use of this source code
+is governed by a BSD-style license that can be found in the LICENSE file.
+Extensions of the original work are copyright (c) 2011 Miek Gieben
+
+Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
+governed by a BSD-style license that can be found in the LICENSE file.
+
+Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
+governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE
new file mode 100644
index 0000000000..5763fa7fe5
--- /dev/null
+++ b/vendor/github.com/miekg/dns/LICENSE
@@ -0,0 +1,32 @@
+Extensions of the original work are copyright (c) 2011 Miek Gieben
+
+As this is fork of the official Go code the same license applies:
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
new file mode 100644
index 0000000000..b9baec18bc
--- /dev/null
+++ b/vendor/github.com/miekg/dns/README.md
@@ -0,0 +1,148 @@
+[Build Status](https://travis-ci.org/miekg/dns)
+
+# Alternative (more granular) approach to a DNS library
+
+> Less is more.
+
+Complete and usable DNS library. All widely used Resource Records are
+supported, including the DNSSEC types. It follows a lean and mean philosophy.
+If there is stuff you should know as a DNS programmer there isn't a convenience
+function for it. Server side and client side programming is supported, i.e. you
+can build servers and resolvers with it.
+
+If you like this, you may also be interested in:
+
+* https://github.com/miekg/unbound -- Go wrapper for the Unbound resolver.
+
+# Goals
+
+* KISS;
+* Fast;
+* Small API; if it's easy to code in Go, don't make a function for it.
+
+# Users
+
+A not-so-up-to-date-list-that-may-be-actually-current:
+
+* https://cloudflare.com
+* https://github.com/abh/geodns
+* http://www.statdns.com/
+* http://www.dnsinspect.com/
+* https://github.com/chuangbo/jianbing-dictionary-dns
+* http://www.dns-lg.com/
+* https://github.com/fcambus/rrda
+* https://github.com/kenshinx/godns
+* https://github.com/skynetservices/skydns
+* https://github.com/DevelopersPL/godnsagent
+* https://github.com/duedil-ltd/discodns
+* https://github.com/StalkR/dns-reverse-proxy
+* https://github.com/tianon/rawdns
+* https://mesosphere.github.io/mesos-dns/
+* https://pulse.turbobytes.com/
+* https://play.google.com/store/apps/details?id=com.turbobytes.dig
+* https://github.com/fcambus/statzone
+* https://github.com/benschw/dns-clb-go
+
+Send a pull request if you want to be listed here.
+
+# Features
+
+* UDP/TCP queries, IPv4 and IPv6;
+* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported);
+* Fast:
+ * Reply speed around 80K qps (faster hardware results in more qps);
+ * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds;
+* Server side programming (mimicking the net/http package, see the sketch below);
+* Client side programming;
+* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA;
+* EDNS0, NSID;
+* AXFR/IXFR;
+* TSIG, SIG(0);
+* DNS name compression;
+* Depends only on the standard library.
+
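+Server side programming follows the net/http pattern closely. A minimal sketch
+(the zone name and listen address are illustrative, imports omitted):
+
+    dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
+        m := new(dns.Msg)
+        m.SetReply(r)
+        w.WriteMsg(m)
+    })
+    log.Fatal(dns.ListenAndServe(":5353", "udp", nil))
+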
+Have fun!
+
+Miek Gieben - 2010-2012
+
+# Building
+
+Building is done with the `go` tool. If you have set up your GOPATH
+correctly, the following should work:
+
+ go get github.com/miekg/dns
+ go build github.com/miekg/dns
+
+## Examples
+
+A short "how to use the API" is at the beginning of doc.go (this also will show
+when you call `godoc github.com/miekg/dns`).
+
+Example programs can be found in the `github.com/miekg/exdns` repository.
+
+## Supported RFCs
+
+*all of them*
+
+* 103{4,5} - DNS standard
+* 1348 - NSAP record (removed the record)
+* 1982 - Serial Arithmetic
+* 1876 - LOC record
+* 1995 - IXFR
+* 1996 - DNS notify
+* 2136 - DNS Update (dynamic updates)
+* 2181 - RRset definition - there is no RRset type though, just []RR
+* 2537 - RSAMD5 DNS keys
+* 2065 - DNSSEC (updated in later RFCs)
+* 2671 - EDNS record
+* 2782 - SRV record
+* 2845 - TSIG record
+* 2915 - NAPTR record
+* 2929 - DNS IANA Considerations
+* 3110 - RSASHA1 DNS keys
+* 3225 - DO bit (DNSSEC OK)
+* 340{1,2,3} - NAPTR record
+* 3445 - Limiting the scope of (DNS)KEY
+* 3597 - Unknown RRs
+* 4025 - IPSECKEY
+* 403{3,4,5} - DNSSEC + validation functions
+* 4255 - SSHFP record
+* 4343 - Case insensitivity
+* 4408 - SPF record
+* 4509 - SHA256 Hash in DS
+* 4592 - Wildcards in the DNS
+* 4635 - HMAC SHA TSIG
+* 4701 - DHCID
+* 4892 - id.server
+* 5001 - NSID
+* 5155 - NSEC3 record
+* 5205 - HIP record
+* 5702 - SHA2 in the DNS
+* 5936 - AXFR
+* 5966 - TCP implementation recommendations
+* 6605 - ECDSA
+* 6725 - IANA Registry Update
+* 6742 - ILNP DNS
+* 6844 - CAA record
+* 6891 - EDNS0 update
+* 6895 - DNS IANA considerations
+* 6975 - Algorithm Understanding in DNSSEC
+* 7043 - EUI48/EUI64 records
+* 7314 - DNS (EDNS) EXPIRE Option
+* 7553 - URI record
+* xxxx - EDNS0 DNS Update Lease (draft)
+
+## Loosely based upon
+
+* `ldns`
+* `NSD`
+* `Net::DNS`
+* `GRONG`
+
+## TODO
+
+* privatekey.Precompute() when signing?
+* Last remaining RRs: APL, ATMA, A6, NSAP and NXT.
+* Missing in parsing: ISDN, UNSPEC, NSAP and ATMA.
+* NSEC(3) cover/match/closest enclose.
+* Replies with TC bit are not parsed to the end.
diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go
new file mode 100644
index 0000000000..140a7add7b
--- /dev/null
+++ b/vendor/github.com/miekg/dns/client.go
@@ -0,0 +1,378 @@
+package dns
+
+// A client implementation.
+
+import (
+ "bytes"
+ "io"
+ "net"
+ "time"
+)
+
+const dnsTimeout time.Duration = 2 * time.Second
+const tcpIdleTimeout time.Duration = 8 * time.Second
+
+// A Conn represents a connection to a DNS server.
+type Conn struct {
+ net.Conn // a net.Conn holding the connection
+ UDPSize uint16 // minimum receive buffer for UDP messages
+ TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified
+ rtt time.Duration
+ t time.Time
+ tsigRequestMAC string
+}
+
+// A Client defines parameters for a DNS client.
+type Client struct {
+ Net string // if "tcp" a TCP query will be initiated, otherwise a UDP one (default is "" for UDP)
+ UDPSize uint16 // minimum receive buffer for UDP messages
+ DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
+ ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
+ WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
+ TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified
+ SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
+ group singleflight
+}
+
+// Exchange performs a synchronous UDP query. It sends the message m to the address
+// contained in a and waits for a reply. Exchange does not retry a failed query, nor
+// will it fall back to TCP in case of truncation.
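+// A basic use (the name and server address are illustrative):
+//
+//	m := new(Msg)
+//	m.SetQuestion("example.org.", TypeA)
+//	in, err := Exchange(m, "192.0.2.1:53")
+//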
+// If you need to send a DNS message on an already existing connection, you can use the
+// following:
+//
+// co := &dns.Conn{Conn: c} // c is your net.Conn
+// co.WriteMsg(m)
+// in, err := co.ReadMsg()
+// co.Close()
+//
+func Exchange(m *Msg, a string) (r *Msg, err error) {
+ var co *Conn
+ co, err = DialTimeout("udp", a, dnsTimeout)
+ if err != nil {
+ return nil, err
+ }
+
+ defer co.Close()
+ co.SetReadDeadline(time.Now().Add(dnsTimeout))
+ co.SetWriteDeadline(time.Now().Add(dnsTimeout))
+
+ opt := m.IsEdns0()
+ // If EDNS0 is used use that for size.
+ if opt != nil && opt.UDPSize() >= MinMsgSize {
+ co.UDPSize = opt.UDPSize()
+ }
+
+ if err = co.WriteMsg(m); err != nil {
+ return nil, err
+ }
+ r, err = co.ReadMsg()
+ if err == nil && r.Id != m.Id {
+ err = ErrId
+ }
+ return r, err
+}
+
+// ExchangeConn performs a synchronous query. It sends the message m via the connection
+// c and waits for a reply. The connection c is not closed by ExchangeConn.
+// This function is going away, but can easily be mimicked:
+//
+// co := &dns.Conn{Conn: c} // c is your net.Conn
+// co.WriteMsg(m)
+// in, _ := co.ReadMsg()
+// co.Close()
+//
+func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
+ println("dns: this function is deprecated")
+ co := new(Conn)
+ co.Conn = c
+ if err = co.WriteMsg(m); err != nil {
+ return nil, err
+ }
+ r, err = co.ReadMsg()
+ if err == nil && r.Id != m.Id {
+ err = ErrId
+ }
+ return r, err
+}
+
+// Exchange performs a synchronous query. It sends the message m to the address
+// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
+//
+// c := new(dns.Client)
+// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
+//
+// Exchange does not retry a failed query, nor will it fall back to TCP in
+// case of truncation.
+func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+ if !c.SingleInflight {
+ return c.exchange(m, a)
+ }
+ // This adds a bunch of garbage, TODO(miek).
+ t := "nop"
+ if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
+ t = t1
+ }
+ cl := "nop"
+ if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
+ cl = cl1
+ }
+ r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
+ return c.exchange(m, a)
+ })
+ if err != nil {
+ return r, rtt, err
+ }
+ if shared {
+ return r.Copy(), rtt, nil
+ }
+ return r, rtt, nil
+}
+
+func (c *Client) dialTimeout() time.Duration {
+ if c.DialTimeout != 0 {
+ return c.DialTimeout
+ }
+ return dnsTimeout
+}
+
+func (c *Client) readTimeout() time.Duration {
+ if c.ReadTimeout != 0 {
+ return c.ReadTimeout
+ }
+ return dnsTimeout
+}
+
+func (c *Client) writeTimeout() time.Duration {
+ if c.WriteTimeout != 0 {
+ return c.WriteTimeout
+ }
+ return dnsTimeout
+}
+
+func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+ var co *Conn
+ if c.Net == "" {
+ co, err = DialTimeout("udp", a, c.dialTimeout())
+ } else {
+ co, err = DialTimeout(c.Net, a, c.dialTimeout())
+ }
+ if err != nil {
+ return nil, 0, err
+ }
+ defer co.Close()
+
+ opt := m.IsEdns0()
+ // If EDNS0 is used use that for size.
+ if opt != nil && opt.UDPSize() >= MinMsgSize {
+ co.UDPSize = opt.UDPSize()
+ }
+ // Otherwise use the client's configured UDP size.
+ if opt == nil && c.UDPSize >= MinMsgSize {
+ co.UDPSize = c.UDPSize
+ }
+
+ co.SetReadDeadline(time.Now().Add(c.readTimeout()))
+ co.SetWriteDeadline(time.Now().Add(c.writeTimeout()))
+
+ co.TsigSecret = c.TsigSecret
+ if err = co.WriteMsg(m); err != nil {
+ return nil, 0, err
+ }
+ r, err = co.ReadMsg()
+ if err == nil && r.Id != m.Id {
+ err = ErrId
+ }
+ return r, co.rtt, err
+}
+
+// ReadMsg reads a message from the connection co.
+// If the received message contains a TSIG record the transaction
+// signature is verified.
+func (co *Conn) ReadMsg() (*Msg, error) {
+ p, err := co.ReadMsgHeader(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ m := new(Msg)
+ if err := m.Unpack(p); err != nil {
+ return nil, err
+ }
+ if t := m.IsTsig(); t != nil {
+ if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
+ return m, ErrSecret
+ }
+ // Need to work on the original message p, as that was used to calculate the tsig.
+ err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
+ }
+ return m, err
+}
+
+// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
+// Returns message as a byte slice to be parsed with Msg.Unpack later on.
+// Note that error handling on the message body is not possible as only the header is parsed.
+func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
+ var (
+ p []byte
+ n int
+ err error
+ )
+
+ if t, ok := co.Conn.(*net.TCPConn); ok {
+ // First two bytes specify the length of the entire message.
+ l, err := tcpMsgLen(t)
+ if err != nil {
+ return nil, err
+ }
+ p = make([]byte, l)
+ n, err = tcpRead(t, p)
+ } else {
+ if co.UDPSize > MinMsgSize {
+ p = make([]byte, co.UDPSize)
+ } else {
+ p = make([]byte, MinMsgSize)
+ }
+ n, err = co.Read(p)
+ }
+
+ if err != nil {
+ return nil, err
+ } else if n < headerSize {
+ return nil, ErrShortRead
+ }
+
+ p = p[:n]
+ if hdr != nil {
+ if _, err = UnpackStruct(hdr, p, 0); err != nil {
+ return nil, err
+ }
+ }
+ return p, err
+}
+
+// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length.
+func tcpMsgLen(t *net.TCPConn) (int, error) {
+ p := []byte{0, 0}
+ n, err := t.Read(p)
+ if err != nil {
+ return 0, err
+ }
+ if n != 2 {
+ return 0, ErrShortRead
+ }
+ l, _ := unpackUint16(p, 0)
+ if l == 0 {
+ return 0, ErrShortRead
+ }
+ return int(l), nil
+}
+
+// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
+func tcpRead(t *net.TCPConn, p []byte) (int, error) {
+ n, err := t.Read(p)
+ if err != nil {
+ return n, err
+ }
+ for n < len(p) {
+ j, err := t.Read(p[n:])
+ if err != nil {
+ return n, err
+ }
+ n += j
+ }
+ return n, err
+}
+
+// Read implements the net.Conn read method.
+func (co *Conn) Read(p []byte) (n int, err error) {
+ if co.Conn == nil {
+ return 0, ErrConnEmpty
+ }
+ if len(p) < 2 {
+ return 0, io.ErrShortBuffer
+ }
+ if t, ok := co.Conn.(*net.TCPConn); ok {
+ l, err := tcpMsgLen(t)
+ if err != nil {
+ return 0, err
+ }
+ if l > len(p) {
+ return int(l), io.ErrShortBuffer
+ }
+ return tcpRead(t, p[:l])
+ }
+ // UDP connection
+ n, err = co.Conn.Read(p)
+ if err != nil {
+ return n, err
+ }
+
+ co.rtt = time.Since(co.t)
+ return n, err
+}
+
+// WriteMsg sends a message through the connection co.
+// If the message m contains a TSIG record the transaction
+// signature is calculated.
+func (co *Conn) WriteMsg(m *Msg) (err error) {
+ var out []byte
+ if t := m.IsTsig(); t != nil {
+ mac := ""
+ if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
+ return ErrSecret
+ }
+ out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
+ // Set for the next read, although only used in zone transfers
+ co.tsigRequestMAC = mac
+ } else {
+ out, err = m.Pack()
+ }
+ if err != nil {
+ return err
+ }
+ co.t = time.Now()
+ if _, err = co.Write(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Write implements the net.Conn Write method.
+func (co *Conn) Write(p []byte) (n int, err error) {
+ if t, ok := co.Conn.(*net.TCPConn); ok {
+ lp := len(p)
+ if lp < 2 {
+ return 0, io.ErrShortBuffer
+ }
+ if lp > MaxMsgSize {
+ return 0, &Error{err: "message too large"}
+ }
+ l := make([]byte, 2, lp+2)
+ l[0], l[1] = packUint16(uint16(lp))
+ p = append(l, p...)
+ n, err := io.Copy(t, bytes.NewReader(p))
+ return int(n), err
+ }
+ n, err = co.Conn.(*net.UDPConn).Write(p)
+ return n, err
+}
+
+// Dial connects to the address on the named network.
+func Dial(network, address string) (conn *Conn, err error) {
+ conn = new(Conn)
+ conn.Conn, err = net.Dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// DialTimeout acts like Dial but takes a timeout.
+func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
+ conn = new(Conn)
+ conn.Conn, err = net.DialTimeout(network, address, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go
new file mode 100644
index 0000000000..cfa9ad0b22
--- /dev/null
+++ b/vendor/github.com/miekg/dns/clientconfig.go
@@ -0,0 +1,99 @@
+package dns
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// ClientConfig wraps the contents of the /etc/resolv.conf file.
+type ClientConfig struct {
+ Servers []string // servers to use
+ Search []string // suffixes to append to local name
+ Port string // what port to use
+ Ndots int // number of dots in name to trigger absolute lookup
+ Timeout int // seconds before giving up on packet
+ Attempts int // lost packets before giving up on server, not used in the package dns
+}
+
+// ClientConfigFromFile parses a resolv.conf(5) like file and returns
+// a *ClientConfig.
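+//
+// A typical call (using the conventional file location):
+//
+//	conf, err := ClientConfigFromFile("/etc/resolv.conf")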
+func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
+ file, err := os.Open(resolvconf)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ c := new(ClientConfig)
+ scanner := bufio.NewScanner(file)
+ c.Servers = make([]string, 0)
+ c.Search = make([]string, 0)
+ c.Port = "53"
+ c.Ndots = 1
+ c.Timeout = 5
+ c.Attempts = 2
+
+ for scanner.Scan() {
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+ line := scanner.Text()
+ f := strings.Fields(line)
+ if len(f) < 1 {
+ continue
+ }
+ switch f[0] {
+ case "nameserver": // add one name server
+ if len(f) > 1 {
+ // One more check: make sure server name is
+ // just an IP address. Otherwise we need DNS
+ // to look it up.
+ name := f[1]
+ c.Servers = append(c.Servers, name)
+ }
+
+ case "domain": // set search path to just this domain
+ if len(f) > 1 {
+ c.Search = make([]string, 1)
+ c.Search[0] = f[1]
+ } else {
+ c.Search = make([]string, 0)
+ }
+
+ case "search": // set search path to given servers
+ c.Search = make([]string, len(f)-1)
+ for i := 0; i < len(c.Search); i++ {
+ c.Search[i] = f[i+1]
+ }
+
+ case "options": // magic options
+ for i := 1; i < len(f); i++ {
+ s := f[i]
+ switch {
+ case len(s) >= 6 && s[:6] == "ndots:":
+ n, _ := strconv.Atoi(s[6:])
+ if n < 1 {
+ n = 1
+ }
+ c.Ndots = n
+ case len(s) >= 8 && s[:8] == "timeout:":
+ n, _ := strconv.Atoi(s[8:])
+ if n < 1 {
+ n = 1
+ }
+ c.Timeout = n
+ case len(s) >= 9 && s[:9] == "attempts:":
+ n, _ := strconv.Atoi(s[9:])
+ if n < 1 {
+ n = 1
+ }
+ c.Attempts = n
+ case s == "rotate":
+ /* not imp */
+ }
+ }
+ }
+ }
+ return c, nil
+}
diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go
new file mode 100644
index 0000000000..7c5ecae03a
--- /dev/null
+++ b/vendor/github.com/miekg/dns/defaults.go
@@ -0,0 +1,275 @@
+package dns
+
+import (
+ "errors"
+ "net"
+ "strconv"
+)
+
+const hexDigit = "0123456789abcdef"
+
+// Everything is assumed in ClassINET.
+
+// SetReply creates a reply message from a request message.
+func (dns *Msg) SetReply(request *Msg) *Msg {
+ dns.Id = request.Id
+ dns.RecursionDesired = request.RecursionDesired // Copy rd bit
+ dns.Response = true
+ dns.Opcode = OpcodeQuery
+ dns.Rcode = RcodeSuccess
+ if len(request.Question) > 0 {
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = request.Question[0]
+ }
+ return dns
+}
+
+// SetQuestion creates a question message; it sets the Question
+// section, generates an Id and sets the RecursionDesired (RD)
+// bit to true.
+func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
+ dns.Id = Id()
+ dns.RecursionDesired = true
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, t, ClassINET}
+ return dns
+}
+
+// SetNotify creates a notify message; it sets the Question
+// section, generates an Id and sets the Authoritative (AA)
+// bit to true.
+func (dns *Msg) SetNotify(z string) *Msg {
+ dns.Opcode = OpcodeNotify
+ dns.Authoritative = true
+ dns.Id = Id()
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, TypeSOA, ClassINET}
+ return dns
+}
+
+// SetRcode creates an error message suitable for the request.
+func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
+ dns.SetReply(request)
+ dns.Rcode = rcode
+ return dns
+}
+
+// SetRcodeFormatError creates a message with FormError set.
+func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
+ dns.Rcode = RcodeFormatError
+ dns.Opcode = OpcodeQuery
+ dns.Response = true
+ dns.Authoritative = false
+ dns.Id = request.Id
+ return dns
+}
+
+// SetUpdate makes the message a dynamic update message. It
+// sets the ZONE section to: z, TypeSOA, ClassINET.
+func (dns *Msg) SetUpdate(z string) *Msg {
+ dns.Id = Id()
+ dns.Response = false
+ dns.Opcode = OpcodeUpdate
+ dns.Compress = false // BIND9 cannot handle compression
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, TypeSOA, ClassINET}
+ return dns
+}
+
+// SetIxfr creates a message for requesting an IXFR.
+func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
+ dns.Id = Id()
+ dns.Question = make([]Question, 1)
+ dns.Ns = make([]RR, 1)
+ s := new(SOA)
+ s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
+ s.Serial = serial
+ s.Ns = ns
+ s.Mbox = mbox
+ dns.Question[0] = Question{z, TypeIXFR, ClassINET}
+ dns.Ns[0] = s
+ return dns
+}
+
+// SetAxfr creates a message for requesting an AXFR.
+func (dns *Msg) SetAxfr(z string) *Msg {
+ dns.Id = Id()
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, TypeAXFR, ClassINET}
+ return dns
+}
+
+// SetTsig appends a TSIG RR to the message.
+// This is only a skeleton TSIG RR that is added as the last RR in the
+// additional section. The TSIG is calculated when the message is being sent.
+func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg {
+ t := new(TSIG)
+ t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
+ t.Algorithm = algo
+ t.Fudge = uint16(fudge) // Honor the caller-supplied fudge rather than a hardcoded 300.
+ t.TimeSigned = uint64(timesigned)
+ t.OrigId = dns.Id
+ dns.Extra = append(dns.Extra, t)
+ return dns
+}
+
+// SetEdns0 appends an EDNS0 OPT RR to the message.
+// TSIG should always be the last RR in a message.
+func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
+ e := new(OPT)
+ e.Hdr.Name = "."
+ e.Hdr.Rrtype = TypeOPT
+ e.SetUDPSize(udpsize)
+ if do {
+ e.SetDo()
+ }
+ dns.Extra = append(dns.Extra, e)
+ return dns
+}
+
+// IsTsig checks if the message has a TSIG record as the last record
+// in the additional section. It returns the TSIG record found or nil.
+func (dns *Msg) IsTsig() *TSIG {
+ if len(dns.Extra) > 0 {
+ if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
+ return dns.Extra[len(dns.Extra)-1].(*TSIG)
+ }
+ }
+ return nil
+}
+
+// IsEdns0 checks if the message has an EDNS0 (OPT) record; any EDNS0
+// record in the additional section will do. It returns the OPT record
+// found or nil.
+func (dns *Msg) IsEdns0() *OPT {
+ for _, r := range dns.Extra {
+ if r.Header().Rrtype == TypeOPT {
+ return r.(*OPT)
+ }
+ }
+ return nil
+}
+
+// IsDomainName checks if s is a valid domain name. It returns the number
+// of labels and true when the domain name is valid.
+// Note that a non fully qualified domain name is considered valid; in that
+// case the last label is included in the label count.
+// When false is returned, the number of labels is undefined.
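+// For example, IsDomainName("miek.nl.") returns (2, true).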
+func IsDomainName(s string) (labels int, ok bool) {
+ _, labels, err := packDomainName(s, nil, 0, nil, false)
+ return labels, err == nil
+}
+
+// IsSubDomain checks if child is indeed a child of the parent. Both child and
+// parent are *not* downcased before doing the comparison.
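+// For example, IsSubDomain("miek.nl.", "www.miek.nl.") is true, while
+// IsSubDomain("miek.nl.", "example.com.") is false.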
+func IsSubDomain(parent, child string) bool {
+ // Entire child is contained in parent
+ return CompareDomainName(parent, child) == CountLabel(parent)
+}
+
+// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
+// The checking is performed on the binary payload.
+func IsMsg(buf []byte) error {
+ // Header
+ if len(buf) < 12 {
+ return errors.New("dns: bad message header")
+ }
+ // Header: Opcode
+ // TODO(miek): more checks here, e.g. check all header bits.
+ return nil
+}
+
+// IsFqdn checks if a domain name is fully qualified.
+func IsFqdn(s string) bool {
+ l := len(s)
+ if l == 0 {
+ return false
+ }
+ return s[l-1] == '.'
+}
+
+// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
+// This means the RRs need to have the same type, name, and class. Returns true
+// if the RR set is valid, otherwise false.
+func IsRRset(rrset []RR) bool {
+ if len(rrset) == 0 {
+ return false
+ }
+ if len(rrset) == 1 {
+ return true
+ }
+ rrHeader := rrset[0].Header()
+ rrType := rrHeader.Rrtype
+ rrClass := rrHeader.Class
+ rrName := rrHeader.Name
+
+ for _, rr := range rrset[1:] {
+ curRRHeader := rr.Header()
+ if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
+ // Mismatch between the records, so this is not a valid RRset for
+ // signing/verifying.
+ return false
+ }
+ }
+
+ return true
+}
+
+// Fqdn returns the fully qualified domain name from s.
+// If s is already fully qualified, it behaves as the identity function.
+func Fqdn(s string) string {
+ if IsFqdn(s) {
+ return s
+ }
+ return s + "."
+}
+
+// Copied from the official Go code.
+
+// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
+// address suitable for reverse DNS (PTR) record lookups or an error if it fails
+// to parse the IP address.
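+// For example, ReverseAddr("192.0.2.1") returns "1.2.0.192.in-addr.arpa.".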
+func ReverseAddr(addr string) (arpa string, err error) {
+ ip := net.ParseIP(addr)
+ if ip == nil {
+ return "", &Error{err: "unrecognized address: " + addr}
+ }
+ if ip.To4() != nil {
+ return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
+ strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
+ }
+ // Must be IPv6
+ buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
+ // Add it, in reverse, to the buffer
+ for i := len(ip) - 1; i >= 0; i-- {
+ v := ip[i]
+ buf = append(buf, hexDigit[v&0xF])
+ buf = append(buf, '.')
+ buf = append(buf, hexDigit[v>>4])
+ buf = append(buf, '.')
+ }
+ // Append "ip6.arpa." and return (buf already has the final .)
+ buf = append(buf, "ip6.arpa."...)
+ return string(buf), nil
+}
+
+// String returns the string representation for the type t.
+func (t Type) String() string {
+ if t1, ok := TypeToString[uint16(t)]; ok {
+ return t1
+ }
+ return "TYPE" + strconv.Itoa(int(t))
+}
+
+// String returns the string representation for the class c.
+func (c Class) String() string {
+ if c1, ok := ClassToString[uint16(c)]; ok {
+ return c1
+ }
+ return "CLASS" + strconv.Itoa(int(c))
+}
+
+// String returns the string representation for the name n.
+func (n Name) String() string {
+ return sprintName(string(n))
+}
diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go
new file mode 100644
index 0000000000..a3e4a0efae
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dns.go
@@ -0,0 +1,100 @@
+package dns
+
+import "strconv"
+
+const (
+ year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
+ // DefaultMsgSize is the standard default for messages larger than 512 bytes.
+ DefaultMsgSize = 4096
+ // MinMsgSize is the minimal size of a DNS packet.
+ MinMsgSize = 512
+ // MaxMsgSize is the largest possible DNS packet.
+ MaxMsgSize = 65535
+ defaultTtl = 3600 // Default internal TTL.
+)
+
+// Error represents a DNS error
+type Error struct{ err string }
+
+func (e *Error) Error() string {
+ if e == nil {
+ return "dns: "
+ }
+ return "dns: " + e.err
+}
+
+// An RR represents a resource record.
+type RR interface {
+ // Header returns the header of a resource record. The header contains
+ // everything up to the rdata.
+ Header() *RR_Header
+ // String returns the text representation of the resource record.
+ String() string
+ // copy returns a copy of the RR
+ copy() RR
+ // len returns the length (in octets) of the uncompressed RR in wire format.
+ len() int
+}
+
+// RR_Header is the header all DNS resource records share.
+type RR_Header struct {
+ Name string `dns:"cdomain-name"`
+ Rrtype uint16
+ Class uint16
+ Ttl uint32
+ Rdlength uint16 // length of data after header
+}
+
+// Header returns itself. This is here to make RR_Header implement the RR interface.
+func (h *RR_Header) Header() *RR_Header { return h }
+
+// Just to implement the RR interface.
+func (h *RR_Header) copy() RR { return nil }
+
+func (h *RR_Header) copyHeader() *RR_Header {
+ r := new(RR_Header)
+ r.Name = h.Name
+ r.Rrtype = h.Rrtype
+ r.Class = h.Class
+ r.Ttl = h.Ttl
+ r.Rdlength = h.Rdlength
+ return r
+}
+
+func (h *RR_Header) String() string {
+ var s string
+
+ if h.Rrtype == TypeOPT {
+ s = ";"
+ // and maybe other things
+ }
+
+ s += sprintName(h.Name) + "\t"
+ s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
+ s += Class(h.Class).String() + "\t"
+ s += Type(h.Rrtype).String() + "\t"
+ return s
+}
+
+func (h *RR_Header) len() int {
+ l := len(h.Name) + 1
+ l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
+ return l
+}
+
+// ToRFC3597 converts a known RR to the unknown RR representation
+// from RFC 3597.
+func (rr *RFC3597) ToRFC3597(r RR) error {
+ buf := make([]byte, r.len()*2)
+ off, err := PackStruct(r, buf, 0)
+ if err != nil {
+ return err
+ }
+ buf = buf[:off]
+ rawSetRdlength(buf, 0, off)
+ _, err = UnpackStruct(rr, buf, 0)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go
new file mode 100644
index 0000000000..49ab4f1eb6
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec.go
@@ -0,0 +1,659 @@
+package dns
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ _ "crypto/md5"
+ "crypto/rand"
+ "crypto/rsa"
+ _ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "encoding/asn1"
+ "encoding/hex"
+ "math/big"
+ "sort"
+ "strings"
+ "time"
+)
+
+// DNSSEC signature algorithm codes.
+const (
+ _ uint8 = iota
+ RSAMD5
+ DH
+ DSA
+ _ // Skip 4, RFC 6725, section 2.1
+ RSASHA1
+ DSANSEC3SHA1
+ RSASHA1NSEC3SHA1
+ RSASHA256
+ _ // Skip 9, RFC 6725, section 2.1
+ RSASHA512
+ _ // Skip 11, RFC 6725, section 2.1
+ ECCGOST
+ ECDSAP256SHA256
+ ECDSAP384SHA384
+ INDIRECT uint8 = 252
+ PRIVATEDNS uint8 = 253 // Private (experimental keys)
+ PRIVATEOID uint8 = 254
+)
+
+// Map for algorithm names.
+var AlgorithmToString = map[uint8]string{
+ RSAMD5: "RSAMD5",
+ DH: "DH",
+ DSA: "DSA",
+ RSASHA1: "RSASHA1",
+ DSANSEC3SHA1: "DSA-NSEC3-SHA1",
+ RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
+ RSASHA256: "RSASHA256",
+ RSASHA512: "RSASHA512",
+ ECCGOST: "ECC-GOST",
+ ECDSAP256SHA256: "ECDSAP256SHA256",
+ ECDSAP384SHA384: "ECDSAP384SHA384",
+ INDIRECT: "INDIRECT",
+ PRIVATEDNS: "PRIVATEDNS",
+ PRIVATEOID: "PRIVATEOID",
+}
+
+// Map of algorithm strings.
+var StringToAlgorithm = reverseInt8(AlgorithmToString)
+
+// Map of algorithm crypto hashes.
+var AlgorithmToHash = map[uint8]crypto.Hash{
+ RSAMD5: crypto.MD5, // Deprecated in RFC 6725
+ RSASHA1: crypto.SHA1,
+ RSASHA1NSEC3SHA1: crypto.SHA1,
+ RSASHA256: crypto.SHA256,
+ ECDSAP256SHA256: crypto.SHA256,
+ ECDSAP384SHA384: crypto.SHA384,
+ RSASHA512: crypto.SHA512,
+}
+
+// DNSSEC hashing algorithm codes.
+const (
+ _ uint8 = iota
+ SHA1 // RFC 4034
+ SHA256 // RFC 4509
+ GOST94 // RFC 5933
+ SHA384 // Experimental
+ SHA512 // Experimental
+)
+
+// Map for hash names.
+var HashToString = map[uint8]string{
+ SHA1: "SHA1",
+ SHA256: "SHA256",
+ GOST94: "GOST94",
+ SHA384: "SHA384",
+ SHA512: "SHA512",
+}
+
+// Map of hash strings.
+var StringToHash = reverseInt8(HashToString)
+
+// DNSKEY flag values.
+const (
+ SEP = 1
+ REVOKE = 1 << 7
+ ZONE = 1 << 8
+)
+
+// The RRSIG needs to be converted to wire format with some of
+// the rdata (the signature) missing. Use this struct to ease
+// the conversion (and re-use the pack/unpack functions).
+type rrsigWireFmt struct {
+ TypeCovered uint16
+ Algorithm uint8
+ Labels uint8
+ OrigTtl uint32
+ Expiration uint32
+ Inception uint32
+ KeyTag uint16
+ SignerName string `dns:"domain-name"`
+ /* No Signature */
+}
+
+// Used for converting DNSKEY's rdata to wire format.
+type dnskeyWireFmt struct {
+ Flags uint16
+ Protocol uint8
+ Algorithm uint8
+ PublicKey string `dns:"base64"`
+ /* Nothing is left out */
+}
+
+func divRoundUp(a, b int) int {
+ return (a + b - 1) / b
+}
+
+// KeyTag calculates the keytag (or key-id) of the DNSKEY.
+func (k *DNSKEY) KeyTag() uint16 {
+ if k == nil {
+ return 0
+ }
+ var keytag int
+ switch k.Algorithm {
+ case RSAMD5:
+ // Look at the bottom two bytes of the modulus, which is the last
+ // item in the pubkey. We could do this faster by looking directly
+ // at the base64 values. But I'm lazy.
+ modulus, _ := fromBase64([]byte(k.PublicKey))
+ if len(modulus) > 1 {
+ x, _ := unpackUint16(modulus, len(modulus)-2)
+ keytag = int(x)
+ }
+ default:
+ keywire := new(dnskeyWireFmt)
+ keywire.Flags = k.Flags
+ keywire.Protocol = k.Protocol
+ keywire.Algorithm = k.Algorithm
+ keywire.PublicKey = k.PublicKey
+ wire := make([]byte, DefaultMsgSize)
+ n, err := PackStruct(keywire, wire, 0)
+ if err != nil {
+ return 0
+ }
+ wire = wire[:n]
+ for i, v := range wire {
+ if i&1 != 0 {
+ keytag += int(v) // must be larger than uint32
+ } else {
+ keytag += int(v) << 8
+ }
+ }
+ keytag += (keytag >> 16) & 0xFFFF
+ keytag &= 0xFFFF
+ }
+ return uint16(keytag)
+}
+
+// ToDS converts a DNSKEY record to a DS record.
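+// For example, key.ToDS(SHA256) yields the SHA-256 digest form of the key.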
+func (k *DNSKEY) ToDS(h uint8) *DS {
+ if k == nil {
+ return nil
+ }
+ ds := new(DS)
+ ds.Hdr.Name = k.Hdr.Name
+ ds.Hdr.Class = k.Hdr.Class
+ ds.Hdr.Rrtype = TypeDS
+ ds.Hdr.Ttl = k.Hdr.Ttl
+ ds.Algorithm = k.Algorithm
+ ds.DigestType = h
+ ds.KeyTag = k.KeyTag()
+
+ keywire := new(dnskeyWireFmt)
+ keywire.Flags = k.Flags
+ keywire.Protocol = k.Protocol
+ keywire.Algorithm = k.Algorithm
+ keywire.PublicKey = k.PublicKey
+ wire := make([]byte, DefaultMsgSize)
+ n, err := PackStruct(keywire, wire, 0)
+ if err != nil {
+ return nil
+ }
+ wire = wire[:n]
+
+ owner := make([]byte, 255)
+ off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
+ if err1 != nil {
+ return nil
+ }
+ owner = owner[:off]
+ // RFC4034:
+ // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
+ // "|" denotes concatenation
+ // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
+
+ // digest buffer
+ digest := append(owner, wire...) // another copy
+
+ var hash crypto.Hash
+ switch h {
+ case SHA1:
+ hash = crypto.SHA1
+ case SHA256:
+ hash = crypto.SHA256
+ case SHA384:
+ hash = crypto.SHA384
+ case SHA512:
+ hash = crypto.SHA512
+ default:
+ return nil
+ }
+
+ s := hash.New()
+ s.Write(digest)
+ ds.Digest = hex.EncodeToString(s.Sum(nil))
+ return ds
+}
+
+// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
+func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
+ c := &CDNSKEY{DNSKEY: *k}
+ c.Hdr = *k.Hdr.copyHeader()
+ c.Hdr.Rrtype = TypeCDNSKEY
+ return c
+}
+
+// ToCDS converts a DS record to a CDS record.
+func (d *DS) ToCDS() *CDS {
+ c := &CDS{DS: *d}
+ c.Hdr = *d.Hdr.copyHeader()
+ c.Hdr.Rrtype = TypeCDS
+ return c
+}
+
+// Sign signs an RRSet. The signature needs to be filled in with the values:
+// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
+// from the RRset. Sign returns a non-nil error when the signing failed.
+// There is no check if RRSet is a proper (RFC 2181) RRSet.
+// If OrigTTL is non zero, it is used as-is, otherwise the TTL of the RRset
+// is used as the OrigTTL.
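+// A minimal sketch (the key, the times and the RRset are illustrative):
+//
+//	sig := new(RRSIG)
+//	sig.KeyTag = key.KeyTag()
+//	sig.SignerName = key.Hdr.Name
+//	sig.Algorithm = key.Algorithm
+//	sig.Inception = uint32(time.Now().Add(-time.Hour).Unix())
+//	sig.Expiration = uint32(time.Now().Add(time.Hour).Unix())
+//	err := sig.Sign(privkey, rrset) // privkey is the key's crypto.Signer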
+func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
+ if k == nil {
+ return ErrPrivKey
+ }
+ // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
+ if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ return ErrKey
+ }
+
+ rr.Hdr.Rrtype = TypeRRSIG
+ rr.Hdr.Name = rrset[0].Header().Name
+ rr.Hdr.Class = rrset[0].Header().Class
+ if rr.OrigTtl == 0 { // If set don't override
+ rr.OrigTtl = rrset[0].Header().Ttl
+ }
+ rr.TypeCovered = rrset[0].Header().Rrtype
+ rr.Labels = uint8(CountLabel(rrset[0].Header().Name))
+
+ if strings.HasPrefix(rrset[0].Header().Name, "*") {
+ rr.Labels-- // wildcard, remove from label count
+ }
+
+ sigwire := new(rrsigWireFmt)
+ sigwire.TypeCovered = rr.TypeCovered
+ sigwire.Algorithm = rr.Algorithm
+ sigwire.Labels = rr.Labels
+ sigwire.OrigTtl = rr.OrigTtl
+ sigwire.Expiration = rr.Expiration
+ sigwire.Inception = rr.Inception
+ sigwire.KeyTag = rr.KeyTag
+ // For signing, lowercase this name
+ sigwire.SignerName = strings.ToLower(rr.SignerName)
+
+ // Create the desired binary blob
+ signdata := make([]byte, DefaultMsgSize)
+ n, err := PackStruct(sigwire, signdata, 0)
+ if err != nil {
+ return err
+ }
+ signdata = signdata[:n]
+ wire, err := rawSignatureData(rrset, rr)
+ if err != nil {
+ return err
+ }
+ signdata = append(signdata, wire...)
+
+ hash, ok := AlgorithmToHash[rr.Algorithm]
+ if !ok {
+ return ErrAlg
+ }
+
+ h := hash.New()
+ h.Write(signdata)
+
+ signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
+ if err != nil {
+ return err
+ }
+
+ rr.Signature = toBase64(signature)
+
+ return nil
+}
+
+func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
+ signature, err := k.Sign(rand.Reader, hashed, hash)
+ if err != nil {
+ return nil, err
+ }
+
+ switch alg {
+ case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
+ return signature, nil
+
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ ecdsaSignature := &struct {
+ R, S *big.Int
+ }{}
+ if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
+ return nil, err
+ }
+
+ var intlen int
+ switch alg {
+ case ECDSAP256SHA256:
+ intlen = 32
+ case ECDSAP384SHA384:
+ intlen = 48
+ }
+
+ signature := intToBytes(ecdsaSignature.R, intlen)
+ signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
+ return signature, nil
+
+ // There is no defined interface for what a DSA backed crypto.Signer returns
+ case DSA, DSANSEC3SHA1:
+ // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
+ // signature := []byte{byte(t)}
+ // signature = append(signature, intToBytes(r1, 20)...)
+ // signature = append(signature, intToBytes(s1, 20)...)
+ // rr.Signature = signature
+ }
+
+ return nil, ErrAlg
+}
+
+// Verify validates an RRSet with the signature and key. This is only the
+// cryptographic test, the signature validity period must be checked separately.
+// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
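+// A typical check pairs Verify with ValidityPeriod (sketch):
+//
+//	if err := sig.Verify(key, rrset); err == nil && sig.ValidityPeriod(time.Now()) {
+//		// rrset is cryptographically valid and within its validity period
+//	}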
+func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
+ // First the easy checks
+ if !IsRRset(rrset) {
+ return ErrRRset
+ }
+ if rr.KeyTag != k.KeyTag() {
+ return ErrKey
+ }
+ if rr.Hdr.Class != k.Hdr.Class {
+ return ErrKey
+ }
+ if rr.Algorithm != k.Algorithm {
+ return ErrKey
+ }
+ if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) {
+ return ErrKey
+ }
+ if k.Protocol != 3 {
+ return ErrKey
+ }
+
+ // IsRRset checked that we have at least one RR and that the RRs in
+ // the set have consistent type, class, and name. Also check that type and
+ // class match the RRSIG record.
+ if rrset[0].Header().Class != rr.Hdr.Class {
+ return ErrRRset
+ }
+ if rrset[0].Header().Rrtype != rr.TypeCovered {
+ return ErrRRset
+ }
+
+ // RFC 4035 5.3.2. Reconstructing the Signed Data
+ // Copy the sig, except the rrsig data
+ sigwire := new(rrsigWireFmt)
+ sigwire.TypeCovered = rr.TypeCovered
+ sigwire.Algorithm = rr.Algorithm
+ sigwire.Labels = rr.Labels
+ sigwire.OrigTtl = rr.OrigTtl
+ sigwire.Expiration = rr.Expiration
+ sigwire.Inception = rr.Inception
+ sigwire.KeyTag = rr.KeyTag
+ sigwire.SignerName = strings.ToLower(rr.SignerName)
+ // Create the desired binary blob
+ signeddata := make([]byte, DefaultMsgSize)
+ n, err := PackStruct(sigwire, signeddata, 0)
+ if err != nil {
+ return err
+ }
+ signeddata = signeddata[:n]
+ wire, err := rawSignatureData(rrset, rr)
+ if err != nil {
+ return err
+ }
+ signeddata = append(signeddata, wire...)
+
+ sigbuf := rr.sigBuf() // Get the binary signature data
+ if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
+ // TODO(mg)
+ // remove the domain name and assume it's ours
+ }
+
+ hash, ok := AlgorithmToHash[rr.Algorithm]
+ if !ok {
+ return ErrAlg
+ }
+
+ switch rr.Algorithm {
+ case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
+ // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
+ pubkey := k.publicKeyRSA() // Get the key
+ if pubkey == nil {
+ return ErrKey
+ }
+
+ h := hash.New()
+ h.Write(signeddata)
+ return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
+
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ pubkey := k.publicKeyECDSA()
+ if pubkey == nil {
+ return ErrKey
+ }
+
+ // Split sigbuf into the r and s coordinates
+ r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
+ s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
+
+ h := hash.New()
+ h.Write(signeddata)
+ if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
+ return nil
+ }
+ return ErrSig
+
+ default:
+ return ErrAlg
+ }
+}
+
+// ValidityPeriod uses RFC 1982 serial arithmetic to calculate
+// if a signature period is valid. If t is the zero time, the
+// current time is taken, otherwise t is used. Returns true if the signature
+// is valid at the given time, otherwise returns false.
+func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
+ var utc int64
+ if t.IsZero() {
+ utc = time.Now().UTC().Unix()
+ } else {
+ utc = t.UTC().Unix()
+ }
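+ // Fold the 32-bit Inception and Expiration timestamps into the
+ // 68-year window around utc (RFC 1982 serial arithmetic).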
+ modi := (int64(rr.Inception) - utc) / year68
+ mode := (int64(rr.Expiration) - utc) / year68
+ ti := int64(rr.Inception) + (modi * year68)
+ te := int64(rr.Expiration) + (mode * year68)
+ return ti <= utc && utc <= te
+}
+
+// sigBuf returns the signature rdata, decoded from its base64 representation.
+func (rr *RRSIG) sigBuf() []byte {
+ sigbuf, err := fromBase64([]byte(rr.Signature))
+ if err != nil {
+ return nil
+ }
+ return sigbuf
+}
+
+// publicKeyRSA returns the RSA public key from a DNSKEY record.
+func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+
+ // RFC 2537/3110, section 2. RSA Public KEY Resource Records
+ // Length is in the 0th byte, unless it's zero, then it
+ // is in bytes 1 and 2 and it's a 16 bit number.
+ explen := uint16(keybuf[0])
+ keyoff := 1
+ if explen == 0 {
+ explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
+ keyoff = 3
+ }
+ pubkey := new(rsa.PublicKey)
+
+ pubkey.N = big.NewInt(0)
+ // The exponent is stored most significant octet first (RFC 3110,
+ // section 2), so fold the octets in from the left.
+ expo := uint64(0)
+ for i := 0; i < int(explen); i++ {
+ expo = expo<<8 | uint64(keybuf[keyoff+i])
+ }
+ if expo > 2<<31 {
+ // Larger expo than supported.
+ // println("dns: F5 primes (or larger) are not supported")
+ return nil
+ }
+ pubkey.E = int(expo)
+
+ pubkey.N.SetBytes(keybuf[keyoff+int(explen):])
+ return pubkey
+}
+
+// publicKeyECDSA returns the Curve public key from the DNSKEY record.
+func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+ pubkey := new(ecdsa.PublicKey)
+ switch k.Algorithm {
+ case ECDSAP256SHA256:
+ pubkey.Curve = elliptic.P256()
+ if len(keybuf) != 64 {
+ // wrongly encoded key
+ return nil
+ }
+ case ECDSAP384SHA384:
+ pubkey.Curve = elliptic.P384()
+ if len(keybuf) != 96 {
+ // Wrongly encoded key
+ return nil
+ }
+ }
+ pubkey.X = big.NewInt(0)
+ pubkey.X.SetBytes(keybuf[:len(keybuf)/2])
+ pubkey.Y = big.NewInt(0)
+ pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
+ return pubkey
+}
+
+func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+ if len(keybuf) < 22 {
+ return nil
+ }
+ t, keybuf := int(keybuf[0]), keybuf[1:]
+ size := 64 + t*8
+ q, keybuf := keybuf[:20], keybuf[20:]
+ if len(keybuf) != 3*size {
+ return nil
+ }
+ p, keybuf := keybuf[:size], keybuf[size:]
+ g, y := keybuf[:size], keybuf[size:]
+ pubkey := new(dsa.PublicKey)
+ pubkey.Parameters.Q = big.NewInt(0).SetBytes(q)
+ pubkey.Parameters.P = big.NewInt(0).SetBytes(p)
+ pubkey.Parameters.G = big.NewInt(0).SetBytes(g)
+ pubkey.Y = big.NewInt(0).SetBytes(y)
+ return pubkey
+}
+
+type wireSlice [][]byte
+
+func (p wireSlice) Len() int { return len(p) }
+func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
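+// Less orders the wire-format RRs by their RDATA, skipping the owner
+// name and the 10 fixed octets (type, class, TTL, rdlength) that follow it.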
+func (p wireSlice) Less(i, j int) bool {
+ _, ioff, _ := UnpackDomainName(p[i], 0)
+ _, joff, _ := UnpackDomainName(p[j], 0)
+ return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
+}
+
+// Return the raw signature data.
+func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
+ wires := make(wireSlice, len(rrset))
+ for i, r := range rrset {
+ r1 := r.copy()
+ r1.Header().Ttl = s.OrigTtl
+ labels := SplitDomainName(r1.Header().Name)
+ // 6.2. Canonical RR Form. (4) - wildcards
+ if len(labels) > int(s.Labels) {
+ // Wildcard
+ r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
+ }
+ // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
+ r1.Header().Name = strings.ToLower(r1.Header().Name)
+ // 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
+ // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
+ // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
+ // SRV, DNAME, A6
+ switch x := r1.(type) {
+ case *NS:
+ x.Ns = strings.ToLower(x.Ns)
+ case *CNAME:
+ x.Target = strings.ToLower(x.Target)
+ case *SOA:
+ x.Ns = strings.ToLower(x.Ns)
+ x.Mbox = strings.ToLower(x.Mbox)
+ case *MB:
+ x.Mb = strings.ToLower(x.Mb)
+ case *MG:
+ x.Mg = strings.ToLower(x.Mg)
+ case *MR:
+ x.Mr = strings.ToLower(x.Mr)
+ case *PTR:
+ x.Ptr = strings.ToLower(x.Ptr)
+ case *MINFO:
+ x.Rmail = strings.ToLower(x.Rmail)
+ x.Email = strings.ToLower(x.Email)
+ case *MX:
+ x.Mx = strings.ToLower(x.Mx)
+ case *NAPTR:
+ x.Replacement = strings.ToLower(x.Replacement)
+ case *KX:
+ x.Exchanger = strings.ToLower(x.Exchanger)
+ case *SRV:
+ x.Target = strings.ToLower(x.Target)
+ case *DNAME:
+ x.Target = strings.ToLower(x.Target)
+ }
+ // 6.2. Canonical RR Form. (5) - origTTL
+ wire := make([]byte, r1.len()+1) // +1 to be safe(r)
+ off, err1 := PackRR(r1, wire, 0, nil, false)
+ if err1 != nil {
+ return nil, err1
+ }
+ wire = wire[:off]
+ wires[i] = wire
+ }
+ sort.Sort(wires)
+ for i, wire := range wires {
+ if i > 0 && bytes.Equal(wire, wires[i-1]) {
+ continue
+ }
+ buf = append(buf, wire...)
+ }
+ return buf, nil
+}
diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go
new file mode 100644
index 0000000000..229a079370
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_keygen.go
@@ -0,0 +1,156 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "math/big"
+)
+
+// Generate generates a DNSKEY of the given bit size.
+// The public part is put inside the DNSKEY record.
+// The Algorithm in the key must be set as this will define
+// what kind of DNSKEY will be generated.
+// The ECDSA algorithms imply a fixed key size; in that case
+// bits should be set to the size of the algorithm.
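+// A minimal sketch generating a P-256 key (the owner name is illustrative):
+//
+//	key := new(DNSKEY)
+//	key.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600}
+//	key.Flags = ZONE
+//	key.Protocol = 3
+//	key.Algorithm = ECDSAP256SHA256
+//	priv, err := key.Generate(256)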
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
+ switch k.Algorithm {
+ case DSA, DSANSEC3SHA1:
+ if bits != 1024 {
+ return nil, ErrKeySize
+ }
+ case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
+ if bits < 512 || bits > 4096 {
+ return nil, ErrKeySize
+ }
+ case RSASHA512:
+ if bits < 1024 || bits > 4096 {
+ return nil, ErrKeySize
+ }
+ case ECDSAP256SHA256:
+ if bits != 256 {
+ return nil, ErrKeySize
+ }
+ case ECDSAP384SHA384:
+ if bits != 384 {
+ return nil, ErrKeySize
+ }
+ }
+
+ switch k.Algorithm {
+ case DSA, DSANSEC3SHA1:
+ params := new(dsa.Parameters)
+ if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
+ return nil, err
+ }
+ priv := new(dsa.PrivateKey)
+ priv.PublicKey.Parameters = *params
+ err := dsa.GenerateKey(priv, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
+ return priv, nil
+ case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
+ priv, err := rsa.GenerateKey(rand.Reader, bits)
+ if err != nil {
+ return nil, err
+ }
+ k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
+ return priv, nil
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ var c elliptic.Curve
+ switch k.Algorithm {
+ case ECDSAP256SHA256:
+ c = elliptic.P256()
+ case ECDSAP384SHA384:
+ c = elliptic.P384()
+ }
+ priv, err := ecdsa.GenerateKey(c, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
+ return priv, nil
+ default:
+ return nil, ErrAlg
+ }
+}
+
+// Set the public key (the values E and N).
+func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
+ if _E == 0 || _N == nil {
+ return false
+ }
+ buf := exponentToBuf(_E)
+ buf = append(buf, _N.Bytes()...)
+ k.PublicKey = toBase64(buf)
+ return true
+}
+
+// Set the public key for Elliptic Curves
+func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
+ if _X == nil || _Y == nil {
+ return false
+ }
+ var intlen int
+ switch k.Algorithm {
+ case ECDSAP256SHA256:
+ intlen = 32
+ case ECDSAP384SHA384:
+ intlen = 48
+ }
+ k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
+ return true
+}
+
+// Set the public key for DSA
+func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
+ if _Q == nil || _P == nil || _G == nil || _Y == nil {
+ return false
+ }
+ buf := dsaToBuf(_Q, _P, _G, _Y)
+ k.PublicKey = toBase64(buf)
+ return true
+}
+
+// exponentToBuf returns a buffer holding the public exponent, prefixed
+// with its length as described in RFC 3110, Section 2 (RSA Public KEY
+// Resource Records).
+func exponentToBuf(_E int) []byte {
+ var buf []byte
+ i := big.NewInt(int64(_E))
+ if len(i.Bytes()) < 256 {
+ buf = make([]byte, 1)
+ buf[0] = uint8(len(i.Bytes()))
+ } else {
+ buf = make([]byte, 3)
+ buf[0] = 0
+ buf[1] = uint8(len(i.Bytes()) >> 8)
+ buf[2] = uint8(len(i.Bytes()))
+ }
+ buf = append(buf, i.Bytes()...)
+ return buf
+}
+
+// curveToBuf concatenates the X and Y values of an elliptic curve
+// public key into one buffer.
+func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
+ buf := intToBytes(_X, intlen)
+ buf = append(buf, intToBytes(_Y, intlen)...)
+ return buf
+}
+
+// dsaToBuf encodes the DSA values as the parameter octet T followed by
+// Q, P, G and Y, concatenated.
+func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte {
+ t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8)
+ buf := []byte{byte(t)}
+ buf = append(buf, intToBytes(_Q, 20)...)
+ buf = append(buf, intToBytes(_P, 64+t*8)...)
+ buf = append(buf, intToBytes(_G, 64+t*8)...)
+ buf = append(buf, intToBytes(_Y, 64+t*8)...)
+ return buf
+}
diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go
new file mode 100644
index 0000000000..19a783389a
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go
@@ -0,0 +1,249 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "io"
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+// NewPrivateKey returns a PrivateKey by parsing the string s.
+// s should be in the same form as the BIND private key files.
+func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
+ if len(s) == 0 || s[len(s)-1] != '\n' { // We need a closing newline
+ return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
+ }
+ return k.ReadPrivateKey(strings.NewReader(s), "")
+}
+
+// ReadPrivateKey reads a private key from the io.Reader q. The string file is
+// only used in error reporting.
+// The public key must be known, because some cryptographic algorithms embed
+// the public key inside the private key.
+func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
+ m, e := parseKey(q, file)
+ if m == nil {
+ return nil, e
+ }
+ if _, ok := m["private-key-format"]; !ok {
+ return nil, ErrPrivKey
+ }
+ if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
+ return nil, ErrPrivKey
+ }
+ // TODO(mg): check if the pubkey matches the private key
+ algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0])
+ if err != nil {
+ return nil, ErrPrivKey
+ }
+ switch uint8(algo) {
+ case DSA:
+ priv, e := readPrivateKeyDSA(m)
+ if e != nil {
+ return nil, e
+ }
+ pub := k.publicKeyDSA()
+ if pub == nil {
+ return nil, ErrKey
+ }
+ priv.PublicKey = *pub
+ return priv, e
+ case RSAMD5:
+ fallthrough
+ case RSASHA1:
+ fallthrough
+ case RSASHA1NSEC3SHA1:
+ fallthrough
+ case RSASHA256:
+ fallthrough
+ case RSASHA512:
+ priv, e := readPrivateKeyRSA(m)
+ if e != nil {
+ return nil, e
+ }
+ pub := k.publicKeyRSA()
+ if pub == nil {
+ return nil, ErrKey
+ }
+ priv.PublicKey = *pub
+ return priv, e
+ case ECCGOST:
+ return nil, ErrPrivKey
+ case ECDSAP256SHA256:
+ fallthrough
+ case ECDSAP384SHA384:
+ priv, e := readPrivateKeyECDSA(m)
+ if e != nil {
+ return nil, e
+ }
+ pub := k.publicKeyECDSA()
+ if pub == nil {
+ return nil, ErrKey
+ }
+ priv.PublicKey = *pub
+ return priv, e
+ default:
+ return nil, ErrPrivKey
+ }
+}
+
+// readPrivateKeyRSA reads the RSA private key values from the parsed map
+// and returns the private key, with the public parts filled in as well.
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
+ p := new(rsa.PrivateKey)
+ p.Primes = []*big.Int{nil, nil}
+ for k, v := range m {
+ switch k {
+ case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
+ v1, err := fromBase64([]byte(v))
+ if err != nil {
+ return nil, err
+ }
+ switch k {
+ case "modulus":
+ p.PublicKey.N = big.NewInt(0)
+ p.PublicKey.N.SetBytes(v1)
+ case "publicexponent":
+ i := big.NewInt(0)
+ i.SetBytes(v1)
+ p.PublicKey.E = int(i.Int64()) // int64 should be large enough
+ case "privateexponent":
+ p.D = big.NewInt(0)
+ p.D.SetBytes(v1)
+ case "prime1":
+ p.Primes[0] = big.NewInt(0)
+ p.Primes[0].SetBytes(v1)
+ case "prime2":
+ p.Primes[1] = big.NewInt(0)
+ p.Primes[1].SetBytes(v1)
+ }
+ case "exponent1", "exponent2", "coefficient":
+ // not used in Go (yet)
+ case "created", "publish", "activate":
+ // not used in Go (yet)
+ }
+ }
+ return p, nil
+}
+
+func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
+ p := new(dsa.PrivateKey)
+ p.X = big.NewInt(0)
+ for k, v := range m {
+ switch k {
+ case "private_value(x)":
+ v1, err := fromBase64([]byte(v))
+ if err != nil {
+ return nil, err
+ }
+ p.X.SetBytes(v1)
+ case "created", "publish", "activate":
+ /* not used in Go (yet) */
+ }
+ }
+ return p, nil
+}
+
+func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
+ p := new(ecdsa.PrivateKey)
+ p.D = big.NewInt(0)
+ // TODO: validate that the required flags are present
+ for k, v := range m {
+ switch k {
+ case "privatekey":
+ v1, err := fromBase64([]byte(v))
+ if err != nil {
+ return nil, err
+ }
+ p.D.SetBytes(v1)
+ case "created", "publish", "activate":
+ /* not used in Go (yet) */
+ }
+ }
+ return p, nil
+}
+
+// parseKey reads a private key from r. It returns a map[string]string,
+// with the key-value pairs, or an error when the file is not correct.
+func parseKey(r io.Reader, file string) (map[string]string, error) {
+ s := scanInit(r)
+ m := make(map[string]string)
+ c := make(chan lex)
+ k := ""
+ // Start the lexer
+ go klexer(s, c)
+ for l := range c {
+ // It should alternate
+ switch l.value {
+ case zKey:
+ k = l.token
+ case zValue:
+ if k == "" {
+ return nil, &ParseError{file, "no private key seen", l}
+ }
+ //println("Setting", strings.ToLower(k), "to", l.token, "b")
+ m[strings.ToLower(k)] = l.token
+ k = ""
+ }
+ }
+ return m, nil
+}
+
+// klexer scans the source file and returns tokens on the channel c.
+func klexer(s *scan, c chan lex) {
+ var l lex
+ str := "" // Hold the current read text
+ commt := false
+ key := true
+ x, err := s.tokenText()
+ defer close(c)
+ for err == nil {
+ l.column = s.position.Column
+ l.line = s.position.Line
+ switch x {
+ case ':':
+ if commt {
+ break
+ }
+ l.token = str
+ if key {
+ l.value = zKey
+ c <- l
+ // Next token is a space, eat it
+ s.tokenText()
+ key = false
+ str = ""
+ } else {
+ l.value = zValue
+ }
+ case ';':
+ commt = true
+ case '\n':
+ // A newline ends any comment and emits the pending value.
+ l.value = zValue
+ l.token = str
+ c <- l
+ str = ""
+ commt = false
+ key = true
+ default:
+ if commt {
+ break
+ }
+ str += string(x)
+ }
+ x, err = s.tokenText()
+ }
+ if len(str) > 0 {
+ // Send remainder
+ l.token = str
+ l.value = zValue
+ c <- l
+ }
+}
diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go
new file mode 100644
index 0000000000..56f3ea934f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_privkey.go
@@ -0,0 +1,85 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "math/big"
+ "strconv"
+)
+
+const format = "Private-key-format: v1.3\n"
+
+// PrivateKeyString converts a PrivateKey to a string. This string has the same
+// format as the private-key-file of BIND9 (Private-key-format: v1.3).
+// It needs some info from the key (the algorithm), so it is a method of DNSKEY.
+// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey.
+func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
+ algorithm := strconv.Itoa(int(r.Algorithm))
+ algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
+
+ switch p := p.(type) {
+ case *rsa.PrivateKey:
+ modulus := toBase64(p.PublicKey.N.Bytes())
+ e := big.NewInt(int64(p.PublicKey.E))
+ publicExponent := toBase64(e.Bytes())
+ privateExponent := toBase64(p.D.Bytes())
+ prime1 := toBase64(p.Primes[0].Bytes())
+ prime2 := toBase64(p.Primes[1].Bytes())
+ // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
+ // and from: http://code.google.com/p/go/issues/detail?id=987
+ one := big.NewInt(1)
+ p1 := big.NewInt(0).Sub(p.Primes[0], one)
+ q1 := big.NewInt(0).Sub(p.Primes[1], one)
+ exp1 := big.NewInt(0).Mod(p.D, p1)
+ exp2 := big.NewInt(0).Mod(p.D, q1)
+ coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
+
+ exponent1 := toBase64(exp1.Bytes())
+ exponent2 := toBase64(exp2.Bytes())
+ coefficient := toBase64(coeff.Bytes())
+
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "Modulus: " + modulus + "\n" +
+ "PublicExponent: " + publicExponent + "\n" +
+ "PrivateExponent: " + privateExponent + "\n" +
+ "Prime1: " + prime1 + "\n" +
+ "Prime2: " + prime2 + "\n" +
+ "Exponent1: " + exponent1 + "\n" +
+ "Exponent2: " + exponent2 + "\n" +
+ "Coefficient: " + coefficient + "\n"
+
+ case *ecdsa.PrivateKey:
+ var intlen int
+ switch r.Algorithm {
+ case ECDSAP256SHA256:
+ intlen = 32
+ case ECDSAP384SHA384:
+ intlen = 48
+ }
+ private := toBase64(intToBytes(p.D, intlen))
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "PrivateKey: " + private + "\n"
+
+ case *dsa.PrivateKey:
+ T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
+ prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
+ subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
+ base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
+ priv := toBase64(intToBytes(p.X, 20))
+ pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "Prime(p): " + prime + "\n" +
+ "Subprime(q): " + subprime + "\n" +
+ "Base(g): " + base + "\n" +
+ "Private_value(x): " + priv + "\n" +
+ "Public_value(y): " + pub + "\n"
+
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go
new file mode 100644
index 0000000000..4e48ae2eb9
--- /dev/null
+++ b/vendor/github.com/miekg/dns/doc.go
@@ -0,0 +1,251 @@
+/*
+Package dns implements a full featured interface to the Domain Name System.
+Server- and client-side programming is supported.
+The package allows complete control over what is sent out to the DNS. The package
+API follows the less-is-more principle by presenting a small, clean interface.
+
+The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
+TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
+Note that domain names MUST be fully qualified before sending them; unqualified
+names in a message will result in a packing failure.
+
+Resource records are native types. They are not stored in wire format.
+Basic usage pattern for creating a new resource record:
+
+ r := new(dns.MX)
+ r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
+ Class: dns.ClassINET, Ttl: 3600}
+ r.Preference = 10
+ r.Mx = "mx.miek.nl."
+
+Or directly from a string:
+
+ mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
+
+Or when the default TTL (3600) and class (IN) suit you:
+
+ mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
+
+Or even:
+
+ mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
+
+In the DNS, messages are exchanged; these messages contain resource
+records (sets). Basic use pattern for creating a message:
+
+ m := new(dns.Msg)
+ m.SetQuestion("miek.nl.", dns.TypeMX)
+
+Or when not certain if the domain name is fully qualified:
+
+ m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
+
+The message m is now a message with the question section set to ask
+the MX records for the miek.nl. zone.
+
+The following is slightly more verbose, but more flexible:
+
+ m1 := new(dns.Msg)
+ m1.Id = dns.Id()
+ m1.RecursionDesired = true
+ m1.Question = make([]dns.Question, 1)
+ m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
+
+After creating a message it can be sent.
+Basic use pattern for synchronously querying the DNS at a
+server configured on 127.0.0.1 and port 53:
+
+ c := new(dns.Client)
+ in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
+
+Suppressing multiple outstanding queries (with the same question, type and
+class) is as easy as setting:
+
+ c.SingleInflight = true
+
+If these "advanced" features are not needed, a simple UDP query can be sent
+with:
+
+ in, err := dns.Exchange(m1, "127.0.0.1:53")
+
+When this function returns you will get a DNS message. A DNS message consists
+of four sections.
+The question section: in.Question, the answer section: in.Answer,
+the authority section: in.Ns and the additional section: in.Extra.
+
+Each of these sections (except the Question section) contains a []RR. Basic
+use pattern for accessing the rdata of a TXT RR as the first RR in
+the Answer section:
+
+ if t, ok := in.Answer[0].(*dns.TXT); ok {
+ // do something with t.Txt
+ }
+
+Domain Name and TXT Character String Representations
+
+Both domain names and TXT character strings are converted to presentation
+form both when unpacked and when converted to strings.
+
+For TXT character strings, tabs, carriage returns and line feeds will be
+converted to \t, \r and \n respectively. Backslashes and quotation marks
+will be escaped. Bytes below 32 and above 127 will be converted to \DDD
+form.
+
+For domain names, in addition to the above rules, brackets, periods,
+spaces, semicolons and the at symbol are escaped.
+
+DNSSEC
+
+DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
+uses public key cryptography to sign resource records. The
+public keys are stored in DNSKEY records and the signatures in RRSIG records.
+
+Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
+to a request.
+
+ m := new(dns.Msg)
+ m.SetEdns0(4096, true)
+
+Signature generation, signature verification and key generation are all supported.
+
+DYNAMIC UPDATES
+
+Dynamic updates reuse the DNS message format, but rename three of
+the sections. Question is Zone, Answer is Prerequisite, Authority is
+Update; only the Additional section is not renamed. See RFC 2136 for the gory details.
+
+You can set a rather complex set of rules for the existence or absence of
+certain resource records or names in a zone to specify whether resource records
+should be added or removed. The table from RFC 2136, supplemented with the
+Go DNS function names, shows which functions exist to specify the prerequisites.
+
+ 3.2.4 - Table Of Metavalues Used In Prerequisite Section
+
+ CLASS TYPE RDATA Meaning Function
+ --------------------------------------------------------------
+ ANY ANY empty Name is in use dns.NameUsed
+ ANY rrset empty RRset exists (value indep) dns.RRsetUsed
+ NONE ANY empty Name is not in use dns.NameNotUsed
+ NONE rrset empty RRset does not exist dns.RRsetNotUsed
+ zone rrset rr RRset exists (value dep) dns.Used
+
+The prerequisite section can also be left empty.
+If you have decided on the prerequisites you can tell what RRs should
+be added or deleted. The next table shows the options you have and
+what functions to call.
+
+ 3.4.2.6 - Table Of Metavalues Used In Update Section
+
+ CLASS TYPE RDATA Meaning Function
+ ---------------------------------------------------------------
+ ANY ANY empty Delete all RRsets from name dns.RemoveName
+ ANY rrset empty Delete an RRset dns.RemoveRRset
+ NONE rrset rr Delete an RR from RRset dns.Remove
+ zone rrset rr Add to an RRset dns.Insert
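+
+For example, a minimal update that inserts one A record (the zone and the
+record are illustrative) can be built with:
+
+ m := new(dns.Msg)
+ m.SetUpdate("miek.nl.")
+ rr, _ := dns.NewRR("www.miek.nl. 3600 IN A 127.0.0.1")
+ m.Insert([]dns.RR{rr})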
+
+TRANSACTION SIGNATURE
+
+A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
+The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
+
+Basic use pattern when querying with a TSIG name "axfr." (note that these key names
+must be fully qualified - as they are domain names) and the base64 secret
+"so6ZGir4GPAqINNh9U5c3A==":
+
+ c := new(dns.Client)
+ c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+ m := new(dns.Msg)
+ m.SetQuestion("miek.nl.", dns.TypeMX)
+ m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ ...
+ // When sending the TSIG RR is calculated and filled in before sending
+
+When requesting a zone transfer with TSIG (almost all TSIG usage is when
+requesting zone transfers), this is the basic use pattern. In this example we request an AXFR for
+miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
+and using the server 176.58.119.54:
+
+ t := new(dns.Transfer)
+ m := new(dns.Msg)
+ t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+ m.SetAxfr("miek.nl.")
+ m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ c, err := t.In(m, "176.58.119.54:53")
+ for r := range c { ... }
+
+You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
+If something is not correct an error is returned.
+
+Basic use pattern for validating and replying to a message that has TSIG set:
+
+ server := &dns.Server{Addr: ":53", Net: "udp"}
+ server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+ go server.ListenAndServe()
+ dns.HandleFunc(".", handleRequest)
+
+ func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ if r.IsTsig() {
+ if w.TsigStatus() == nil {
+ // *Msg r has a TSIG record and it was validated
+ m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ } else {
+ // *Msg r has a TSIG record and it was not validated
+ }
+ }
+ w.WriteMsg(m)
+ }
+
+PRIVATE RRS
+
+RFC 6895 sets aside a range of type codes for private use. This range
+is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records, these
+can be used before requesting an official type code from IANA.
+
+See http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more
+information.
+
+EDNS0
+
+EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
+by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
+abused.
+Basic use pattern for creating an (empty) OPT RR:
+
+ o := new(dns.OPT)
+ o.Hdr.Name = "." // MUST be the root zone, per definition.
+ o.Hdr.Rrtype = dns.TypeOPT
+
+The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891)
+interfaces. Currently only a few have been standardized: EDNS0_NSID
+(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
+that these options may be combined in an OPT RR.
+Basic use pattern for a server to check if (and which) options are set:
+
+ // o is a dns.OPT
+ for _, s := range o.Option {
+ switch e := s.(type) {
+ case *dns.EDNS0_NSID:
+ // do stuff with e.Nsid
+ case *dns.EDNS0_SUBNET:
+ // access e.Family, e.Address, etc.
+ }
+ }
+
+SIG(0)
+
+From RFC 2931:
+
+ SIG(0) provides protection for DNS transactions and requests ....
+ ... protection for glue records, DNS requests, protection for message headers
+ on requests and responses, and protection of the overall integrity of a response.
+
+It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
+secret approach in TSIG.
+Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
+RSASHA512.
+
+Signing subsequent messages in multi-message sessions is not implemented.
+*/
+package dns
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
new file mode 100644
index 0000000000..87c8affec2
--- /dev/null
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -0,0 +1,514 @@
+package dns
+
+import (
+ "encoding/hex"
+ "errors"
+ "net"
+ "strconv"
+)
+
+// EDNS0 Option codes.
+const (
+ EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
+ EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
+ EDNS0NSID = 0x3 // nsid (RFC5001)
+ EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
+ EDNS0DHU = 0x6 // DS Hash Understood
+ EDNS0N3U = 0x7 // NSEC3 Hash Understood
+ EDNS0SUBNET = 0x8 // client-subnet (draft-ietf-dnsop-edns-client-subnet)
+ EDNS0EXPIRE = 0x9 // EDNS0 expire
+ EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
+ EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
+ EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
+ _DO = 1 << 15 // dnssec ok
+)
+
+// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
+// See RFC 6891.
+type OPT struct {
+ Hdr RR_Header
+ Option []EDNS0 `dns:"opt"`
+}
+
+// Header implements the RR interface.
+func (rr *OPT) Header() *RR_Header {
+ return &rr.Hdr
+}
+
+func (rr *OPT) String() string {
+ s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
+ if rr.Do() {
+ s += "flags: do; "
+ } else {
+ s += "flags: ; "
+ }
+ s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
+
+ for _, o := range rr.Option {
+ switch o.(type) {
+ case *EDNS0_NSID:
+ s += "\n; NSID: " + o.String()
+ h, e := o.pack()
+ var r string
+ if e == nil {
+ for _, c := range h {
+ r += "(" + string(c) + ")"
+ }
+ s += " " + r
+ }
+ case *EDNS0_SUBNET:
+ s += "\n; SUBNET: " + o.String()
+ if o.(*EDNS0_SUBNET).DraftOption {
+ s += " (draft)"
+ }
+ case *EDNS0_UL:
+ s += "\n; UPDATE LEASE: " + o.String()
+ case *EDNS0_LLQ:
+ s += "\n; LONG LIVED QUERIES: " + o.String()
+ case *EDNS0_DAU:
+ s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
+ case *EDNS0_DHU:
+ s += "\n; DS HASH UNDERSTOOD: " + o.String()
+ case *EDNS0_N3U:
+ s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
+ case *EDNS0_LOCAL:
+ s += "\n; LOCAL OPT: " + o.String()
+ }
+ }
+ return s
+}
+
+func (rr *OPT) len() int {
+ l := rr.Hdr.len()
+ for i := 0; i < len(rr.Option); i++ {
+ l += 4 // Account for 2-byte option code and 2-byte option length.
+ lo, _ := rr.Option[i].pack()
+ l += len(lo)
+ }
+ return l
+}
+
+func (rr *OPT) copy() RR {
+ return &OPT{*rr.Hdr.copyHeader(), rr.Option}
+}
+
+// return the old value -> delete SetVersion?
+
+// Version returns the EDNS version used. Only zero is defined.
+func (rr *OPT) Version() uint8 {
+ return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16)
+}
+
+// SetVersion sets the version of EDNS. This is usually zero.
+func (rr *OPT) SetVersion(v uint8) {
+ rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16)
+}
+
+// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
+func (rr *OPT) ExtendedRcode() uint8 {
+ return uint8((rr.Hdr.Ttl & 0xFF000000) >> 24)
+}
+
+// SetExtendedRcode sets the EDNS extended RCODE field.
+func (rr *OPT) SetExtendedRcode(v uint8) {
+ rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v) << 24)
+}
+
+// UDPSize returns the UDP buffer size.
+func (rr *OPT) UDPSize() uint16 {
+ return rr.Hdr.Class
+}
+
+// SetUDPSize sets the UDP buffer size.
+func (rr *OPT) SetUDPSize(size uint16) {
+ rr.Hdr.Class = size
+}
+
+// Do returns the value of the DO (DNSSEC OK) bit.
+func (rr *OPT) Do() bool {
+ return rr.Hdr.Ttl&_DO == _DO
+}
+
+// SetDo sets the DO (DNSSEC OK) bit.
+func (rr *OPT) SetDo() {
+ rr.Hdr.Ttl |= _DO
+}
+
+// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to
+// it.
+type EDNS0 interface {
+ // Option returns the option code for the option.
+ Option() uint16
+ // pack returns the bytes of the option data.
+ pack() ([]byte, error)
+ // unpack sets the data as found in the buffer. It also sets
+ // the length of the slice as the length of the option data.
+ unpack([]byte) error
+ // String returns the string representation of the option.
+ String() string
+}
+
+// The nsid EDNS0 option is used to retrieve a nameserver
+// identifier. When sending a request, Nsid must be set to the empty string.
+// The identifier is an opaque string encoded as hex.
+// Basic use pattern for creating an nsid option:
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_NSID)
+// e.Code = dns.EDNS0NSID
+// e.Nsid = "AA"
+// o.Option = append(o.Option, e)
+type EDNS0_NSID struct {
+ Code uint16 // Always EDNS0NSID
+ Nsid string // This string needs to be hex encoded
+}
+
+func (e *EDNS0_NSID) pack() ([]byte, error) {
+ h, err := hex.DecodeString(e.Nsid)
+ if err != nil {
+ return nil, err
+ }
+ return h, nil
+}
+
+func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID }
+func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
+func (e *EDNS0_NSID) String() string { return e.Nsid }
+
+// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
+// an idea of where the client lives. It can then give back a different
+// answer depending on the location or network topology.
+// Basic use pattern for creating a subnet option:
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_SUBNET)
+// e.Code = dns.EDNS0SUBNET
+// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
+// e.NetMask = 32 // 32 for IPV4, 128 for IPv6
+// e.SourceScope = 0
+// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
+// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
+// o.Option = append(o.Option, e)
+//
+// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic
+// for which netmask applies to the address. This code will parse all the
+// available bits when unpacking (up to optlen). When packing it will apply
+// SourceNetmask. If you need more advanced logic, patches welcome and good luck.
+type EDNS0_SUBNET struct {
+ Code uint16 // Always EDNS0SUBNET
+ Family uint16 // 1 for IPv4, 2 for IPv6
+ SourceNetmask uint8
+ SourceScope uint8
+ Address net.IP
+ DraftOption bool // Set to true if using the old (0x50fa) option code
+}
+
+func (e *EDNS0_SUBNET) Option() uint16 {
+ if e.DraftOption {
+ return EDNS0SUBNETDRAFT
+ }
+ return EDNS0SUBNET
+}
+
+func (e *EDNS0_SUBNET) pack() ([]byte, error) {
+ b := make([]byte, 4)
+ b[0], b[1] = packUint16(e.Family)
+ b[2] = e.SourceNetmask
+ b[3] = e.SourceScope
+ switch e.Family {
+ case 1:
+ if e.SourceNetmask > net.IPv4len*8 {
+ return nil, errors.New("dns: bad netmask")
+ }
+ if len(e.Address.To4()) != net.IPv4len {
+ return nil, errors.New("dns: bad address")
+ }
+ ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
+ needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
+ b = append(b, ip[:needLength]...)
+ case 2:
+ if e.SourceNetmask > net.IPv6len*8 {
+ return nil, errors.New("dns: bad netmask")
+ }
+ if len(e.Address) != net.IPv6len {
+ return nil, errors.New("dns: bad address")
+ }
+ ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
+ needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
+ b = append(b, ip[:needLength]...)
+ default:
+ return nil, errors.New("dns: bad address family")
+ }
+ return b, nil
+}
+
+func (e *EDNS0_SUBNET) unpack(b []byte) error {
+ if len(b) < 4 {
+ return ErrBuf
+ }
+ e.Family, _ = unpackUint16(b, 0)
+ e.SourceNetmask = b[2]
+ e.SourceScope = b[3]
+ switch e.Family {
+ case 1:
+ if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
+ return errors.New("dns: bad netmask")
+ }
+ addr := make([]byte, net.IPv4len)
+ for i := 0; i < net.IPv4len && 4+i < len(b); i++ {
+ addr[i] = b[4+i]
+ }
+ e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3])
+ case 2:
+ if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
+ return errors.New("dns: bad netmask")
+ }
+ addr := make([]byte, net.IPv6len)
+ for i := 0; i < net.IPv6len && 4+i < len(b); i++ {
+ addr[i] = b[4+i]
+ }
+ e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4],
+ addr[5], addr[6], addr[7], addr[8], addr[9], addr[10],
+ addr[11], addr[12], addr[13], addr[14], addr[15]}
+ default:
+ return errors.New("dns: bad address family")
+ }
+ return nil
+}
+
+func (e *EDNS0_SUBNET) String() (s string) {
+ if e.Address == nil {
+ s = ""
+ } else if e.Address.To4() != nil {
+ s = e.Address.String()
+ } else {
+ s = "[" + e.Address.String() + "]"
+ }
+ s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
+ return
+}
+
+// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
+// an expiration on an update RR. This is helpful for clients that cannot clean
+// up after themselves. This is a draft RFC and more information can be found at
+// http://files.dns-sd.org/draft-sekar-dns-ul.txt
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_UL)
+// e.Code = dns.EDNS0UL
+// e.Lease = 120 // in seconds
+// o.Option = append(o.Option, e)
+type EDNS0_UL struct {
+ Code uint16 // Always EDNS0UL
+ Lease uint32
+}
+
+func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
+func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
+
+// Copied: http://golang.org/src/pkg/net/dnsmsg.go
+func (e *EDNS0_UL) pack() ([]byte, error) {
+ b := make([]byte, 4)
+ b[0] = byte(e.Lease >> 24)
+ b[1] = byte(e.Lease >> 16)
+ b[2] = byte(e.Lease >> 8)
+ b[3] = byte(e.Lease)
+ return b, nil
+}
+
+func (e *EDNS0_UL) unpack(b []byte) error {
+ if len(b) < 4 {
+ return ErrBuf
+ }
+ e.Lease = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
+ return nil
+}
+
+// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
+// Implemented for completeness, as the EDNS0 type code is assigned.
+type EDNS0_LLQ struct {
+ Code uint16 // Always EDNS0LLQ
+ Version uint16
+ Opcode uint16
+ Error uint16
+ Id uint64
+ LeaseLife uint32
+}
+
+func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
+
+func (e *EDNS0_LLQ) pack() ([]byte, error) {
+ b := make([]byte, 18)
+ b[0], b[1] = packUint16(e.Version)
+ b[2], b[3] = packUint16(e.Opcode)
+ b[4], b[5] = packUint16(e.Error)
+ b[6] = byte(e.Id >> 56)
+ b[7] = byte(e.Id >> 48)
+ b[8] = byte(e.Id >> 40)
+ b[9] = byte(e.Id >> 32)
+ b[10] = byte(e.Id >> 24)
+ b[11] = byte(e.Id >> 16)
+ b[12] = byte(e.Id >> 8)
+ b[13] = byte(e.Id)
+ b[14] = byte(e.LeaseLife >> 24)
+ b[15] = byte(e.LeaseLife >> 16)
+ b[16] = byte(e.LeaseLife >> 8)
+ b[17] = byte(e.LeaseLife)
+ return b, nil
+}
+
+func (e *EDNS0_LLQ) unpack(b []byte) error {
+ if len(b) < 18 {
+ return ErrBuf
+ }
+ e.Version, _ = unpackUint16(b, 0)
+ e.Opcode, _ = unpackUint16(b, 2)
+ e.Error, _ = unpackUint16(b, 4)
+ e.Id = uint64(b[6])<<56 | uint64(b[6+1])<<48 | uint64(b[6+2])<<40 |
+ uint64(b[6+3])<<32 | uint64(b[6+4])<<24 | uint64(b[6+5])<<16 | uint64(b[6+6])<<8 | uint64(b[6+7])
+ e.LeaseLife = uint32(b[14])<<24 | uint32(b[14+1])<<16 | uint32(b[14+2])<<8 | uint32(b[14+3])
+ return nil
+}
+
+func (e *EDNS0_LLQ) String() string {
+ s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
+ " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) +
+ " " + strconv.FormatUint(uint64(e.LeaseLife), 10)
+ return s
+}
+
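+// EDNS0_DAU implements the EDNS0 DNSSEC Algorithm Understood (DAU) option
+// (RFC 6975). It lists the DNSSEC signing algorithms the client understands.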
+type EDNS0_DAU struct {
+ Code uint16 // Always EDNS0DAU
+ AlgCode []uint8
+}
+
+func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
+func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
+func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
+
+func (e *EDNS0_DAU) String() string {
+ s := ""
+ for i := 0; i < len(e.AlgCode); i++ {
+ if a, ok := AlgorithmToString[e.AlgCode[i]]; ok {
+ s += " " + a
+ } else {
+ s += " " + strconv.Itoa(int(e.AlgCode[i]))
+ }
+ }
+ return s
+}
+
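+// EDNS0_DHU implements the EDNS0 DS Hash Understood (DHU) option
+// (RFC 6975). It lists the DS hash algorithms the client understands.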
+type EDNS0_DHU struct {
+ Code uint16 // Always EDNS0DHU
+ AlgCode []uint8
+}
+
+func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
+func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
+func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
+
+func (e *EDNS0_DHU) String() string {
+ s := ""
+ for i := 0; i < len(e.AlgCode); i++ {
+ if a, ok := HashToString[e.AlgCode[i]]; ok {
+ s += " " + a
+ } else {
+ s += " " + strconv.Itoa(int(e.AlgCode[i]))
+ }
+ }
+ return s
+}
+
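+// EDNS0_N3U implements the EDNS0 NSEC3 Hash Understood (N3U) option
+// (RFC 6975). It lists the NSEC3 hash algorithms the client understands.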
+type EDNS0_N3U struct {
+ Code uint16 // Always EDNS0N3U
+ AlgCode []uint8
+}
+
+func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
+func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
+func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
+
+func (e *EDNS0_N3U) String() string {
+ // Re-use the hash map
+ s := ""
+ for i := 0; i < len(e.AlgCode); i++ {
+ if a, ok := HashToString[e.AlgCode[i]]; ok {
+ s += " " + a
+ } else {
+ s += " " + strconv.Itoa(int(e.AlgCode[i]))
+ }
+ }
+ return s
+}
+
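+// EDNS0_EXPIRE implements the EDNS0 EXPIRE option (RFC 7314), which lets
+// a secondary server learn the expiration timer of a zone from the primary.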
+type EDNS0_EXPIRE struct {
+ Code uint16 // Always EDNS0EXPIRE
+ Expire uint32
+}
+
+func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
+func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
+
+func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
+ b := make([]byte, 4)
+ b[0] = byte(e.Expire >> 24)
+ b[1] = byte(e.Expire >> 16)
+ b[2] = byte(e.Expire >> 8)
+ b[3] = byte(e.Expire)
+ return b, nil
+}
+
+func (e *EDNS0_EXPIRE) unpack(b []byte) error {
+ if len(b) < 4 {
+ return ErrBuf
+ }
+ e.Expire = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
+ return nil
+}
+
+// The EDNS0_LOCAL option is used for local/experimental purposes. The option
+// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
+// (RFC6891), although any unassigned code can actually be used. The content of
+// the option is made available in Data, unaltered.
+// Basic use pattern for creating a local option:
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_LOCAL)
+// e.Code = dns.EDNS0LOCALSTART
+// e.Data = []byte{72, 82, 74}
+// o.Option = append(o.Option, e)
+type EDNS0_LOCAL struct {
+ Code uint16
+ Data []byte
+}
+
+func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
+func (e *EDNS0_LOCAL) String() string {
+ return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
+}
+
+func (e *EDNS0_LOCAL) pack() ([]byte, error) {
+ b := make([]byte, len(e.Data))
+ copied := copy(b, e.Data)
+ if copied != len(e.Data) {
+ return nil, ErrBuf
+ }
+ return b, nil
+}
+
+func (e *EDNS0_LOCAL) unpack(b []byte) error {
+ e.Data = make([]byte, len(b))
+ copied := copy(e.Data, b)
+ if copied != len(b) {
+ return ErrBuf
+ }
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go
new file mode 100644
index 0000000000..1ac1664fe2
--- /dev/null
+++ b/vendor/github.com/miekg/dns/format.go
@@ -0,0 +1,96 @@
+package dns
+
+import (
+ "net"
+ "reflect"
+ "strconv"
+)
+
+// NumField returns the number of rdata fields r has.
+func NumField(r RR) int {
+ return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
+}
+
+// Field returns the rdata field i as a string. Fields are indexed starting from 1.
+// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
+// string where the types are concatenated using a space.
+// Accessing non-existing fields will cause a panic.
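+// A sketch of expected output (the MX value is illustrative, not from the
+// upstream docs):
+//
+// mx := &dns.MX{Hdr: dns.RR_Header{Name: "miek.nl."}, Preference: 10, Mx: "mx.miek.nl."}
+// dns.NumField(mx) // 2: Preference and Mx
+// dns.Field(mx, 1) // "10"
+// dns.Field(mx, 2) // "mx.miek.nl."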
+func Field(r RR, i int) string {
+ if i == 0 {
+ return ""
+ }
+ d := reflect.ValueOf(r).Elem().Field(i)
+ switch k := d.Kind(); k {
+ case reflect.String:
+ return d.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(d.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.FormatUint(d.Uint(), 10)
+ case reflect.Slice:
+ switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
+ case `dns:"a"`:
+ // TODO(miek): Hmm store this as 16 bytes
+ if d.Len() < net.IPv6len {
+ return net.IPv4(byte(d.Index(0).Uint()),
+ byte(d.Index(1).Uint()),
+ byte(d.Index(2).Uint()),
+ byte(d.Index(3).Uint())).String()
+ }
+ return net.IPv4(byte(d.Index(12).Uint()),
+ byte(d.Index(13).Uint()),
+ byte(d.Index(14).Uint()),
+ byte(d.Index(15).Uint())).String()
+ case `dns:"aaaa"`:
+ return net.IP{
+ byte(d.Index(0).Uint()),
+ byte(d.Index(1).Uint()),
+ byte(d.Index(2).Uint()),
+ byte(d.Index(3).Uint()),
+ byte(d.Index(4).Uint()),
+ byte(d.Index(5).Uint()),
+ byte(d.Index(6).Uint()),
+ byte(d.Index(7).Uint()),
+ byte(d.Index(8).Uint()),
+ byte(d.Index(9).Uint()),
+ byte(d.Index(10).Uint()),
+ byte(d.Index(11).Uint()),
+ byte(d.Index(12).Uint()),
+ byte(d.Index(13).Uint()),
+ byte(d.Index(14).Uint()),
+ byte(d.Index(15).Uint()),
+ }.String()
+ case `dns:"nsec"`:
+ if d.Len() == 0 {
+ return ""
+ }
+ s := Type(d.Index(0).Uint()).String()
+ for i := 1; i < d.Len(); i++ {
+ s += " " + Type(d.Index(i).Uint()).String()
+ }
+ return s
+ case `dns:"wks"`:
+ if d.Len() == 0 {
+ return ""
+ }
+ s := strconv.Itoa(int(d.Index(0).Uint()))
+		for i := 1; i < d.Len(); i++ { // start at 1; index 0 is already in s
+ s += " " + strconv.Itoa(int(d.Index(i).Uint()))
+ }
+ return s
+ default:
+		// if it does not have a tag, it's a string slice
+ fallthrough
+ case `dns:"txt"`:
+ if d.Len() == 0 {
+ return ""
+ }
+ s := d.Index(0).String()
+ for i := 1; i < d.Len(); i++ {
+ s += " " + d.Index(i).String()
+ }
+ return s
+ }
+ }
+ return ""
+}
diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go
new file mode 100644
index 0000000000..3944dd0632
--- /dev/null
+++ b/vendor/github.com/miekg/dns/labels.go
@@ -0,0 +1,162 @@
+package dns
+
+// Holds a bunch of helper functions for dealing with labels.
+
+// SplitDomainName splits a name string into its labels.
+// www.miek.nl. returns []string{"www", "miek", "nl"}
+// The root label (.) returns nil. Note that using
+// strings.Split(s, ".") will work in most cases, but does not handle
+// escaped dots (\.) for instance.
+func SplitDomainName(s string) (labels []string) {
+ if len(s) == 0 {
+ return nil
+ }
+ fqdnEnd := 0 // offset of the final '.' or the length of the name
+ idx := Split(s)
+ begin := 0
+ if s[len(s)-1] == '.' {
+ fqdnEnd = len(s) - 1
+ } else {
+ fqdnEnd = len(s)
+ }
+
+ switch len(idx) {
+ case 0:
+ return nil
+ case 1:
+ // no-op
+ default:
+ end := 0
+ for i := 1; i < len(idx); i++ {
+ end = idx[i]
+ labels = append(labels, s[begin:end-1])
+ begin = end
+ }
+ }
+
+ labels = append(labels, s[begin:fqdnEnd])
+ return labels
+}
+
+// CompareDomainName compares the names s1 and s2 and
+// returns how many labels they have in common starting from the *right*.
+// The comparison stops at the first inequality. The names are not downcased
+// before the comparison.
+//
+// www.miek.nl. and miek.nl. have two labels in common: miek and nl
+// www.miek.nl. and www.bla.nl. have one label in common: nl
+func CompareDomainName(s1, s2 string) (n int) {
+ s1 = Fqdn(s1)
+ s2 = Fqdn(s2)
+ l1 := Split(s1)
+ l2 := Split(s2)
+
+ // the first check: root label
+ if l1 == nil || l2 == nil {
+ return
+ }
+
+ j1 := len(l1) - 1 // end
+ i1 := len(l1) - 2 // start
+ j2 := len(l2) - 1
+ i2 := len(l2) - 2
+ // the second check can be done here: last/only label
+ // before we fall through into the for-loop below
+ if s1[l1[j1]:] == s2[l2[j2]:] {
+ n++
+ } else {
+ return
+ }
+ for {
+ if i1 < 0 || i2 < 0 {
+ break
+ }
+ if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] {
+ n++
+ } else {
+ break
+ }
+ j1--
+ i1--
+ j2--
+ i2--
+ }
+ return
+}
+
+// CountLabel counts the number of labels in the string s.
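+// For example (a sketch, not from the upstream docs):
+//
+// dns.CountLabel("www.miek.nl.") // 3
+// dns.CountLabel(".")            // 0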
+func CountLabel(s string) (labels int) {
+ if s == "." {
+ return
+ }
+ off := 0
+ end := false
+ for {
+ off, end = NextLabel(s, off)
+ labels++
+ if end {
+ return
+ }
+ }
+}
+
+// Split splits a name s into its label indexes.
+// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
+// The root name (.) returns nil. Also see SplitDomainName.
+func Split(s string) []int {
+ if s == "." {
+ return nil
+ }
+ idx := make([]int, 1, 3)
+ off := 0
+ end := false
+
+ for {
+ off, end = NextLabel(s, off)
+ if end {
+ return idx
+ }
+ idx = append(idx, off)
+ }
+}
+
+// NextLabel returns the index of the start of the next label in the
+// string s starting at offset.
+// The bool end is true when the end of the string has been reached.
+// Also see PrevLabel.
+func NextLabel(s string, offset int) (i int, end bool) {
+ quote := false
+ for i = offset; i < len(s)-1; i++ {
+ switch s[i] {
+ case '\\':
+ quote = !quote
+ default:
+ quote = false
+ case '.':
+ if quote {
+ quote = !quote
+ continue
+ }
+ return i + 1, false
+ }
+ }
+ return i + 1, true
+}
+
+// PrevLabel returns the index of the label when starting from the right and
+// jumping n labels to the left.
+// The bool start is true when the start of the string has been overshot.
+// Also see NextLabel.
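+// For example (a sketch, not from the upstream docs):
+//
+// i, _ := dns.PrevLabel("www.miek.nl.", 2) // i == 4, so s[i:] == "miek.nl."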
+func PrevLabel(s string, n int) (i int, start bool) {
+ if n == 0 {
+ return len(s), false
+ }
+ lab := Split(s)
+ if lab == nil {
+ return 0, true
+ }
+ if n > len(lab) {
+ return 0, true
+ }
+ return lab[len(lab)-n], false
+}
diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
new file mode 100644
index 0000000000..3d13135c69
--- /dev/null
+++ b/vendor/github.com/miekg/dns/msg.go
@@ -0,0 +1,2030 @@
+// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
+// and to - Pack() - wire format.
+// All the packers and unpackers take a (msg []byte, off int)
+// and return (off1 int, ok bool). If they return ok==false, they
+// also return off1==len(msg), so that the next unpacker will
+// also fail. This lets us avoid checks of ok until the end of a
+// packing sequence.
+
+package dns
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/hex"
+ "math/big"
+ "math/rand"
+ "net"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
+
+var (
+ // ErrAlg indicates an error with the (DNSSEC) algorithm.
+ ErrAlg error = &Error{err: "bad algorithm"}
+ // ErrAuth indicates an error in the TSIG authentication.
+ ErrAuth error = &Error{err: "bad authentication"}
+	// ErrBuf indicates that the buffer used is too small for the message.
+	ErrBuf error = &Error{err: "buffer size too small"}
+	// ErrConnEmpty indicates a connection is being used before it is initialized.
+	ErrConnEmpty error = &Error{err: "conn has no connection"}
+	// ErrExtendedRcode indicates that the message's extended rcode is invalid.
+ ErrExtendedRcode error = &Error{err: "bad extended rcode"}
+ // ErrFqdn indicates that a domain name does not have a closing dot.
+ ErrFqdn error = &Error{err: "domain must be fully qualified"}
+ // ErrId indicates there is a mismatch with the message's ID.
+ ErrId error = &Error{err: "id mismatch"}
+ // ErrKeyAlg indicates that the algorithm in the key is not valid.
+ ErrKeyAlg error = &Error{err: "bad key algorithm"}
+	// ErrKey indicates that a key is not valid.
+	ErrKey error = &Error{err: "bad key"}
+	// ErrKeySize indicates that a key has the wrong size.
+	ErrKeySize error = &Error{err: "bad key size"}
+	// ErrNoSig indicates that no signature was found where one was expected.
+	ErrNoSig error = &Error{err: "no signature found"}
+	// ErrPrivKey indicates that a private key is not valid.
+	ErrPrivKey error = &Error{err: "bad private key"}
+	// ErrRcode indicates that an rcode is not valid.
+	ErrRcode error = &Error{err: "bad rcode"}
+	// ErrRdata indicates that the rdata of a message is not valid.
+	ErrRdata error = &Error{err: "bad rdata"}
+	// ErrRRset indicates that an RRset is not valid.
+	ErrRRset error = &Error{err: "bad rrset"}
+	// ErrSecret indicates that no TSIG secrets are defined.
+	ErrSecret error = &Error{err: "no secrets defined"}
+	// ErrShortRead indicates that a read from the network was too short.
+	ErrShortRead error = &Error{err: "short read"}
+ // ErrSig indicates that a signature can not be cryptographically validated.
+ ErrSig error = &Error{err: "bad signature"}
+	// ErrSoa indicates that no SOA RR was seen when doing zone transfers.
+	ErrSoa error = &Error{err: "no SOA"}
+ // ErrTime indicates a timing error in TSIG authentication.
+ ErrTime error = &Error{err: "bad time"}
+)
+
+// Id, by default, returns a 16-bit random number to be used as a
+// message id. The randomness provided should be good enough. As this is
+// a variable, the function can be reassigned to a custom function.
+// For instance, to make it return a static value:
+//
+// dns.Id = func() uint16 { return 3 }
+var Id func() uint16 = id
+
+// MsgHdr is a manually-unpacked version of (id, bits).
+type MsgHdr struct {
+ Id uint16
+ Response bool
+ Opcode int
+ Authoritative bool
+ Truncated bool
+ RecursionDesired bool
+ RecursionAvailable bool
+ Zero bool
+ AuthenticatedData bool
+ CheckingDisabled bool
+ Rcode int
+}
+
+// Msg contains the layout of a DNS message.
+type Msg struct {
+ MsgHdr
+	Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. This is not part of the official DNS packet format.
+ Question []Question // Holds the RR(s) of the question section.
+ Answer []RR // Holds the RR(s) of the answer section.
+ Ns []RR // Holds the RR(s) of the authority section.
+ Extra []RR // Holds the RR(s) of the additional section.
+}
+
+// TypeToString is a map of strings for each RR wire type.
+var TypeToString = map[uint16]string{
+ TypeA: "A",
+ TypeAAAA: "AAAA",
+ TypeAFSDB: "AFSDB",
+ TypeANY: "ANY", // Meta RR
+ TypeATMA: "ATMA",
+ TypeAXFR: "AXFR", // Meta RR
+ TypeCAA: "CAA",
+ TypeCDNSKEY: "CDNSKEY",
+ TypeCDS: "CDS",
+ TypeCERT: "CERT",
+ TypeCNAME: "CNAME",
+ TypeDHCID: "DHCID",
+ TypeDLV: "DLV",
+ TypeDNAME: "DNAME",
+ TypeDNSKEY: "DNSKEY",
+ TypeDS: "DS",
+ TypeEID: "EID",
+ TypeEUI48: "EUI48",
+ TypeEUI64: "EUI64",
+ TypeGID: "GID",
+ TypeGPOS: "GPOS",
+ TypeHINFO: "HINFO",
+ TypeHIP: "HIP",
+ TypeIPSECKEY: "IPSECKEY",
+ TypeISDN: "ISDN",
+ TypeIXFR: "IXFR", // Meta RR
+ TypeKEY: "KEY",
+ TypeKX: "KX",
+ TypeL32: "L32",
+ TypeL64: "L64",
+ TypeLOC: "LOC",
+ TypeLP: "LP",
+ TypeMB: "MB",
+ TypeMD: "MD",
+ TypeMF: "MF",
+ TypeMG: "MG",
+ TypeMINFO: "MINFO",
+ TypeMR: "MR",
+ TypeMX: "MX",
+ TypeNAPTR: "NAPTR",
+ TypeNID: "NID",
+ TypeNINFO: "NINFO",
+ TypeNIMLOC: "NIMLOC",
+ TypeNS: "NS",
+ TypeNSAPPTR: "NSAP-PTR",
+ TypeNSEC3: "NSEC3",
+ TypeNSEC3PARAM: "NSEC3PARAM",
+ TypeNSEC: "NSEC",
+ TypeNULL: "NULL",
+ TypeOPT: "OPT",
+ TypeOPENPGPKEY: "OPENPGPKEY",
+ TypePTR: "PTR",
+ TypeRKEY: "RKEY",
+ TypeRP: "RP",
+ TypeRRSIG: "RRSIG",
+ TypeRT: "RT",
+ TypeSIG: "SIG",
+ TypeSOA: "SOA",
+ TypeSPF: "SPF",
+ TypeSRV: "SRV",
+ TypeSSHFP: "SSHFP",
+ TypeTA: "TA",
+ TypeTALINK: "TALINK",
+ TypeTKEY: "TKEY", // Meta RR
+ TypeTLSA: "TLSA",
+ TypeTSIG: "TSIG", // Meta RR
+ TypeTXT: "TXT",
+ TypePX: "PX",
+ TypeUID: "UID",
+ TypeUINFO: "UINFO",
+ TypeUNSPEC: "UNSPEC",
+ TypeURI: "URI",
+ TypeWKS: "WKS",
+ TypeX25: "X25",
+}
+
+// StringToType is the reverse of TypeToString, needed for string parsing.
+var StringToType = reverseInt16(TypeToString)
+
+// StringToClass is the reverse of ClassToString, needed for string parsing.
+var StringToClass = reverseInt16(ClassToString)
+
+// StringToOpcode is the reverse of OpcodeToString, needed for string parsing.
+var StringToOpcode = reverseInt(OpcodeToString)
+
+// StringToRcode is the reverse of RcodeToString, needed for string parsing.
+var StringToRcode = reverseInt(RcodeToString)
+
+// ClassToString maps Classes to strings for each CLASS wire type.
+var ClassToString = map[uint16]string{
+ ClassINET: "IN",
+ ClassCSNET: "CS",
+ ClassCHAOS: "CH",
+ ClassHESIOD: "HS",
+ ClassNONE: "NONE",
+ ClassANY: "ANY",
+}
+
+// OpcodeToString maps Opcodes to strings.
+var OpcodeToString = map[int]string{
+ OpcodeQuery: "QUERY",
+ OpcodeIQuery: "IQUERY",
+ OpcodeStatus: "STATUS",
+ OpcodeNotify: "NOTIFY",
+ OpcodeUpdate: "UPDATE",
+}
+
+// RcodeToString maps Rcodes to strings.
+var RcodeToString = map[int]string{
+ RcodeSuccess: "NOERROR",
+ RcodeFormatError: "FORMERR",
+ RcodeServerFailure: "SERVFAIL",
+ RcodeNameError: "NXDOMAIN",
+ RcodeNotImplemented: "NOTIMPL",
+ RcodeRefused: "REFUSED",
+ RcodeYXDomain: "YXDOMAIN", // From RFC 2136
+ RcodeYXRrset: "YXRRSET",
+ RcodeNXRrset: "NXRRSET",
+ RcodeNotAuth: "NOTAUTH",
+ RcodeNotZone: "NOTZONE",
+ RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891
+ // RcodeBadVers: "BADVERS",
+ RcodeBadKey: "BADKEY",
+ RcodeBadTime: "BADTIME",
+ RcodeBadMode: "BADMODE",
+ RcodeBadName: "BADNAME",
+ RcodeBadAlg: "BADALG",
+ RcodeBadTrunc: "BADTRUNC",
+}
+
+// Rather than write the usual handful of routines to pack and
+// unpack every message that can appear on the wire, we use
+// reflection to write a generic pack/unpack for structs and then
+// use it. Thus, if in the future we need to define new message
+// structs, no new pack/unpack/printing code needs to be written.
+
+// Domain names are a sequence of counted strings
+// split at the dots. They end with a zero-length string.
+
+// PackDomainName packs a domain name s into msg[off:].
+// If compression is wanted compress must be true and the compression
+// map needs to hold a mapping between domain names and offsets
+// pointing into msg.
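+// A hedged usage sketch (not from the upstream docs): packing two names
+// that share a suffix, so the second name can emit a compression pointer:
+//
+// msg := make([]byte, 40)
+// comp := make(map[string]int)
+// off, _ := dns.PackDomainName("www.miek.nl.", msg, 0, comp, true)
+// off, _ = dns.PackDomainName("ftp.miek.nl.", msg, off, comp, true)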
+func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+ off1, _, err = packDomainName(s, msg, off, compression, compress)
+ return
+}
+
+func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) {
+ // special case if msg == nil
+ lenmsg := 256
+ if msg != nil {
+ lenmsg = len(msg)
+ }
+ ls := len(s)
+ if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
+ return off, 0, nil
+ }
+	// If not fully qualified, error out; but when msg == nil we just append the missing dot. #ugly
+ switch {
+ case msg == nil:
+ if s[ls-1] != '.' {
+ s += "."
+ ls++
+ }
+ case msg != nil:
+ if s[ls-1] != '.' {
+ return lenmsg, 0, ErrFqdn
+ }
+ }
+ // Each dot ends a segment of the name.
+ // We trade each dot byte for a length byte.
+ // Except for escaped dots (\.), which are normal dots.
+ // There is also a trailing zero.
+
+ // Compression
+ nameoffset := -1
+ pointer := -1
+ // Emit sequence of counted strings, chopping at dots.
+ begin := 0
+ bs := []byte(s)
+ roBs, bsFresh, escapedDot := s, true, false
+ for i := 0; i < ls; i++ {
+ if bs[i] == '\\' {
+ for j := i; j < ls-1; j++ {
+ bs[j] = bs[j+1]
+ }
+ ls--
+ if off+1 > lenmsg {
+ return lenmsg, labels, ErrBuf
+ }
+ // check for \DDD
+ if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
+ bs[i] = dddToByte(bs[i:])
+ for j := i + 1; j < ls-2; j++ {
+ bs[j] = bs[j+2]
+ }
+ ls -= 2
+ } else if bs[i] == 't' {
+ bs[i] = '\t'
+ } else if bs[i] == 'r' {
+ bs[i] = '\r'
+ } else if bs[i] == 'n' {
+ bs[i] = '\n'
+ }
+ escapedDot = bs[i] == '.'
+ bsFresh = false
+ continue
+ }
+
+ if bs[i] == '.' {
+ if i > 0 && bs[i-1] == '.' && !escapedDot {
+ // two dots back to back is not legal
+ return lenmsg, labels, ErrRdata
+ }
+ if i-begin >= 1<<6 { // top two bits of length must be clear
+ return lenmsg, labels, ErrRdata
+ }
+ // off can already (we're in a loop) be bigger than len(msg)
+ // this happens when a name isn't fully qualified
+ if off+1 > lenmsg {
+ return lenmsg, labels, ErrBuf
+ }
+ if msg != nil {
+ msg[off] = byte(i - begin)
+ }
+ offset := off
+ off++
+ for j := begin; j < i; j++ {
+ if off+1 > lenmsg {
+ return lenmsg, labels, ErrBuf
+ }
+ if msg != nil {
+ msg[off] = bs[j]
+ }
+ off++
+ }
+ if compress && !bsFresh {
+ roBs = string(bs)
+ bsFresh = true
+ }
+		// Don't try to compress '.'
+ if compress && roBs[begin:] != "." {
+ if p, ok := compression[roBs[begin:]]; !ok {
+ // Only offsets smaller than this can be used.
+ if offset < maxCompressionOffset {
+ compression[roBs[begin:]] = offset
+ }
+ } else {
+ // The first hit is the longest matching dname
+ // keep the pointer offset we get back and store
+ // the offset of the current name, because that's
+ // where we need to insert the pointer later
+
+ // If compress is true, we're allowed to compress this dname
+ if pointer == -1 && compress {
+ pointer = p // Where to point to
+ nameoffset = offset // Where to point from
+ break
+ }
+ }
+ }
+ labels++
+ begin = i + 1
+ }
+ escapedDot = false
+ }
+ // Root label is special
+ if len(bs) == 1 && bs[0] == '.' {
+ return off, labels, nil
+ }
+ // If we did compression and we find something add the pointer here
+ if pointer != -1 {
+ // We have two bytes (14 bits) to put the pointer in
+ // if msg == nil, we will never do compression
+ msg[nameoffset], msg[nameoffset+1] = packUint16(uint16(pointer ^ 0xC000))
+ off = nameoffset + 1
+ goto End
+ }
+ if msg != nil {
+ msg[off] = 0
+ }
+End:
+ off++
+ return off, labels, nil
+}
+
+// Unpack a domain name.
+// In addition to the simple sequences of counted strings above,
+// domain names are allowed to refer to strings elsewhere in the
+// packet, to avoid repeating common suffixes when returning
+// many entries in a single domain. The pointers are marked
+// by a length byte with the top two bits set. Ignoring those
+// two bits, that byte and the next give a 14 bit offset from msg[0]
+// where we should pick up the trail.
+// Note that if we jump elsewhere in the packet,
+// we return off1 == the offset after the first pointer we found,
+// which is where the next record will start.
+// In theory, the pointers are only allowed to jump backward.
+// We let them jump anywhere and stop jumping after a while.
+
+// UnpackDomainName unpacks a domain name into a string.
+func UnpackDomainName(msg []byte, off int) (string, int, error) {
+ s := make([]byte, 0, 64)
+ off1 := 0
+ lenmsg := len(msg)
+ ptr := 0 // number of pointers followed
+Loop:
+ for {
+ if off >= lenmsg {
+ return "", lenmsg, ErrBuf
+ }
+ c := int(msg[off])
+ off++
+ switch c & 0xC0 {
+ case 0x00:
+ if c == 0x00 {
+ // end of name
+ break Loop
+ }
+ // literal string
+ if off+c > lenmsg {
+ return "", lenmsg, ErrBuf
+ }
+ for j := off; j < off+c; j++ {
+ switch b := msg[j]; b {
+ case '.', '(', ')', ';', ' ', '@':
+ fallthrough
+ case '"', '\\':
+ s = append(s, '\\', b)
+ case '\t':
+ s = append(s, '\\', 't')
+ case '\r':
+ s = append(s, '\\', 'r')
+ default:
+ if b < 32 || b >= 127 { // unprintable use \DDD
+ var buf [3]byte
+ bufs := strconv.AppendInt(buf[:0], int64(b), 10)
+ s = append(s, '\\')
+ for i := 0; i < 3-len(bufs); i++ {
+ s = append(s, '0')
+ }
+ for _, r := range bufs {
+ s = append(s, r)
+ }
+ } else {
+ s = append(s, b)
+ }
+ }
+ }
+ s = append(s, '.')
+ off += c
+ case 0xC0:
+ // pointer to somewhere else in msg.
+ // remember location after first ptr,
+ // since that's how many bytes we consumed.
+ // also, don't follow too many pointers --
+ // maybe there's a loop.
+ if off >= lenmsg {
+ return "", lenmsg, ErrBuf
+ }
+ c1 := msg[off]
+ off++
+ if ptr == 0 {
+ off1 = off
+ }
+ if ptr++; ptr > 10 {
+ return "", lenmsg, &Error{err: "too many compression pointers"}
+ }
+ off = (c^0xC0)<<8 | int(c1)
+ default:
+ // 0x80 and 0x40 are reserved
+ return "", lenmsg, ErrRdata
+ }
+ }
+ if ptr == 0 {
+ off1 = off
+ }
+ if len(s) == 0 {
+ s = []byte(".")
+ }
+ return string(s), off1, nil
+}
+
+func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
+ var err error
+ if len(txt) == 0 {
+ if offset >= len(msg) {
+ return offset, ErrBuf
+ }
+ msg[offset] = 0
+ return offset, nil
+ }
+ for i := range txt {
+ if len(txt[i]) > len(tmp) {
+ return offset, ErrBuf
+ }
+ offset, err = packTxtString(txt[i], msg, offset, tmp)
+ if err != nil {
+ return offset, err
+ }
+ }
+ return offset, err
+}
+
+func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
+ lenByteOffset := offset
+ if offset >= len(msg) {
+ return offset, ErrBuf
+ }
+ offset++
+ bs := tmp[:len(s)]
+ copy(bs, s)
+ for i := 0; i < len(bs); i++ {
+ if len(msg) <= offset {
+ return offset, ErrBuf
+ }
+ if bs[i] == '\\' {
+ i++
+ if i == len(bs) {
+ break
+ }
+ // check for \DDD
+ if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
+ msg[offset] = dddToByte(bs[i:])
+ i += 2
+ } else if bs[i] == 't' {
+ msg[offset] = '\t'
+ } else if bs[i] == 'r' {
+ msg[offset] = '\r'
+ } else if bs[i] == 'n' {
+ msg[offset] = '\n'
+ } else {
+ msg[offset] = bs[i]
+ }
+ } else {
+ msg[offset] = bs[i]
+ }
+ offset++
+ }
+ l := offset - lenByteOffset - 1
+ if l > 255 {
+ return offset, &Error{err: "string exceeded 255 bytes in txt"}
+ }
+ msg[lenByteOffset] = byte(l)
+ return offset, nil
+}
+
+func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
+ if offset >= len(msg) {
+ return offset, ErrBuf
+ }
+ bs := tmp[:len(s)]
+ copy(bs, s)
+ for i := 0; i < len(bs); i++ {
+ if len(msg) <= offset {
+ return offset, ErrBuf
+ }
+ if bs[i] == '\\' {
+ i++
+ if i == len(bs) {
+ break
+ }
+ // check for \DDD
+ if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
+ msg[offset] = dddToByte(bs[i:])
+ i += 2
+ } else {
+ msg[offset] = bs[i]
+ }
+ } else {
+ msg[offset] = bs[i]
+ }
+ offset++
+ }
+ return offset, nil
+}
+
+func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
+ off = off0
+ var s string
+ for off < len(msg) && err == nil {
+ s, off, err = unpackTxtString(msg, off)
+ if err == nil {
+ ss = append(ss, s)
+ }
+ }
+ return
+}
+
+func unpackTxtString(msg []byte, offset int) (string, int, error) {
+ if offset+1 > len(msg) {
+ return "", offset, &Error{err: "overflow unpacking txt"}
+ }
+ l := int(msg[offset])
+ if offset+l+1 > len(msg) {
+ return "", offset, &Error{err: "overflow unpacking txt"}
+ }
+ s := make([]byte, 0, l)
+ for _, b := range msg[offset+1 : offset+1+l] {
+ switch b {
+ case '"', '\\':
+ s = append(s, '\\', b)
+ case '\t':
+ s = append(s, `\t`...)
+ case '\r':
+ s = append(s, `\r`...)
+ case '\n':
+ s = append(s, `\n`...)
+ default:
+ if b < 32 || b > 127 { // unprintable
+ var buf [3]byte
+ bufs := strconv.AppendInt(buf[:0], int64(b), 10)
+ s = append(s, '\\')
+ for i := 0; i < 3-len(bufs); i++ {
+ s = append(s, '0')
+ }
+ for _, r := range bufs {
+ s = append(s, r)
+ }
+ } else {
+ s = append(s, b)
+ }
+ }
+ }
+ offset += 1 + l
+ return string(s), offset, nil
+}
+
+// Pack a reflect.StructValue into msg. Struct members can only be uint8, uint16, uint32, string,
+// slices and other (often anonymous) structs.
+func packStructValue(val reflect.Value, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+ var txtTmp []byte
+ lenmsg := len(msg)
+ numfield := val.NumField()
+ for i := 0; i < numfield; i++ {
+ typefield := val.Type().Field(i)
+ if typefield.Tag == `dns:"-"` {
+ continue
+ }
+ switch fv := val.Field(i); fv.Kind() {
+ default:
+ return lenmsg, &Error{err: "bad kind packing"}
+ case reflect.Interface:
+			// PrivateRR is the only RR implementation that has an interface field.
+			// Therefore it's expected that this interface would be PrivateRdata.
+ switch data := fv.Interface().(type) {
+ case PrivateRdata:
+ n, err := data.Pack(msg[off:])
+ if err != nil {
+ return lenmsg, err
+ }
+ off += n
+ default:
+ return lenmsg, &Error{err: "bad kind interface packing"}
+ }
+ case reflect.Slice:
+ switch typefield.Tag {
+ default:
+ return lenmsg, &Error{"bad tag packing slice: " + typefield.Tag.Get("dns")}
+ case `dns:"domain-name"`:
+ for j := 0; j < val.Field(i).Len(); j++ {
+ element := val.Field(i).Index(j).String()
+ off, err = PackDomainName(element, msg, off, compression, false && compress)
+ if err != nil {
+ return lenmsg, err
+ }
+ }
+ case `dns:"txt"`:
+ if txtTmp == nil {
+ txtTmp = make([]byte, 256*4+1)
+ }
+ off, err = packTxt(fv.Interface().([]string), msg, off, txtTmp)
+ if err != nil {
+ return lenmsg, err
+ }
+ case `dns:"opt"`: // edns
+ for j := 0; j < val.Field(i).Len(); j++ {
+ element := val.Field(i).Index(j).Interface()
+ b, e := element.(EDNS0).pack()
+ if e != nil {
+ return lenmsg, &Error{err: "overflow packing opt"}
+ }
+ // Option code
+ msg[off], msg[off+1] = packUint16(element.(EDNS0).Option())
+ // Length
+ msg[off+2], msg[off+3] = packUint16(uint16(len(b)))
+ off += 4
+ if off+len(b) > lenmsg {
+ copy(msg[off:], b)
+ off = lenmsg
+ continue
+ }
+ // Actual data
+ copy(msg[off:off+len(b)], b)
+ off += len(b)
+ }
+ case `dns:"a"`:
+ if val.Type().String() == "dns.IPSECKEY" {
+ // Field(2) is GatewayType, must be 1
+ if val.Field(2).Uint() != 1 {
+ continue
+ }
+ }
+			// It must be a slice of 4; even if it is 16, we encode
+			// only the IPv4 bytes (the last 4 of a 16-byte slice)
+ if off+net.IPv4len > lenmsg {
+ return lenmsg, &Error{err: "overflow packing a"}
+ }
+ switch fv.Len() {
+ case net.IPv6len:
+ msg[off] = byte(fv.Index(12).Uint())
+ msg[off+1] = byte(fv.Index(13).Uint())
+ msg[off+2] = byte(fv.Index(14).Uint())
+ msg[off+3] = byte(fv.Index(15).Uint())
+ off += net.IPv4len
+ case net.IPv4len:
+ msg[off] = byte(fv.Index(0).Uint())
+ msg[off+1] = byte(fv.Index(1).Uint())
+ msg[off+2] = byte(fv.Index(2).Uint())
+ msg[off+3] = byte(fv.Index(3).Uint())
+ off += net.IPv4len
+ case 0:
+ // Allowed, for dynamic updates
+ default:
+ return lenmsg, &Error{err: "overflow packing a"}
+ }
+ case `dns:"aaaa"`:
+ if val.Type().String() == "dns.IPSECKEY" {
+ // Field(2) is GatewayType, must be 2
+ if val.Field(2).Uint() != 2 {
+ continue
+ }
+ }
+ if fv.Len() == 0 {
+ break
+ }
+ if fv.Len() > net.IPv6len || off+fv.Len() > lenmsg {
+ return lenmsg, &Error{err: "overflow packing aaaa"}
+ }
+ for j := 0; j < net.IPv6len; j++ {
+ msg[off] = byte(fv.Index(j).Uint())
+ off++
+ }
+ case `dns:"wks"`:
+			// TODO(miek): this is wrong, should be lenrd
+ if off == lenmsg {
+ break // dyn. updates
+ }
+ if val.Field(i).Len() == 0 {
+ break
+ }
+ off1 := off
+ for j := 0; j < val.Field(i).Len(); j++ {
+ serv := int(fv.Index(j).Uint())
+ if off+serv/8+1 > len(msg) {
+ return len(msg), &Error{err: "overflow packing wks"}
+ }
+ msg[off+serv/8] |= byte(1 << (7 - uint(serv%8)))
+ if off+serv/8+1 > off1 {
+ off1 = off + serv/8 + 1
+ }
+ }
+ off = off1
+ case `dns:"nsec"`: // NSEC/NSEC3
+ // This is the uint16 type bitmap
+ if val.Field(i).Len() == 0 {
+ // Do absolutely nothing
+ break
+ }
+ var lastwindow, lastlength uint16
+ for j := 0; j < val.Field(i).Len(); j++ {
+ t := uint16(fv.Index(j).Uint())
+ window := t / 256
+ length := (t-window*256)/8 + 1
+ if window > lastwindow && lastlength != 0 {
+ // New window, jump to the new offset
+ off += int(lastlength) + 2
+ lastlength = 0
+ }
+ if window < lastwindow || length < lastlength {
+ return len(msg), &Error{err: "nsec bits out of order"}
+ }
+ if off+2+int(length) > len(msg) {
+ return len(msg), &Error{err: "overflow packing nsec"}
+ }
+ // Setting the window #
+ msg[off] = byte(window)
+ // Setting the octets length
+ msg[off+1] = byte(length)
+ // Setting the bit value for the type in the right octet
+ msg[off+1+int(length)] |= byte(1 << (7 - (t % 8)))
+ lastwindow, lastlength = window, length
+ }
+ off += int(lastlength) + 2
+ }
+ case reflect.Struct:
+ off, err = packStructValue(fv, msg, off, compression, compress)
+ if err != nil {
+ return lenmsg, err
+ }
+ case reflect.Uint8:
+ if off+1 > lenmsg {
+ return lenmsg, &Error{err: "overflow packing uint8"}
+ }
+ msg[off] = byte(fv.Uint())
+ off++
+ case reflect.Uint16:
+ if off+2 > lenmsg {
+ return lenmsg, &Error{err: "overflow packing uint16"}
+ }
+ i := fv.Uint()
+ msg[off] = byte(i >> 8)
+ msg[off+1] = byte(i)
+ off += 2
+ case reflect.Uint32:
+ if off+4 > lenmsg {
+ return lenmsg, &Error{err: "overflow packing uint32"}
+ }
+ i := fv.Uint()
+ msg[off] = byte(i >> 24)
+ msg[off+1] = byte(i >> 16)
+ msg[off+2] = byte(i >> 8)
+ msg[off+3] = byte(i)
+ off += 4
+ case reflect.Uint64:
+ switch typefield.Tag {
+ default:
+ if off+8 > lenmsg {
+ return lenmsg, &Error{err: "overflow packing uint64"}
+ }
+ i := fv.Uint()
+ msg[off] = byte(i >> 56)
+ msg[off+1] = byte(i >> 48)
+ msg[off+2] = byte(i >> 40)
+ msg[off+3] = byte(i >> 32)
+ msg[off+4] = byte(i >> 24)
+ msg[off+5] = byte(i >> 16)
+ msg[off+6] = byte(i >> 8)
+ msg[off+7] = byte(i)
+ off += 8
+ case `dns:"uint48"`:
+ // Used in TSIG, where it stops at 48 bits, so we discard the upper 16
+ if off+6 > lenmsg {
+ return lenmsg, &Error{err: "overflow packing uint64 as uint48"}
+ }
+ i := fv.Uint()
+ msg[off] = byte(i >> 40)
+ msg[off+1] = byte(i >> 32)
+ msg[off+2] = byte(i >> 24)
+ msg[off+3] = byte(i >> 16)
+ msg[off+4] = byte(i >> 8)
+ msg[off+5] = byte(i)
+ off += 6
+ }
+ case reflect.String:
+ // There are multiple string encodings.
+ // The tag distinguishes ordinary strings from domain names.
+ s := fv.String()
+ switch typefield.Tag {
+ default:
+ return lenmsg, &Error{"bad tag packing string: " + typefield.Tag.Get("dns")}
+ case `dns:"base64"`:
+ b64, e := fromBase64([]byte(s))
+ if e != nil {
+ return lenmsg, e
+ }
+ copy(msg[off:off+len(b64)], b64)
+ off += len(b64)
+ case `dns:"domain-name"`:
+ if val.Type().String() == "dns.IPSECKEY" {
+				// Field(2) is GatewayType; 1 and 2 are used for addresses
+ x := val.Field(2).Uint()
+ if x == 1 || x == 2 {
+ continue
+ }
+ }
+ if off, err = PackDomainName(s, msg, off, compression, false && compress); err != nil {
+ return lenmsg, err
+ }
+ case `dns:"cdomain-name"`:
+ if off, err = PackDomainName(s, msg, off, compression, true && compress); err != nil {
+ return lenmsg, err
+ }
+ case `dns:"size-base32"`:
+			// This is purely for NSEC3 atm, the previous byte must
+			// hold the length of the encoded string. As NSEC3
+			// is only defined for SHA1, the hash length is 20 bytes (160 bits)
+ msg[off-1] = 20
+ fallthrough
+ case `dns:"base32"`:
+ b32, e := fromBase32([]byte(s))
+ if e != nil {
+ return lenmsg, e
+ }
+ copy(msg[off:off+len(b32)], b32)
+ off += len(b32)
+ case `dns:"size-hex"`:
+ fallthrough
+ case `dns:"hex"`:
+ // There is no length encoded here
+ h, e := hex.DecodeString(s)
+ if e != nil {
+ return lenmsg, e
+ }
+ if off+hex.DecodedLen(len(s)) > lenmsg {
+ return lenmsg, &Error{err: "overflow packing hex"}
+ }
+ copy(msg[off:off+hex.DecodedLen(len(s))], h)
+ off += hex.DecodedLen(len(s))
+ case `dns:"size"`:
+			// the size is already encoded in the RR, we can safely use the
+			// length of the string. The string is raw (not hex nor base64 encoded)
+ copy(msg[off:off+len(s)], s)
+ off += len(s)
+ case `dns:"octet"`:
+ bytesTmp := make([]byte, 256)
+ off, err = packOctetString(fv.String(), msg, off, bytesTmp)
+ if err != nil {
+ return lenmsg, err
+ }
+ case `dns:"txt"`:
+ fallthrough
+ case "":
+ if txtTmp == nil {
+ txtTmp = make([]byte, 256*4+1)
+ }
+ off, err = packTxtString(fv.String(), msg, off, txtTmp)
+ if err != nil {
+ return lenmsg, err
+ }
+ }
+ }
+ }
+ return off, nil
+}
+
+func structValue(any interface{}) reflect.Value {
+ return reflect.ValueOf(any).Elem()
+}
+
+// PackStruct packs any structure to wire format.
+func PackStruct(any interface{}, msg []byte, off int) (off1 int, err error) {
+ off, err = packStructValue(structValue(any), msg, off, nil, false)
+ return off, err
+}
+
+func packStructCompress(any interface{}, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+ off, err = packStructValue(structValue(any), msg, off, compression, compress)
+ return off, err
+}
+
+// Unpack a reflect.StructValue from msg.
+// Same restrictions as packStructValue.
+func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err error) {
+ lenmsg := len(msg)
+ for i := 0; i < val.NumField(); i++ {
+ if off > lenmsg {
+ return lenmsg, &Error{"bad offset unpacking"}
+ }
+ switch fv := val.Field(i); fv.Kind() {
+ default:
+ return lenmsg, &Error{err: "bad kind unpacking"}
+ case reflect.Interface:
+			// PrivateRR is the only RR implementation that has an interface field.
+			// Therefore it's expected that this interface would be PrivateRdata.
+ switch data := fv.Interface().(type) {
+ case PrivateRdata:
+ n, err := data.Unpack(msg[off:])
+ if err != nil {
+ return lenmsg, err
+ }
+ off += n
+ default:
+ return lenmsg, &Error{err: "bad kind interface unpacking"}
+ }
+ case reflect.Slice:
+ switch val.Type().Field(i).Tag {
+ default:
+ return lenmsg, &Error{"bad tag unpacking slice: " + val.Type().Field(i).Tag.Get("dns")}
+ case `dns:"domain-name"`:
+ // HIP record slice of name (or none)
+ var servers []string
+ var s string
+ for off < lenmsg {
+ s, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return lenmsg, err
+ }
+ servers = append(servers, s)
+ }
+ fv.Set(reflect.ValueOf(servers))
+ case `dns:"txt"`:
+ if off == lenmsg {
+ break
+ }
+ var txt []string
+ txt, off, err = unpackTxt(msg, off)
+ if err != nil {
+ return lenmsg, err
+ }
+ fv.Set(reflect.ValueOf(txt))
+ case `dns:"opt"`: // edns0
+ if off == lenmsg {
+ // This is an EDNS0 (OPT Record) with no rdata
+ // We can safely return here.
+ break
+ }
+ var edns []EDNS0
+ Option:
+ code := uint16(0)
+ if off+4 > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking opt"}
+ }
+ code, off = unpackUint16(msg, off)
+ optlen, off1 := unpackUint16(msg, off)
+ if off1+int(optlen) > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking opt"}
+ }
+ switch code {
+ case EDNS0NSID:
+ e := new(EDNS0_NSID)
+ if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
+ return lenmsg, err
+ }
+ edns = append(edns, e)
+ off = off1 + int(optlen)
+ case EDNS0SUBNET, EDNS0SUBNETDRAFT:
+ e := new(EDNS0_SUBNET)
+ if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
+ return lenmsg, err
+ }
+ edns = append(edns, e)
+ off = off1 + int(optlen)
+ if code == EDNS0SUBNETDRAFT {
+ e.DraftOption = true
+ }
+ case EDNS0UL:
+ e := new(EDNS0_UL)
+ if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
+ return lenmsg, err
+ }
+ edns = append(edns, e)
+ off = off1 + int(optlen)
+ case EDNS0LLQ:
+ e := new(EDNS0_LLQ)
+ if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
+ return lenmsg, err
+ }
+ edns = append(edns, e)
+ off = off1 + int(optlen)
+ case EDNS0DAU:
+ e := new(EDNS0_DAU)
+ if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
+ return lenmsg, err
+ }
+ edns = append(edns, e)
+ off = off1 + int(optlen)
+ case EDNS0DHU:
+ e := new(EDNS0_DHU)
+ if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
+ return lenmsg, err
+ }
+ edns = append(edns, e)
+ off = off1 + int(optlen)
+ case EDNS0N3U:
+ e := new(EDNS0_N3U)
+ if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
+ return lenmsg, err
+ }
+ edns = append(edns, e)
+ off = off1 + int(optlen)
+ default:
+ e := new(EDNS0_LOCAL)
+ e.Code = code
+ if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
+ return lenmsg, err
+ }
+ edns = append(edns, e)
+ off = off1 + int(optlen)
+ }
+ if off < lenmsg {
+ goto Option
+ }
+ fv.Set(reflect.ValueOf(edns))
+ case `dns:"a"`:
+ if val.Type().String() == "dns.IPSECKEY" {
+ // Field(2) is GatewayType, must be 1
+ if val.Field(2).Uint() != 1 {
+ continue
+ }
+ }
+ if off == lenmsg {
+ break // dyn. update
+ }
+ if off+net.IPv4len > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking a"}
+ }
+ fv.Set(reflect.ValueOf(net.IPv4(msg[off], msg[off+1], msg[off+2], msg[off+3])))
+ off += net.IPv4len
+ case `dns:"aaaa"`:
+ if val.Type().String() == "dns.IPSECKEY" {
+ // Field(2) is GatewayType, must be 2
+ if val.Field(2).Uint() != 2 {
+ continue
+ }
+ }
+ if off == lenmsg {
+ break
+ }
+ if off+net.IPv6len > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking aaaa"}
+ }
+ fv.Set(reflect.ValueOf(net.IP{msg[off], msg[off+1], msg[off+2], msg[off+3], msg[off+4],
+ msg[off+5], msg[off+6], msg[off+7], msg[off+8], msg[off+9], msg[off+10],
+ msg[off+11], msg[off+12], msg[off+13], msg[off+14], msg[off+15]}))
+ off += net.IPv6len
+ case `dns:"wks"`:
+ // Rest of the record is the bitmap
+ var serv []uint16
+ j := 0
+ for off < lenmsg {
+ if off+1 > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking wks"}
+ }
+ b := msg[off]
+ // Check the bits one by one, and set the type
+ if b&0x80 == 0x80 {
+ serv = append(serv, uint16(j*8+0))
+ }
+ if b&0x40 == 0x40 {
+ serv = append(serv, uint16(j*8+1))
+ }
+ if b&0x20 == 0x20 {
+ serv = append(serv, uint16(j*8+2))
+ }
+ if b&0x10 == 0x10 {
+ serv = append(serv, uint16(j*8+3))
+ }
+ if b&0x8 == 0x8 {
+ serv = append(serv, uint16(j*8+4))
+ }
+ if b&0x4 == 0x4 {
+ serv = append(serv, uint16(j*8+5))
+ }
+ if b&0x2 == 0x2 {
+ serv = append(serv, uint16(j*8+6))
+ }
+ if b&0x1 == 0x1 {
+ serv = append(serv, uint16(j*8+7))
+ }
+ j++
+ off++
+ }
+ fv.Set(reflect.ValueOf(serv))
+ case `dns:"nsec"`: // NSEC/NSEC3
+ if off == len(msg) {
+ break
+ }
+ // Rest of the record is the type bitmap
+ var nsec []uint16
+ length := 0
+ window := 0
+ lastwindow := -1
+ for off < len(msg) {
+ if off+2 > len(msg) {
+ return len(msg), &Error{err: "overflow unpacking nsecx"}
+ }
+ window = int(msg[off])
+ length = int(msg[off+1])
+ off += 2
+ if window <= lastwindow {
+ // RFC 4034: Blocks are present in the NSEC RR RDATA in
+ // increasing numerical order.
+ return len(msg), &Error{err: "out of order NSEC block"}
+ }
+ if length == 0 {
+ // RFC 4034: Blocks with no types present MUST NOT be included.
+ return len(msg), &Error{err: "empty NSEC block"}
+ }
+ if length > 32 {
+ return len(msg), &Error{err: "NSEC block too long"}
+ }
+ if off+length > len(msg) {
+ return len(msg), &Error{err: "overflowing NSEC block"}
+ }
+
+ // Walk the bytes in the window and extract the type bits
+ for j := 0; j < length; j++ {
+ b := msg[off+j]
+ // Check the bits one by one, and set the type
+ if b&0x80 == 0x80 {
+ nsec = append(nsec, uint16(window*256+j*8+0))
+ }
+ if b&0x40 == 0x40 {
+ nsec = append(nsec, uint16(window*256+j*8+1))
+ }
+ if b&0x20 == 0x20 {
+ nsec = append(nsec, uint16(window*256+j*8+2))
+ }
+ if b&0x10 == 0x10 {
+ nsec = append(nsec, uint16(window*256+j*8+3))
+ }
+ if b&0x8 == 0x8 {
+ nsec = append(nsec, uint16(window*256+j*8+4))
+ }
+ if b&0x4 == 0x4 {
+ nsec = append(nsec, uint16(window*256+j*8+5))
+ }
+ if b&0x2 == 0x2 {
+ nsec = append(nsec, uint16(window*256+j*8+6))
+ }
+ if b&0x1 == 0x1 {
+ nsec = append(nsec, uint16(window*256+j*8+7))
+ }
+ }
+ off += length
+ lastwindow = window
+ }
+ fv.Set(reflect.ValueOf(nsec))
+ }
+ case reflect.Struct:
+ off, err = unpackStructValue(fv, msg, off)
+ if err != nil {
+ return lenmsg, err
+ }
+ if val.Type().Field(i).Name == "Hdr" {
+ lenrd := off + int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
+ if lenrd > lenmsg {
+ return lenmsg, &Error{err: "overflowing header size"}
+ }
+ msg = msg[:lenrd]
+ lenmsg = len(msg)
+ }
+ case reflect.Uint8:
+ if off == lenmsg {
+ break
+ }
+ if off+1 > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking uint8"}
+ }
+ fv.SetUint(uint64(uint8(msg[off])))
+ off++
+ case reflect.Uint16:
+ if off == lenmsg {
+ break
+ }
+ var i uint16
+ if off+2 > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking uint16"}
+ }
+ i, off = unpackUint16(msg, off)
+ fv.SetUint(uint64(i))
+ case reflect.Uint32:
+ if off == lenmsg {
+ break
+ }
+ if off+4 > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking uint32"}
+ }
+ fv.SetUint(uint64(uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3])))
+ off += 4
+ case reflect.Uint64:
+ if off == lenmsg {
+ break
+ }
+ switch val.Type().Field(i).Tag {
+ default:
+ if off+8 > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking uint64"}
+ }
+ fv.SetUint(uint64(uint64(msg[off])<<56 | uint64(msg[off+1])<<48 | uint64(msg[off+2])<<40 |
+ uint64(msg[off+3])<<32 | uint64(msg[off+4])<<24 | uint64(msg[off+5])<<16 | uint64(msg[off+6])<<8 | uint64(msg[off+7])))
+ off += 8
+ case `dns:"uint48"`:
+ // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
+ if off+6 > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking uint64 as uint48"}
+ }
+ fv.SetUint(uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
+ uint64(msg[off+4])<<8 | uint64(msg[off+5])))
+ off += 6
+ }
+ case reflect.String:
+ var s string
+ if off == lenmsg {
+ break
+ }
+ switch val.Type().Field(i).Tag {
+ default:
+ return lenmsg, &Error{"bad tag unpacking string: " + val.Type().Field(i).Tag.Get("dns")}
+ case `dns:"octet"`:
+ s = string(msg[off:])
+ off = lenmsg
+ case `dns:"hex"`:
+ hexend := lenmsg
+ if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) {
+ hexend = off + int(val.FieldByName("HitLength").Uint())
+ }
+ if hexend > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking HIP hex"}
+ }
+ s = hex.EncodeToString(msg[off:hexend])
+ off = hexend
+ case `dns:"base64"`:
+ // Rest of the RR is base64 encoded value
+ b64end := lenmsg
+ if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) {
+ b64end = off + int(val.FieldByName("PublicKeyLength").Uint())
+ }
+ if b64end > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking HIP base64"}
+ }
+ s = toBase64(msg[off:b64end])
+ off = b64end
+ case `dns:"cdomain-name"`:
+ fallthrough
+ case `dns:"domain-name"`:
+ if val.Type().String() == "dns.IPSECKEY" {
+				// Field(2) is GatewayType; 1 and 2 are used for addresses
+ x := val.Field(2).Uint()
+ if x == 1 || x == 2 {
+ continue
+ }
+ }
+ if off == lenmsg {
+ // zero rdata foo, OK for dyn. updates
+ break
+ }
+ s, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return lenmsg, err
+ }
+ case `dns:"size-base32"`:
+ var size int
+ switch val.Type().Name() {
+ case "NSEC3":
+ switch val.Type().Field(i).Name {
+ case "NextDomain":
+ name := val.FieldByName("HashLength")
+ size = int(name.Uint())
+ }
+ }
+ if off+size > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking base32"}
+ }
+ s = toBase32(msg[off : off+size])
+ off += size
+ case `dns:"size-hex"`:
+ // a "size" string, but it must be encoded in hex in the string
+ var size int
+ switch val.Type().Name() {
+ case "NSEC3":
+ switch val.Type().Field(i).Name {
+ case "Salt":
+ name := val.FieldByName("SaltLength")
+ size = int(name.Uint())
+ case "NextDomain":
+ name := val.FieldByName("HashLength")
+ size = int(name.Uint())
+ }
+ case "TSIG":
+ switch val.Type().Field(i).Name {
+ case "MAC":
+ name := val.FieldByName("MACSize")
+ size = int(name.Uint())
+ case "OtherData":
+ name := val.FieldByName("OtherLen")
+ size = int(name.Uint())
+ }
+ }
+ if off+size > lenmsg {
+ return lenmsg, &Error{err: "overflow unpacking hex"}
+ }
+ s = hex.EncodeToString(msg[off : off+size])
+ off += size
+ case `dns:"txt"`:
+ fallthrough
+ case "":
+ s, off, err = unpackTxtString(msg, off)
+ }
+ fv.SetString(s)
+ }
+ }
+ return off, nil
+}
+
+// Helpers for dealing with escaped bytes
+func isDigit(b byte) bool { return b >= '0' && b <= '9' }
+
+func dddToByte(s []byte) byte {
+ return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
+}
+
+// UnpackStruct unpacks a binary message from offset off to the interface
+// value given.
+func UnpackStruct(any interface{}, msg []byte, off int) (int, error) {
+ return unpackStructValue(structValue(any), msg, off)
+}
+
+// Helper function for packing and unpacking
+func intToBytes(i *big.Int, length int) []byte {
+ buf := i.Bytes()
+ if len(buf) < length {
+ b := make([]byte, length)
+ copy(b[length-len(buf):], buf)
+ return b
+ }
+ return buf
+}
+
+func unpackUint16(msg []byte, off int) (uint16, int) {
+ return uint16(msg[off])<<8 | uint16(msg[off+1]), off + 2
+}
+
+func packUint16(i uint16) (byte, byte) {
+ return byte(i >> 8), byte(i)
+}
+
+func toBase32(b []byte) string {
+ return base32.HexEncoding.EncodeToString(b)
+}
+
+func fromBase32(s []byte) (buf []byte, err error) {
+ buflen := base32.HexEncoding.DecodedLen(len(s))
+ buf = make([]byte, buflen)
+ n, err := base32.HexEncoding.Decode(buf, s)
+ buf = buf[:n]
+ return
+}
+
+func toBase64(b []byte) string {
+ return base64.StdEncoding.EncodeToString(b)
+}
+
+func fromBase64(s []byte) (buf []byte, err error) {
+ buflen := base64.StdEncoding.DecodedLen(len(s))
+ buf = make([]byte, buflen)
+ n, err := base64.StdEncoding.Decode(buf, s)
+ buf = buf[:n]
+ return
+}
+
+// PackRR packs a resource record rr into msg[off:].
+// See PackDomainName for documentation about the compression.
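+// A hedged usage sketch (dns.NewRR is defined elsewhere in this package):
+//
+// rr, _ := dns.NewRR("miek.nl. 3600 IN A 127.0.0.1")
+// buf := make([]byte, 512)
+// off, err := dns.PackRR(rr, buf, 0, nil, false)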
+func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+ if rr == nil {
+ return len(msg), &Error{err: "nil rr"}
+ }
+
+ off1, err = packStructCompress(rr, msg, off, compression, compress)
+ if err != nil {
+ return len(msg), err
+ }
+ if rawSetRdlength(msg, off, off1) {
+ return off1, nil
+ }
+ return off, ErrRdata
+}
+
+// UnpackRR unpacks msg[off:] into an RR.
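+// Continuing the PackRR sketch above, the inverse operation:
+//
+// rr2, _, err := dns.UnpackRR(buf[:off], 0)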
+func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
+ // unpack just the header, to find the rr type and length
+ var h RR_Header
+ off0 := off
+ if off, err = UnpackStruct(&h, msg, off); err != nil {
+ return nil, len(msg), err
+ }
+ end := off + int(h.Rdlength)
+ // make an rr of that type and re-unpack.
+ mk, known := typeToRR[h.Rrtype]
+ if !known {
+ rr = new(RFC3597)
+ } else {
+ rr = mk()
+ }
+ off, err = UnpackStruct(rr, msg, off0)
+ if off != end {
+ return &h, end, &Error{err: "bad rdlength"}
+ }
+ return rr, off, err
+}
+
+// Reverse a map
+func reverseInt8(m map[uint8]string) map[string]uint8 {
+ n := make(map[string]uint8)
+ for u, s := range m {
+ n[s] = u
+ }
+ return n
+}
+
+func reverseInt16(m map[uint16]string) map[string]uint16 {
+ n := make(map[string]uint16)
+ for u, s := range m {
+ n[s] = u
+ }
+ return n
+}
+
+func reverseInt(m map[int]string) map[string]int {
+ n := make(map[string]int)
+ for u, s := range m {
+ n[s] = u
+ }
+ return n
+}
+
+// Convert a MsgHdr to a string, with dig-like headers:
+//
+//;; opcode: QUERY, status: NOERROR, id: 48404
+//
+//;; flags: qr aa rd ra;
+func (h *MsgHdr) String() string {
+ if h == nil {
+ return " MsgHdr"
+ }
+
+ s := ";; opcode: " + OpcodeToString[h.Opcode]
+ s += ", status: " + RcodeToString[h.Rcode]
+ s += ", id: " + strconv.Itoa(int(h.Id)) + "\n"
+
+ s += ";; flags:"
+ if h.Response {
+ s += " qr"
+ }
+ if h.Authoritative {
+ s += " aa"
+ }
+ if h.Truncated {
+ s += " tc"
+ }
+ if h.RecursionDesired {
+ s += " rd"
+ }
+ if h.RecursionAvailable {
+ s += " ra"
+ }
+ if h.Zero { // Hmm
+ s += " z"
+ }
+ if h.AuthenticatedData {
+ s += " ad"
+ }
+ if h.CheckingDisabled {
+ s += " cd"
+ }
+
+ s += ";"
+ return s
+}
+
+// Pack packs a Msg: it is converted to wire format.
+// If dns.Compress is true the message will be in compressed wire format.
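+// A hedged usage sketch (SetQuestion is a convenience method defined
+// elsewhere in this package):
+//
+// m := new(dns.Msg)
+// m.SetQuestion("miek.nl.", dns.TypeMX)
+// wire, err := m.Pack()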
+func (dns *Msg) Pack() (msg []byte, err error) {
+ return dns.PackBuffer(nil)
+}
+
+// PackBuffer packs a Msg, using the given buffer buf. If buf is too small
+// a new buffer is allocated.
+func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
+ var dh Header
+ var compression map[string]int
+ if dns.Compress {
+ compression = make(map[string]int) // Compression pointer mappings
+ }
+
+ if dns.Rcode < 0 || dns.Rcode > 0xFFF {
+ return nil, ErrRcode
+ }
+ if dns.Rcode > 0xF {
+ // Regular RCODE field is 4 bits
+ opt := dns.IsEdns0()
+ if opt == nil {
+ return nil, ErrExtendedRcode
+ }
+ opt.SetExtendedRcode(uint8(dns.Rcode >> 4))
+ dns.Rcode &= 0xF
+ }
+
+ // Convert convenient Msg into wire-like Header.
+ dh.Id = dns.Id
+ dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode)
+ if dns.Response {
+ dh.Bits |= _QR
+ }
+ if dns.Authoritative {
+ dh.Bits |= _AA
+ }
+ if dns.Truncated {
+ dh.Bits |= _TC
+ }
+ if dns.RecursionDesired {
+ dh.Bits |= _RD
+ }
+ if dns.RecursionAvailable {
+ dh.Bits |= _RA
+ }
+ if dns.Zero {
+ dh.Bits |= _Z
+ }
+ if dns.AuthenticatedData {
+ dh.Bits |= _AD
+ }
+ if dns.CheckingDisabled {
+ dh.Bits |= _CD
+ }
+
+ // Prepare variable sized arrays.
+ question := dns.Question
+ answer := dns.Answer
+ ns := dns.Ns
+ extra := dns.Extra
+
+ dh.Qdcount = uint16(len(question))
+ dh.Ancount = uint16(len(answer))
+ dh.Nscount = uint16(len(ns))
+ dh.Arcount = uint16(len(extra))
+
+ // We need the uncompressed length here, because we first pack it and then compress it.
+ msg = buf
+ compress := dns.Compress
+ dns.Compress = false
+ if packLen := dns.Len() + 1; len(msg) < packLen {
+ msg = make([]byte, packLen)
+ }
+ dns.Compress = compress
+
+ // Pack it in: header and then the pieces.
+ off := 0
+ off, err = packStructCompress(&dh, msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ for i := 0; i < len(question); i++ {
+ off, err = packStructCompress(&question[i], msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for i := 0; i < len(answer); i++ {
+ off, err = PackRR(answer[i], msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for i := 0; i < len(ns); i++ {
+ off, err = PackRR(ns[i], msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for i := 0; i < len(extra); i++ {
+ off, err = PackRR(extra[i], msg, off, compression, dns.Compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return msg[:off], nil
+}
+
+// Unpack unpacks a binary message to a Msg structure.
+func (dns *Msg) Unpack(msg []byte) (err error) {
+ // Header.
+ var dh Header
+ off := 0
+ if off, err = UnpackStruct(&dh, msg, off); err != nil {
+ return err
+ }
+ dns.Id = dh.Id
+ dns.Response = (dh.Bits & _QR) != 0
+ dns.Opcode = int(dh.Bits>>11) & 0xF
+ dns.Authoritative = (dh.Bits & _AA) != 0
+ dns.Truncated = (dh.Bits & _TC) != 0
+ dns.RecursionDesired = (dh.Bits & _RD) != 0
+ dns.RecursionAvailable = (dh.Bits & _RA) != 0
+ dns.Zero = (dh.Bits & _Z) != 0
+ dns.AuthenticatedData = (dh.Bits & _AD) != 0
+ dns.CheckingDisabled = (dh.Bits & _CD) != 0
+ dns.Rcode = int(dh.Bits & 0xF)
+
+ // Don't pre-allocate these arrays to the header counts; those counts come from the network and cannot be trusted.
+ dns.Question = make([]Question, 0, 1)
+ dns.Answer = make([]RR, 0, 10)
+ dns.Ns = make([]RR, 0, 10)
+ dns.Extra = make([]RR, 0, 10)
+
+ var q Question
+ for i := 0; i < int(dh.Qdcount); i++ {
+ off1 := off
+ off, err = UnpackStruct(&q, msg, off)
+ if err != nil {
+ return err
+ }
+ if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie!
+ dh.Qdcount = uint16(i)
+ break
+ }
+
+ dns.Question = append(dns.Question, q)
+
+ }
+ // If the TC bit is set we return here without an error, because
+ // technically it isn't one. So we return without parsing the
+ // potentially corrupt packet and hitting an error halfway through.
+ // TODO(miek): this isn't the best strategy!
+ // A better strategy would be: set a boolean indicating a truncated message,
+ // keep parsing until we hit an error, and if that boolean is set return the
+ // message without the last parsed RR.
+ if dns.Truncated {
+ dns.Answer = nil
+ dns.Ns = nil
+ dns.Extra = nil
+ return nil
+ }
+
+ var r RR
+ for i := 0; i < int(dh.Ancount); i++ {
+ off1 := off
+ r, off, err = UnpackRR(msg, off)
+ if err != nil {
+ return err
+ }
+ if off1 == off { // Offset does not increase anymore, dh.Ancount is a lie!
+ dh.Ancount = uint16(i)
+ break
+ }
+ dns.Answer = append(dns.Answer, r)
+ }
+ for i := 0; i < int(dh.Nscount); i++ {
+ off1 := off
+ r, off, err = UnpackRR(msg, off)
+ if err != nil {
+ return err
+ }
+ if off1 == off { // Offset does not increase anymore, dh.Nscount is a lie!
+ dh.Nscount = uint16(i)
+ break
+ }
+ dns.Ns = append(dns.Ns, r)
+ }
+ for i := 0; i < int(dh.Arcount); i++ {
+ off1 := off
+ r, off, err = UnpackRR(msg, off)
+ if err != nil {
+ return err
+ }
+ if off1 == off { // Offset does not increase anymore, dh.Arcount is a lie!
+ dh.Arcount = uint16(i)
+ break
+ }
+ dns.Extra = append(dns.Extra, r)
+ }
+ if off != len(msg) {
+ // TODO(miek) make this an error?
+ // use PackOpt to let people tell how detailed the error reporting should be?
+ // println("dns: extra bytes in dns packet", off, "<", len(msg))
+ }
+ return nil
+}
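+
+// exampleUnpack is a minimal illustrative sketch of the receiving side of
+// the round trip: wire bytes (e.g. produced by Pack) go back into a Msg.
+func exampleUnpack(wire []byte) (*Msg, error) {
+ in := new(Msg)
+ if err := in.Unpack(wire); err != nil {
+ return nil, err
+ }
+ return in, nil
+}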
+
+// Convert a complete message to a string with dig-like output.
+func (dns *Msg) String() string {
+ if dns == nil {
+ return " MsgHdr"
+ }
+ s := dns.MsgHdr.String() + " "
+ s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
+ s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
+ s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
+ s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
+ if len(dns.Question) > 0 {
+ s += "\n;; QUESTION SECTION:\n"
+ for i := 0; i < len(dns.Question); i++ {
+ s += dns.Question[i].String() + "\n"
+ }
+ }
+ if len(dns.Answer) > 0 {
+ s += "\n;; ANSWER SECTION:\n"
+ for i := 0; i < len(dns.Answer); i++ {
+ if dns.Answer[i] != nil {
+ s += dns.Answer[i].String() + "\n"
+ }
+ }
+ }
+ if len(dns.Ns) > 0 {
+ s += "\n;; AUTHORITY SECTION:\n"
+ for i := 0; i < len(dns.Ns); i++ {
+ if dns.Ns[i] != nil {
+ s += dns.Ns[i].String() + "\n"
+ }
+ }
+ }
+ if len(dns.Extra) > 0 {
+ s += "\n;; ADDITIONAL SECTION:\n"
+ for i := 0; i < len(dns.Extra); i++ {
+ if dns.Extra[i] != nil {
+ s += dns.Extra[i].String() + "\n"
+ }
+ }
+ }
+ return s
+}
+
+// Len returns the message length when in (un)compressed wire format.
+// If dns.Compress is true, compression is taken into account. Len()
+// is provided as a faster way to get the size of the resulting packet
+// than packing it, measuring the size and discarding the buffer.
+func (dns *Msg) Len() int {
+ // We always return one more than needed.
+ l := 12 // Message header is always 12 bytes
+ var compression map[string]int
+ if dns.Compress {
+ compression = make(map[string]int)
+ }
+ for i := 0; i < len(dns.Question); i++ {
+ l += dns.Question[i].len()
+ if dns.Compress {
+ compressionLenHelper(compression, dns.Question[i].Name)
+ }
+ }
+ for i := 0; i < len(dns.Answer); i++ {
+ l += dns.Answer[i].len()
+ if dns.Compress {
+ k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name)
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelper(compression, dns.Answer[i].Header().Name)
+ k, ok = compressionLenSearchType(compression, dns.Answer[i])
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelperType(compression, dns.Answer[i])
+ }
+ }
+ for i := 0; i < len(dns.Ns); i++ {
+ l += dns.Ns[i].len()
+ if dns.Compress {
+ k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name)
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelper(compression, dns.Ns[i].Header().Name)
+ k, ok = compressionLenSearchType(compression, dns.Ns[i])
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelperType(compression, dns.Ns[i])
+ }
+ }
+ for i := 0; i < len(dns.Extra); i++ {
+ l += dns.Extra[i].len()
+ if dns.Compress {
+ k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name)
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelper(compression, dns.Extra[i].Header().Name)
+ k, ok = compressionLenSearchType(compression, dns.Extra[i])
+ if ok {
+ l += 1 - k
+ }
+ compressionLenHelperType(compression, dns.Extra[i])
+ }
+ }
+ return l
+}
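+
+// exampleLenBuffer is a minimal illustrative sketch of the intended pairing
+// of Len and PackBuffer: size a reusable buffer once, then pack into it.
+func exampleLenBuffer(m *Msg, scratch []byte) ([]byte, error) {
+ if need := m.Len() + 1; cap(scratch) < need { // +1 mirrors PackBuffer's own slack
+ scratch = make([]byte, need)
+ }
+ return m.PackBuffer(scratch[:cap(scratch)])
+}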
+
+// Put all suffixes of the name in the compression map.
+func compressionLenHelper(c map[string]int, s string) {
+ pref := ""
+ lbs := Split(s)
+ for j := len(lbs) - 1; j >= 0; j-- {
+ pref = s[lbs[j]:]
+ if _, ok := c[pref]; !ok {
+ c[pref] = len(pref)
+ }
+ }
+}
+
+// Look up each part of the name in the compression map and return the
+// length of the match; keep searching so we get the longest match.
+func compressionLenSearch(c map[string]int, s string) (int, bool) {
+ off := 0
+ end := false
+ if s == "" { // don't bork on bogus data
+ return 0, false
+ }
+ for {
+ if _, ok := c[s[off:]]; ok {
+ return len(s[off:]), true
+ }
+ if end {
+ break
+ }
+ off, end = NextLabel(s, off)
+ }
+ return 0, false
+}
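+
+// exampleCompression is a minimal illustrative sketch of how the two helpers
+// above cooperate: the helper stores every suffix of a packed name, and the
+// search then finds the longest already-stored suffix of a new name.
+func exampleCompression() (int, bool) {
+ c := make(map[string]int)
+ compressionLenHelper(c, "www.example.org.")
+ // c now holds "www.example.org.", "example.org." and "org.".
+ return compressionLenSearch(c, "mail.example.org.") // matches "example.org."
+}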
+
+// TODO(miek): should add all types, because they all can be *used* for compression.
+func compressionLenHelperType(c map[string]int, r RR) {
+ switch x := r.(type) {
+ case *NS:
+ compressionLenHelper(c, x.Ns)
+ case *MX:
+ compressionLenHelper(c, x.Mx)
+ case *CNAME:
+ compressionLenHelper(c, x.Target)
+ case *PTR:
+ compressionLenHelper(c, x.Ptr)
+ case *SOA:
+ compressionLenHelper(c, x.Ns)
+ compressionLenHelper(c, x.Mbox)
+ case *MB:
+ compressionLenHelper(c, x.Mb)
+ case *MG:
+ compressionLenHelper(c, x.Mg)
+ case *MR:
+ compressionLenHelper(c, x.Mr)
+ case *MF:
+ compressionLenHelper(c, x.Mf)
+ case *MD:
+ compressionLenHelper(c, x.Md)
+ case *RT:
+ compressionLenHelper(c, x.Host)
+ case *MINFO:
+ compressionLenHelper(c, x.Rmail)
+ compressionLenHelper(c, x.Email)
+ case *AFSDB:
+ compressionLenHelper(c, x.Hostname)
+ }
+}
+
+// Only search on compressing these types.
+func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
+ switch x := r.(type) {
+ case *NS:
+ return compressionLenSearch(c, x.Ns)
+ case *MX:
+ return compressionLenSearch(c, x.Mx)
+ case *CNAME:
+ return compressionLenSearch(c, x.Target)
+ case *PTR:
+ return compressionLenSearch(c, x.Ptr)
+ case *SOA:
+ k, ok := compressionLenSearch(c, x.Ns)
+ k1, ok1 := compressionLenSearch(c, x.Mbox)
+ if !ok && !ok1 {
+ return 0, false
+ }
+ return k + k1, true
+ case *MB:
+ return compressionLenSearch(c, x.Mb)
+ case *MG:
+ return compressionLenSearch(c, x.Mg)
+ case *MR:
+ return compressionLenSearch(c, x.Mr)
+ case *MF:
+ return compressionLenSearch(c, x.Mf)
+ case *MD:
+ return compressionLenSearch(c, x.Md)
+ case *RT:
+ return compressionLenSearch(c, x.Host)
+ case *MINFO:
+ k, ok := compressionLenSearch(c, x.Rmail)
+ k1, ok1 := compressionLenSearch(c, x.Email)
+ if !ok && !ok1 {
+ return 0, false
+ }
+ return k + k1, true
+ case *AFSDB:
+ return compressionLenSearch(c, x.Hostname)
+ }
+ return 0, false
+}
+
+// id returns a 16-bit random number to be used as a
+// message id. The randomness provided should be good enough.
+func id() uint16 {
+ return uint16(rand.Int()) ^ uint16(time.Now().Nanosecond())
+}
+
+// Copy returns a new RR which is a deep-copy of r.
+func Copy(r RR) RR {
+ r1 := r.copy()
+ return r1
+}
+
+// Copy returns a new *Msg which is a deep-copy of dns.
+func (dns *Msg) Copy() *Msg {
+ return dns.CopyTo(new(Msg))
+}
+
+// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
+func (dns *Msg) CopyTo(r1 *Msg) *Msg {
+ r1.MsgHdr = dns.MsgHdr
+ r1.Compress = dns.Compress
+
+ if len(dns.Question) > 0 {
+ r1.Question = make([]Question, len(dns.Question))
+ copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy
+ }
+
+ rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
+ var rri int
+
+ if len(dns.Answer) > 0 {
+ rrbegin := rri
+ for i := 0; i < len(dns.Answer); i++ {
+ rrArr[rri] = dns.Answer[i].copy()
+ rri++
+ }
+ r1.Answer = rrArr[rrbegin:rri:rri]
+ }
+
+ if len(dns.Ns) > 0 {
+ rrbegin := rri
+ for i := 0; i < len(dns.Ns); i++ {
+ rrArr[rri] = dns.Ns[i].copy()
+ rri++
+ }
+ r1.Ns = rrArr[rrbegin:rri:rri]
+ }
+
+ if len(dns.Extra) > 0 {
+ rrbegin := rri
+ for i := 0; i < len(dns.Extra); i++ {
+ rrArr[rri] = dns.Extra[i].copy()
+ rri++
+ }
+ r1.Extra = rrArr[rrbegin:rri:rri]
+ }
+
+ return r1
+}
diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go
new file mode 100644
index 0000000000..d2392c6ec6
--- /dev/null
+++ b/vendor/github.com/miekg/dns/nsecx.go
@@ -0,0 +1,112 @@
+package dns
+
+import (
+ "crypto/sha1"
+ "hash"
+ "io"
+ "strings"
+)
+
+type saltWireFmt struct {
+ Salt string `dns:"size-hex"`
+}
+
+// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in
+// uppercase.
+func HashName(label string, ha uint8, iter uint16, salt string) string {
+ saltwire := new(saltWireFmt)
+ saltwire.Salt = salt
+ wire := make([]byte, DefaultMsgSize)
+ n, err := PackStruct(saltwire, wire, 0)
+ if err != nil {
+ return ""
+ }
+ wire = wire[:n]
+ name := make([]byte, 255)
+ off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
+ if err != nil {
+ return ""
+ }
+ name = name[:off]
+ var s hash.Hash
+ switch ha {
+ case SHA1:
+ s = sha1.New()
+ default:
+ return ""
+ }
+
+ // k = 0
+ name = append(name, wire...)
+ io.WriteString(s, string(name))
+ nsec3 := s.Sum(nil)
+ // k > 0
+ for k := uint16(0); k < iter; k++ {
+ s.Reset()
+ nsec3 = append(nsec3, wire...)
+ io.WriteString(s, string(nsec3))
+ nsec3 = s.Sum(nil)
+ }
+ return toBase32(nsec3)
+}
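+
+// exampleHashName is a minimal illustrative sketch using the RFC 5155
+// Appendix A test vector: hashing "example." with SHA1, 12 iterations and
+// salt "aabbccdd" should yield "0P9MHAVEQVM6T7VBL5LOP2U3T2RP3TOM"
+// (HashName returns uppercase base32).
+func exampleHashName() string {
+ return HashName("example.", SHA1, 12, "aabbccdd")
+}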
+
+// Denialer is an interface that should be implemented by types that are used to deny
+// the existence of answers in DNSSEC.
+type Denialer interface {
+ // Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
+ Cover(name string) bool
+ // Match will check if the ownername matches the (unhashed) name for this NSEC or NSEC3.
+ Match(name string) bool
+}
+
+// Cover implements the Denialer interface.
+func (rr *NSEC) Cover(name string) bool {
+ return true
+}
+
+// Match implements the Denialer interface.
+func (rr *NSEC) Match(name string) bool {
+ return true
+}
+
+// Cover implements the Denialer interface.
+func (rr *NSEC3) Cover(name string) bool {
+ // FIXME(miek): check if the zones match
+ // FIXME(miek): check if we're not dealing with parent nsec3
+ hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
+ labels := Split(rr.Hdr.Name)
+ if len(labels) < 2 {
+ return false
+ }
+ hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot
+ if hash == rr.NextDomain {
+ return false // empty interval
+ }
+ if hash > rr.NextDomain { // last name, points to apex
+ // hname > hash
+ // hname > rr.NextDomain
+ // TODO(miek)
+ }
+ if hname <= hash {
+ return false
+ }
+ if hname >= rr.NextDomain {
+ return false
+ }
+ return true
+}
+
+// Match implements the Denialer interface.
+func (rr *NSEC3) Match(name string) bool {
+ // FIXME(miek): Check if we are in the same zone
+ hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
+ labels := Split(rr.Hdr.Name)
+ if len(labels) < 2 {
+ return false
+ }
+ hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the .
+ if hash == hname {
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go
new file mode 100644
index 0000000000..7290791be3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/privaterr.go
@@ -0,0 +1,117 @@
+package dns
+
+import (
+ "fmt"
+ "strings"
+)
+
+// PrivateRdata is an interface used for implementing "Private Use" RR types, see
+// RFC 6895. This allows one to experiment with new RR types, without requesting an
+// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
+type PrivateRdata interface {
+ // String returns the text presentation of the Rdata of the Private RR.
+ String() string
+ // Parse parses the Rdata of the private RR.
+ Parse([]string) error
+ // Pack is used when packing a private RR into a buffer.
+ Pack([]byte) (int, error)
+ // Unpack is used when unpacking a private RR from a buffer.
+ // TODO(miek): diff. signature than Pack, see edns0.go for instance.
+ Unpack([]byte) (int, error)
+ // Copy copies the Rdata.
+ Copy(PrivateRdata) error
+ // Len returns the length in octets of the Rdata.
+ Len() int
+}
+
+// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
+// It mocks normal RRs and implements dns.RR interface.
+type PrivateRR struct {
+ Hdr RR_Header
+ Data PrivateRdata
+}
+
+func mkPrivateRR(rrtype uint16) *PrivateRR {
+ // Panics if RR is not an instance of PrivateRR.
+ rrfunc, ok := typeToRR[rrtype]
+ if !ok {
+ panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
+ }
+
+ anyrr := rrfunc()
+ switch rr := anyrr.(type) {
+ case *PrivateRR:
+ return rr
+ }
+ panic(fmt.Sprintf("dns: RR is not a PrivateRR, typeToRR[%d] generator returned %T", rrtype, anyrr))
+}
+
+// Header returns the RR header of r.
+func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
+
+func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
+
+// Private len and copy parts to satisfy the RR interface.
+func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
+func (r *PrivateRR) copy() RR {
+ // make new RR like this:
+ rr := mkPrivateRR(r.Hdr.Rrtype)
+ newh := r.Hdr.copyHeader()
+ rr.Hdr = *newh
+
+ err := r.Data.Copy(rr.Data)
+ if err != nil {
+ panic("dns: got value that could not be used to copy Private rdata")
+ }
+ return rr
+}
+
+// PrivateHandle registers a private resource record type. It requires
+// string and numeric representation of private RR type and generator function as argument.
+func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
+ rtypestr = strings.ToUpper(rtypestr)
+
+ typeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
+ TypeToString[rtype] = rtypestr
+ StringToType[rtypestr] = rtype
+
+ setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := mkPrivateRR(h.Rrtype)
+ rr.Hdr = h
+
+ var l lex
+ text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
+ FETCH:
+ for {
+ // TODO(miek): we could also be returning _QUOTE, this might or might not
+ // be an issue (basically parsing TXT becomes hard)
+ switch l = <-c; l.value {
+ case zNewline, zEOF:
+ break FETCH
+ case zString:
+ text = append(text, l.token)
+ }
+ }
+
+ err := rr.Data.Parse(text)
+ if err != nil {
+ return nil, &ParseError{f, err.Error(), l}, ""
+ }
+
+ return rr, nil, ""
+ }
+
+ typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
+}
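+
+// exampleTXTISH is a minimal illustrative PrivateRdata carrying one string;
+// the type name and the 0xFF00 code point are made up for this sketch
+// (RFC 6895 reserves 0xFF00-0xFFFE for private use).
+type exampleTXTISH struct{ data string }
+
+func (e *exampleTXTISH) String() string { return e.data }
+func (e *exampleTXTISH) Parse(txt []string) error { e.data = strings.Join(txt, " "); return nil }
+func (e *exampleTXTISH) Pack(buf []byte) (int, error) { return copy(buf, e.data), nil }
+func (e *exampleTXTISH) Unpack(buf []byte) (int, error) { e.data = string(buf); return len(buf), nil }
+func (e *exampleTXTISH) Len() int { return len(e.data) }
+func (e *exampleTXTISH) Copy(dest PrivateRdata) error {
+ d, ok := dest.(*exampleTXTISH)
+ if !ok {
+ return fmt.Errorf("dns: bad destination for example rdata copy")
+ }
+ d.data = e.data
+ return nil
+}
+
+// registerExampleTXTISH shows the registration call; real code would do this
+// once, typically during init.
+func registerExampleTXTISH() {
+ PrivateHandle("TXTISH", 0xFF00, func() PrivateRdata { return new(exampleTXTISH) })
+}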
+
+// PrivateHandleRemove removes the definitions required to support the given private RR type.
+func PrivateHandleRemove(rtype uint16) {
+ rtypestr, ok := TypeToString[rtype]
+ if ok {
+ delete(typeToRR, rtype)
+ delete(TypeToString, rtype)
+ delete(typeToparserFunc, rtype)
+ delete(StringToType, rtypestr)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go
new file mode 100644
index 0000000000..f138b7761d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/rawmsg.go
@@ -0,0 +1,95 @@
+package dns
+
+// These raw* functions do not use reflection, they directly set the values
+// in the buffer. They are faster than their reflection counterparts.
+
+// rawSetId sets the message id in msg.
+func rawSetId(msg []byte, i uint16) bool {
+ if len(msg) < 2 {
+ return false
+ }
+ msg[0], msg[1] = packUint16(i)
+ return true
+}
+
+// rawSetQuestionLen sets the length of the question section.
+func rawSetQuestionLen(msg []byte, i uint16) bool {
+ if len(msg) < 6 {
+ return false
+ }
+ msg[4], msg[5] = packUint16(i)
+ return true
+}
+
+// rawSetAnswerLen sets the length of the answer section.
+func rawSetAnswerLen(msg []byte, i uint16) bool {
+ if len(msg) < 8 {
+ return false
+ }
+ msg[6], msg[7] = packUint16(i)
+ return true
+}
+
+// rawSetNsLen sets the length of the authority section.
+func rawSetNsLen(msg []byte, i uint16) bool {
+ if len(msg) < 10 {
+ return false
+ }
+ msg[8], msg[9] = packUint16(i)
+ return true
+}
+
+// rawSetExtraLen sets the length of the additional section.
+func rawSetExtraLen(msg []byte, i uint16) bool {
+ if len(msg) < 12 {
+ return false
+ }
+ msg[10], msg[11] = packUint16(i)
+ return true
+}
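+
+// exampleRawCounts is a minimal illustrative sketch of the intended use:
+// after appending a record to an already-packed message, patch the ARCOUNT
+// in place instead of re-packing. The fixed header layout (RFC 1035,
+// section 4.1.1) is: ID 0-1, flags 2-3, QDCOUNT 4-5, ANCOUNT 6-7,
+// NSCOUNT 8-9, ARCOUNT 10-11.
+func exampleRawCounts(wire []byte, extra uint16) bool {
+ return rawSetExtraLen(wire, extra)
+}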
+
+// rawSetRdlength sets the rdlength in the header of
+// the RR. The offset 'off' must be positioned at the
+// start of the header of the RR, 'end' must be the
+// end of the RR.
+func rawSetRdlength(msg []byte, off, end int) bool {
+ l := len(msg)
+Loop:
+ for {
+ if off+1 > l {
+ return false
+ }
+ c := int(msg[off])
+ off++
+ switch c & 0xC0 {
+ case 0x00:
+ if c == 0x00 {
+ // End of the domainname
+ break Loop
+ }
+ if off+c > l {
+ return false
+ }
+ off += c
+
+ case 0xC0:
+ // pointer, next byte included, ends domainname
+ off++
+ break Loop
+ }
+ }
+ // The domainname has been seen, we are at the start of the fixed part of the header.
+ // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length.
+ off += 2 + 2 + 4
+ if off+2 > l {
+ return false
+ }
+ // off+2 is the end of the header, 'end' is the end of the rr,
+ // so 'end' - ('off'+2) is the length of the rdata.
+ rdatalen := end - (off + 2)
+ if rdatalen > 0xFFFF {
+ return false
+ }
+ msg[off], msg[off+1] = packUint16(uint16(rdatalen))
+ return true
+}
diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go
new file mode 100644
index 0000000000..b489f3f050
--- /dev/null
+++ b/vendor/github.com/miekg/dns/sanitize.go
@@ -0,0 +1,84 @@
+package dns
+
+// Dedup removes identical RRs from rrs. It preserves the original ordering.
+// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
+// rrs.
+// m is used to store the RRs temporarily. If it is nil a new map will be allocated.
+func Dedup(rrs []RR, m map[string]RR) []RR {
+ if m == nil {
+ m = make(map[string]RR)
+ }
+ // Save the keys, so we don't have to call normalizedString twice.
+ keys := make([]*string, 0, len(rrs))
+
+ for _, r := range rrs {
+ key := normalizedString(r)
+ keys = append(keys, &key)
+ if _, ok := m[key]; ok {
+ // Shortest TTL wins.
+ if m[key].Header().Ttl > r.Header().Ttl {
+ m[key].Header().Ttl = r.Header().Ttl
+ }
+ continue
+ }
+
+ m[key] = r
+ }
+ // If the length of the result map equals the amount of RRs we got,
+ // it means they were all different. We can then just return the original rrset.
+ if len(m) == len(rrs) {
+ return rrs
+ }
+
+ j := 0
+ for i, r := range rrs {
+ // If keys[i] lives in the map, we should copy and remove it.
+ if _, ok := m[*keys[i]]; ok {
+ delete(m, *keys[i])
+ rrs[j] = r
+ j++
+ }
+
+ if len(m) == 0 {
+ break
+ }
+ }
+
+ return rrs[:j]
+}
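+
+// exampleDedup is a minimal illustrative sketch: feeding two A records that
+// differ only in TTL leaves a single RR carrying the lower TTL (300 here).
+func exampleDedup() []RR {
+ a1, _ := NewRR("example.org. 3600 IN A 192.0.2.1")
+ a2, _ := NewRR("example.org. 300 IN A 192.0.2.1")
+ return Dedup([]RR{a1, a2}, nil) // nil lets Dedup allocate its own scratch map
+}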
+
+// normalizedString returns a normalized string from r. The TTL
+// is removed and the domain name is lowercased. We go from this:
+// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
+// lowercasename<TAB>CLASS<TAB>TYPE...
+func normalizedString(r RR) string {
+ // A string Go DNS makes has: domainname<TAB>TTL...
+ b := []byte(r.String())
+
+ // find the first non-escaped tab, then another, so we capture where the TTL lives.
+ esc := false
+ ttlStart, ttlEnd := 0, 0
+ for i := 0; i < len(b) && ttlEnd == 0; i++ {
+ switch {
+ case b[i] == '\\':
+ esc = !esc
+ case b[i] == '\t' && !esc:
+ if ttlStart == 0 {
+ ttlStart = i
+ continue
+ }
+ if ttlEnd == 0 {
+ ttlEnd = i
+ }
+ case b[i] >= 'A' && b[i] <= 'Z' && !esc:
+ b[i] += 32
+ default:
+ esc = false
+ }
+ }
+
+ // remove TTL.
+ copy(b[ttlStart:], b[ttlEnd:])
+ cut := ttlEnd - ttlStart
+ return string(b[:len(b)-cut])
+}
diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go
new file mode 100644
index 0000000000..c29bc2f388
--- /dev/null
+++ b/vendor/github.com/miekg/dns/scanner.go
@@ -0,0 +1,43 @@
+package dns
+
+// Implements a simple scanner that returns a byte stream from an io.Reader.
+
+import (
+ "bufio"
+ "io"
+ "text/scanner"
+)
+
+type scan struct {
+ src *bufio.Reader
+ position scanner.Position
+ eof bool // Have we just seen an EOF
+}
+
+func scanInit(r io.Reader) *scan {
+ s := new(scan)
+ s.src = bufio.NewReader(r)
+ s.position.Line = 1
+ return s
+}
+
+// tokenText returns the next byte from the input
+func (s *scan) tokenText() (byte, error) {
+ c, err := s.src.ReadByte()
+ if err != nil {
+ return c, err
+ }
+ // delay the newline handling until the next token is delivered,
+ // fixes off-by-one errors when reporting a parse error.
+ if s.eof {
+ s.position.Line++
+ s.position.Column = 0
+ s.eof = false
+ }
+ if c == '\n' {
+ s.eof = true
+ return c, nil
+ }
+ s.position.Column++
+ return c, nil
+}
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
new file mode 100644
index 0000000000..40f525e390
--- /dev/null
+++ b/vendor/github.com/miekg/dns/server.go
@@ -0,0 +1,700 @@
+// DNS server implementation.
+
+package dns
+
+import (
+ "bytes"
+ "io"
+ "net"
+ "sync"
+ "time"
+)
+
+// Maximum number of TCP queries before we close the socket.
+const maxTCPQueries = 128
+
+// Handler is implemented by any value that implements ServeDNS.
+type Handler interface {
+ ServeDNS(w ResponseWriter, r *Msg)
+}
+
+// A ResponseWriter interface is used by a DNS handler to
+// construct a DNS response.
+type ResponseWriter interface {
+ // LocalAddr returns the net.Addr of the server
+ LocalAddr() net.Addr
+ // RemoteAddr returns the net.Addr of the client that sent the current request.
+ RemoteAddr() net.Addr
+ // WriteMsg writes a reply back to the client.
+ WriteMsg(*Msg) error
+ // Write writes a raw buffer back to the client.
+ Write([]byte) (int, error)
+ // Close closes the connection.
+ Close() error
+ // TsigStatus returns the status of the Tsig.
+ TsigStatus() error
+ // TsigTimersOnly sets the tsig timers only boolean.
+ TsigTimersOnly(bool)
+ // Hijack lets the caller take over the connection.
+ // After a call to Hijack(), the DNS package will not do anything with the connection.
+ Hijack()
+}
+
+type response struct {
+ hijacked bool // connection has been hijacked by handler
+ tsigStatus error
+ tsigTimersOnly bool
+ tsigRequestMAC string
+ tsigSecret map[string]string // the tsig secrets
+ udp *net.UDPConn // i/o connection if UDP was used
+ tcp *net.TCPConn // i/o connection if TCP was used
+ udpSession *SessionUDP // oob data to get egress interface right
+ remoteAddr net.Addr // address of the client
+ writer Writer // writer to output the raw DNS bits
+}
+
+// ServeMux is a DNS request multiplexer. It matches the
+// zone name of each incoming request against a list of
+// registered patterns and calls the handler for the pattern
+// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning
+// that queries for the DS record are redirected to the parent zone (if that
+// is also registered), otherwise the child gets the query.
+// ServeMux is also safe for concurrent access from multiple goroutines.
+type ServeMux struct {
+ z map[string]Handler
+ m *sync.RWMutex
+}
+
+// NewServeMux allocates and returns a new ServeMux.
+func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} }
+
+// DefaultServeMux is the default ServeMux used by Serve.
+var DefaultServeMux = NewServeMux()
+
+// The HandlerFunc type is an adapter to allow the use of
+// ordinary functions as DNS handlers. If f is a function
+// with the appropriate signature, HandlerFunc(f) is a
+// Handler object that calls f.
+type HandlerFunc func(ResponseWriter, *Msg)
+
+// ServeDNS calls f(w, r).
+func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
+ f(w, r)
+}
+
+// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
+func HandleFailed(w ResponseWriter, r *Msg) {
+ m := new(Msg)
+ m.SetRcode(r, RcodeServerFailure)
+ // does not matter if this write fails
+ w.WriteMsg(m)
+}
+
+func failedHandler() Handler { return HandlerFunc(HandleFailed) }
+
+// ListenAndServe starts a server on the address and network specified, invoking handler
+// for incoming queries.
+func ListenAndServe(addr string, network string, handler Handler) error {
+ server := &Server{Addr: addr, Net: network, Handler: handler}
+ return server.ListenAndServe()
+}
+
+// ActivateAndServe activates a server with a listener from systemd,
+// l and p should not both be non-nil.
+// If both l and p are not nil only p will be used.
+// Invoke handler for incoming queries.
+func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
+ server := &Server{Listener: l, PacketConn: p, Handler: handler}
+ return server.ActivateAndServe()
+}
+
+func (mux *ServeMux) match(q string, t uint16) Handler {
+ mux.m.RLock()
+ defer mux.m.RUnlock()
+ var handler Handler
+ b := make([]byte, len(q)) // worst case, one label of length q
+ off := 0
+ end := false
+ for {
+ l := len(q[off:])
+ for i := 0; i < l; i++ {
+ b[i] = q[off+i]
+ if b[i] >= 'A' && b[i] <= 'Z' {
+ b[i] |= ('a' - 'A')
+ }
+ }
+ if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
+ if t != TypeDS {
+ return h
+ }
+ // Continue for DS to see if we have a parent too, if so delegate to the parent
+ handler = h
+ }
+ off, end = NextLabel(q, off)
+ if end {
+ break
+ }
+ }
+ // Wildcard match, if we have found nothing try the root zone as a last resort.
+ if h, ok := mux.z["."]; ok {
+ return h
+ }
+ return handler
+}
+
+// Handle adds a handler to the ServeMux for pattern.
+func (mux *ServeMux) Handle(pattern string, handler Handler) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ mux.z[Fqdn(pattern)] = handler
+ mux.m.Unlock()
+}
+
+// HandleFunc adds a handler function to the ServeMux for pattern.
+func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ mux.Handle(pattern, HandlerFunc(handler))
+}
+
+// HandleRemove deregisters the handler for the given pattern from the ServeMux.
+func (mux *ServeMux) HandleRemove(pattern string) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ delete(mux.z, Fqdn(pattern))
+ mux.m.Unlock()
+}
+
+// ServeDNS dispatches the request to the handler whose
+// pattern most closely matches the request message. If DefaultServeMux
+// is used the correct thing for DS queries is done: a possible parent
+// is sought.
+// If no handler is found a standard SERVFAIL message is returned.
+// If the request message does not have exactly one question in the
+// question section a SERVFAIL is returned, unless Unsafe is true.
+func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
+ var h Handler
+ if len(request.Question) < 1 { // allow more than one question
+ h = failedHandler()
+ } else {
+ if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
+ h = failedHandler()
+ }
+ }
+ h.ServeDNS(w, request)
+}
+
+// Handle registers the handler with the given pattern
+// in the DefaultServeMux. The documentation for
+// ServeMux explains how patterns are matched.
+func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
+
+// HandleRemove deregisters the handle with the given pattern
+// in the DefaultServeMux.
+func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
+
+// HandleFunc registers the handler function with the given pattern
+// in the DefaultServeMux.
+func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ DefaultServeMux.HandleFunc(pattern, handler)
+}
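+
+// exampleServe is a minimal illustrative sketch wiring a handler into the
+// default mux and serving UDP; the port is arbitrary and error handling is
+// elided for brevity.
+func exampleServe() {
+ HandleFunc("example.org.", func(w ResponseWriter, r *Msg) {
+ m := new(Msg)
+ m.SetReply(r)
+ w.WriteMsg(m)
+ })
+ go func() { _ = ListenAndServe(":8053", "udp", nil) }() // nil means DefaultServeMux
+}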
+
+// Writer writes raw DNS messages; each call to Write should send an entire message.
+type Writer interface {
+ io.Writer
+}
+
+// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
+type Reader interface {
+ // ReadTCP reads a raw message from a TCP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error)
+ // ReadUDP reads a raw message from a UDP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
+}
+
+// defaultReader is an adapter for the Server struct that implements the Reader interface
+// using the readTCP and readUDP func of the embedded Server.
+type defaultReader struct {
+ *Server
+}
+
+func (dr *defaultReader) ReadTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) {
+ return dr.readTCP(conn, timeout)
+}
+
+func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
+ return dr.readUDP(conn, timeout)
+}
+
+// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
+// Implementations should never return a nil Reader.
+type DecorateReader func(Reader) Reader
+
+// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
+// Implementations should never return a nil Writer.
+type DecorateWriter func(Writer) Writer
+
+// A Server defines parameters for running a DNS server.
+type Server struct {
+ // Address to listen on, ":dns" if empty.
+ Addr string
+ // if "tcp" it will invoke a TCP listener, otherwise an UDP one.
+ Net string
+ // TCP Listener to use, this is to aid in systemd's socket activation.
+ Listener net.Listener
+ // UDP "Listener" to use, this is to aid in systemd's socket activation.
+ PacketConn net.PacketConn
+ // Handler to invoke, dns.DefaultServeMux if nil.
+ Handler Handler
+ // Default buffer size to use to read incoming UDP messages. If not set
+ // it defaults to MinMsgSize (512 B).
+ UDPSize int
+ // The read deadline set on new connections (via net.Conn.SetReadDeadline), defaults to 2 * time.Second.
+ ReadTimeout time.Duration
+ // The write deadline set on new connections (via net.Conn.SetWriteDeadline), defaults to 2 * time.Second.
+ WriteTimeout time.Duration
+ // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
+ IdleTimeout func() time.Duration
+ // Secret(s) for TSIG, indexed by key name.
+ TsigSecret map[string]string
+ // Unsafe instructs the server to disregard any sanity checks and directly hand the message to
+ // the handler. It will specifically not check if the query has the QR bit not set.
+ Unsafe bool
+ // If NotifyStartedFunc is set it is called once the server has started listening.
+ NotifyStartedFunc func()
+ // DecorateReader is optional, allows customization of the process that reads raw DNS messages.
+ DecorateReader DecorateReader
+ // DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
+ DecorateWriter DecorateWriter
+
+ // For graceful shutdown.
+ stopUDP chan bool
+ stopTCP chan bool
+ wgUDP sync.WaitGroup
+ wgTCP sync.WaitGroup
+
+ // make start/shutdown not racy
+ lock sync.Mutex
+ started bool
+}
+
+// ListenAndServe starts a nameserver on the configured address in *Server.
+func (srv *Server) ListenAndServe() error {
+ srv.lock.Lock()
+ if srv.started {
+ srv.lock.Unlock()
+ return &Error{err: "server already started"}
+ }
+ srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
+ srv.started = true
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":domain"
+ }
+ if srv.UDPSize == 0 {
+ srv.UDPSize = MinMsgSize
+ }
+ switch srv.Net {
+ case "tcp", "tcp4", "tcp6":
+ a, e := net.ResolveTCPAddr(srv.Net, addr)
+ if e != nil {
+ return e
+ }
+ l, e := net.ListenTCP(srv.Net, a)
+ if e != nil {
+ return e
+ }
+ srv.Listener = l
+ srv.lock.Unlock()
+ return srv.serveTCP(l)
+ case "udp", "udp4", "udp6":
+ a, e := net.ResolveUDPAddr(srv.Net, addr)
+ if e != nil {
+ return e
+ }
+ l, e := net.ListenUDP(srv.Net, a)
+ if e != nil {
+ return e
+ }
+ if e := setUDPSocketOptions(l); e != nil {
+ return e
+ }
+ srv.PacketConn = l
+ srv.lock.Unlock()
+ return srv.serveUDP(l)
+ }
+ srv.lock.Unlock()
+ return &Error{err: "bad network"}
+}
+
+// ActivateAndServe starts a nameserver with the PacketConn or Listener
+// configured in *Server. Its main use is to start a server from systemd.
+func (srv *Server) ActivateAndServe() error {
+ srv.lock.Lock()
+ if srv.started {
+ srv.lock.Unlock()
+ return &Error{err: "server already started"}
+ }
+ srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
+ srv.started = true
+ pConn := srv.PacketConn
+ l := srv.Listener
+ srv.lock.Unlock()
+ if pConn != nil {
+ if srv.UDPSize == 0 {
+ srv.UDPSize = MinMsgSize
+ }
+ if t, ok := pConn.(*net.UDPConn); ok {
+ if e := setUDPSocketOptions(t); e != nil {
+ return e
+ }
+ return srv.serveUDP(t)
+ }
+ }
+ if l != nil {
+ if t, ok := l.(*net.TCPListener); ok {
+ return srv.serveTCP(t)
+ }
+ }
+ return &Error{err: "bad listeners"}
+}
+
+// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and
+// ActivateAndServe will return. All in progress queries are completed before the server
+// is taken down. If the Shutdown is taking longer than the reading timeout an error
+// is returned.
+func (srv *Server) Shutdown() error {
+ srv.lock.Lock()
+ if !srv.started {
+ srv.lock.Unlock()
+ return &Error{err: "server not started"}
+ }
+ srv.started = false
+ net, addr := srv.Net, srv.Addr
+ switch {
+ case srv.Listener != nil:
+ a := srv.Listener.Addr()
+ net, addr = a.Network(), a.String()
+ case srv.PacketConn != nil:
+ a := srv.PacketConn.LocalAddr()
+ net, addr = a.Network(), a.String()
+ }
+ srv.lock.Unlock()
+
+ fin := make(chan bool)
+ switch net {
+ case "tcp", "tcp4", "tcp6":
+ go func() {
+ srv.stopTCP <- true
+ srv.wgTCP.Wait()
+ fin <- true
+ }()
+
+ case "udp", "udp4", "udp6":
+ go func() {
+ srv.stopUDP <- true
+ srv.wgUDP.Wait()
+ fin <- true
+ }()
+ }
+
+ c := &Client{Net: net}
+ go c.Exchange(new(Msg), addr) // extra query to unblock the ReadXXX loop
+
+ select {
+ case <-time.After(srv.getReadTimeout()):
+ return &Error{err: "server shutdown is pending"}
+ case <-fin:
+ return nil
+ }
+}
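+
+// exampleShutdown is a minimal illustrative sketch of the start/stop pairing:
+// serve in a goroutine, then stop via Shutdown (which errors if the server
+// never started or takes longer than the read timeout to drain).
+func exampleShutdown() error {
+ srv := &Server{Addr: ":8053", Net: "udp"}
+ go func() { _ = srv.ListenAndServe() }()
+ // ... traffic is served here ...
+ return srv.Shutdown()
+}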
+
+// getReadTimeout is a helper func that returns the read timeout, using the package default if the server did not set one.
+func (srv *Server) getReadTimeout() time.Duration {
+ rtimeout := dnsTimeout
+ if srv.ReadTimeout != 0 {
+ rtimeout = srv.ReadTimeout
+ }
+ return rtimeout
+}
+
+// serveTCP starts a TCP listener for the server.
+// Each request is handled in a separate goroutine.
+func (srv *Server) serveTCP(l *net.TCPListener) error {
+ defer l.Close()
+
+ if srv.NotifyStartedFunc != nil {
+ srv.NotifyStartedFunc()
+ }
+
+ reader := Reader(&defaultReader{srv})
+ if srv.DecorateReader != nil {
+ reader = srv.DecorateReader(reader)
+ }
+
+ handler := srv.Handler
+ if handler == nil {
+ handler = DefaultServeMux
+ }
+ rtimeout := srv.getReadTimeout()
+ // deadline is not used here
+ for {
+ rw, e := l.AcceptTCP()
+ if e != nil {
+ continue
+ }
+ m, e := reader.ReadTCP(rw, rtimeout)
+ select {
+ case <-srv.stopTCP:
+ return nil
+ default:
+ }
+ if e != nil {
+ continue
+ }
+ srv.wgTCP.Add(1)
+ go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
+ }
+}
+
+// serveUDP starts a UDP listener for the server.
+// Each request is handled in a separate goroutine.
+func (srv *Server) serveUDP(l *net.UDPConn) error {
+ defer l.Close()
+
+ if srv.NotifyStartedFunc != nil {
+ srv.NotifyStartedFunc()
+ }
+
+ reader := Reader(&defaultReader{srv})
+ if srv.DecorateReader != nil {
+ reader = srv.DecorateReader(reader)
+ }
+
+ handler := srv.Handler
+ if handler == nil {
+ handler = DefaultServeMux
+ }
+ rtimeout := srv.getReadTimeout()
+ // deadline is not used here
+ for {
+ m, s, e := reader.ReadUDP(l, rtimeout)
+ select {
+ case <-srv.stopUDP:
+ return nil
+ default:
+ }
+ if e != nil {
+ continue
+ }
+ srv.wgUDP.Add(1)
+ go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
+ }
+}
+
+// Serve a new connection.
+func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t *net.TCPConn) {
+ w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
+ if srv.DecorateWriter != nil {
+ w.writer = srv.DecorateWriter(w)
+ } else {
+ w.writer = w
+ }
+
+ q := 0 // counter for the amount of TCP queries we get
+
+ defer func() {
+ if u != nil {
+ srv.wgUDP.Done()
+ }
+ if t != nil {
+ srv.wgTCP.Done()
+ }
+ }()
+
+ reader := Reader(&defaultReader{srv})
+ if srv.DecorateReader != nil {
+ reader = srv.DecorateReader(reader)
+ }
+Redo:
+ req := new(Msg)
+ err := req.Unpack(m)
+ if err != nil { // Send a FormatError back
+ x := new(Msg)
+ x.SetRcodeFormatError(req)
+ w.WriteMsg(x)
+ goto Exit
+ }
+ if !srv.Unsafe && req.Response {
+ goto Exit
+ }
+
+ w.tsigStatus = nil
+ if w.tsigSecret != nil {
+ if t := req.IsTsig(); t != nil {
+ secret := t.Hdr.Name
+ if _, ok := w.tsigSecret[secret]; !ok {
+ w.tsigStatus = ErrKeyAlg
+ }
+ w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false)
+ w.tsigTimersOnly = false
+ w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
+ }
+ }
+ h.ServeDNS(w, req) // Writes back to the client
+
+Exit:
+ // TODO(miek): make this number configurable?
+ if q > maxTCPQueries { // close socket after this many queries
+ w.Close()
+ return
+ }
+
+ if w.hijacked {
+ return // client calls Close()
+ }
+ if u != nil { // UDP, "close" and return
+ w.Close()
+ return
+ }
+ idleTimeout := tcpIdleTimeout
+ if srv.IdleTimeout != nil {
+ idleTimeout = srv.IdleTimeout()
+ }
+ m, e := reader.ReadTCP(w.tcp, idleTimeout)
+ if e == nil {
+ q++
+ goto Redo
+ }
+ w.Close()
+ return
+}
+
+func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) {
+ conn.SetReadDeadline(time.Now().Add(timeout))
+ l := make([]byte, 2)
+ n, err := conn.Read(l)
+ if err != nil || n != 2 {
+ if err != nil {
+ return nil, err
+ }
+ return nil, ErrShortRead
+ }
+ length, _ := unpackUint16(l, 0)
+ if length == 0 {
+ return nil, ErrShortRead
+ }
+ m := make([]byte, int(length))
+ n, err = conn.Read(m[:int(length)])
+ if err != nil || n == 0 {
+ if err != nil {
+ return nil, err
+ }
+ return nil, ErrShortRead
+ }
+ i := n
+ for i < int(length) {
+ j, err := conn.Read(m[i:int(length)])
+ if err != nil {
+ return nil, err
+ }
+ i += j
+ }
+ n = i
+ m = m[:n]
+ return m, nil
+}
+
+func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
+ conn.SetReadDeadline(time.Now().Add(timeout))
+ m := make([]byte, srv.UDPSize)
+ n, s, e := ReadFromSessionUDP(conn, m)
+ if e != nil || n == 0 {
+ if e != nil {
+ return nil, nil, e
+ }
+ return nil, nil, ErrShortRead
+ }
+ m = m[:n]
+ return m, s, nil
+}
+
+// WriteMsg implements the ResponseWriter.WriteMsg method.
+func (w *response) WriteMsg(m *Msg) (err error) {
+ var data []byte
+ if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
+ if t := m.IsTsig(); t != nil {
+ data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
+ if err != nil {
+ return err
+ }
+ _, err = w.writer.Write(data)
+ return err
+ }
+ }
+ data, err = m.Pack()
+ if err != nil {
+ return err
+ }
+ _, err = w.writer.Write(data)
+ return err
+}
+
+// Write implements the ResponseWriter.Write method.
+func (w *response) Write(m []byte) (int, error) {
+ switch {
+ case w.udp != nil:
+ n, err := WriteToSessionUDP(w.udp, m, w.udpSession)
+ return n, err
+ case w.tcp != nil:
+ lm := len(m)
+ if lm < 2 {
+ return 0, io.ErrShortBuffer
+ }
+ if lm > MaxMsgSize {
+ return 0, &Error{err: "message too large"}
+ }
+ l := make([]byte, 2, 2+lm)
+ l[0], l[1] = packUint16(uint16(lm))
+ m = append(l, m...)
+
+ n, err := io.Copy(w.tcp, bytes.NewReader(m))
+ return int(n), err
+ }
+ panic("not reached")
+}
+
+// LocalAddr implements the ResponseWriter.LocalAddr method.
+func (w *response) LocalAddr() net.Addr {
+ if w.tcp != nil {
+ return w.tcp.LocalAddr()
+ }
+ return w.udp.LocalAddr()
+}
+
+// RemoteAddr implements the ResponseWriter.RemoteAddr method.
+func (w *response) RemoteAddr() net.Addr { return w.remoteAddr }
+
+// TsigStatus implements the ResponseWriter.TsigStatus method.
+func (w *response) TsigStatus() error { return w.tsigStatus }
+
+// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
+func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
+
+// Hijack implements the ResponseWriter.Hijack method.
+func (w *response) Hijack() { w.hijacked = true }
+
+// Close implements the ResponseWriter.Close method
+func (w *response) Close() error {
+ // Can't close the udp conn, as that is actually the listener.
+ if w.tcp != nil {
+ e := w.tcp.Close()
+ w.tcp = nil
+ return e
+ }
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go
new file mode 100644
index 0000000000..0fccddbc15
--- /dev/null
+++ b/vendor/github.com/miekg/dns/sig0.go
@@ -0,0 +1,216 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "math/big"
+ "strings"
+ "time"
+)
+
+// Sign signs a dns.Msg. It fills the signature with the appropriate data.
+// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
+// and Expiration set.
+func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
+ if k == nil {
+ return nil, ErrPrivKey
+ }
+ if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ return nil, ErrKey
+ }
+ rr.Header().Rrtype = TypeSIG
+ rr.Header().Class = ClassANY
+ rr.Header().Ttl = 0
+ rr.Header().Name = "."
+ rr.OrigTtl = 0
+ rr.TypeCovered = 0
+ rr.Labels = 0
+
+ buf := make([]byte, m.Len()+rr.len())
+ mbuf, err := m.PackBuffer(buf)
+ if err != nil {
+ return nil, err
+ }
+ if &buf[0] != &mbuf[0] {
+ return nil, ErrBuf
+ }
+ off, err := PackRR(rr, buf, len(mbuf), nil, false)
+ if err != nil {
+ return nil, err
+ }
+ buf = buf[:off:cap(buf)]
+
+ hash, ok := AlgorithmToHash[rr.Algorithm]
+ if !ok {
+ return nil, ErrAlg
+ }
+
+ hasher := hash.New()
+ // Write SIG rdata
+ hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
+ // Write message
+ hasher.Write(buf[:len(mbuf)])
+
+ signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ rr.Signature = toBase64(signature)
+ sig := string(signature)
+
+ buf = append(buf, sig...)
+ if len(buf) > int(^uint16(0)) {
+ return nil, ErrBuf
+ }
+ // Adjust sig data length
+ rdoff := len(mbuf) + 1 + 2 + 2 + 4
+ rdlen, _ := unpackUint16(buf, rdoff)
+ rdlen += uint16(len(sig))
+ buf[rdoff], buf[rdoff+1] = packUint16(rdlen)
+ // Adjust additional count
+ adc, _ := unpackUint16(buf, 10)
+ adc++
+ buf[10], buf[11] = packUint16(adc)
+ return buf, nil
+}
+
+// Verify validates the message buf using the key k.
+// It's assumed that buf is a valid message from which rr was unpacked.
+func (rr *SIG) Verify(k *KEY, buf []byte) error {
+ if k == nil {
+ return ErrKey
+ }
+ if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ return ErrKey
+ }
+
+ var hash crypto.Hash
+ switch rr.Algorithm {
+ case DSA, RSASHA1:
+ hash = crypto.SHA1
+ case RSASHA256, ECDSAP256SHA256:
+ hash = crypto.SHA256
+ case ECDSAP384SHA384:
+ hash = crypto.SHA384
+ case RSASHA512:
+ hash = crypto.SHA512
+ default:
+ return ErrAlg
+ }
+ hasher := hash.New()
+
+ buflen := len(buf)
+ qdc, _ := unpackUint16(buf, 4)
+ anc, _ := unpackUint16(buf, 6)
+ auc, _ := unpackUint16(buf, 8)
+ adc, offset := unpackUint16(buf, 10)
+ var err error
+ for i := uint16(0); i < qdc && offset < buflen; i++ {
+ _, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // Skip past Type and Class
+ offset += 2 + 2
+ }
+ for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
+ _, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // Skip past Type, Class and TTL
+ offset += 2 + 2 + 4
+ if offset+1 >= buflen {
+ continue
+ }
+ var rdlen uint16
+ rdlen, offset = unpackUint16(buf, offset)
+ offset += int(rdlen)
+ }
+ if offset >= buflen {
+ return &Error{err: "overflowing unpacking signed message"}
+ }
+
+ // offset should be just prior to SIG
+ bodyend := offset
+ // owner name SHOULD be root
+ _, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // Skip Type, Class, TTL, RDLen
+ offset += 2 + 2 + 4 + 2
+ sigstart := offset
+ // Skip Type Covered, Algorithm, Labels, Original TTL
+ offset += 2 + 1 + 1 + 4
+ if offset+4+4 >= buflen {
+ return &Error{err: "overflow unpacking signed message"}
+ }
+ expire := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
+ offset += 4
+ incept := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
+ offset += 4
+ now := uint32(time.Now().Unix())
+ if now < incept || now > expire {
+ return ErrTime
+ }
+ // Skip key tag
+ offset += 2
+ var signername string
+ signername, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // If the key has come from the DNS, name compression might
+ // have mangled the case of the name.
+ if strings.ToLower(signername) != strings.ToLower(k.Header().Name) {
+ return &Error{err: "signer name doesn't match key name"}
+ }
+ sigend := offset
+ hasher.Write(buf[sigstart:sigend])
+ hasher.Write(buf[:10])
+ hasher.Write([]byte{
+ byte((adc - 1) << 8),
+ byte(adc - 1),
+ })
+ hasher.Write(buf[12:bodyend])
+
+ hashed := hasher.Sum(nil)
+ sig := buf[sigend:]
+ switch k.Algorithm {
+ case DSA:
+ pk := k.publicKeyDSA()
+ sig = sig[1:]
+ r := big.NewInt(0)
+ r.SetBytes(sig[:len(sig)/2])
+ s := big.NewInt(0)
+ s.SetBytes(sig[len(sig)/2:])
+ if pk != nil {
+ if dsa.Verify(pk, hashed, r, s) {
+ return nil
+ }
+ return ErrSig
+ }
+ case RSASHA1, RSASHA256, RSASHA512:
+ pk := k.publicKeyRSA()
+ if pk != nil {
+ return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
+ }
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ pk := k.publicKeyECDSA()
+ r := big.NewInt(0)
+ r.SetBytes(sig[:len(sig)/2])
+ s := big.NewInt(0)
+ s.SetBytes(sig[len(sig)/2:])
+ if pk != nil {
+ if ecdsa.Verify(pk, hashed, r, s) {
+ return nil
+ }
+ return ErrSig
+ }
+ }
+ return ErrKeyAlg
+}
diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go
new file mode 100644
index 0000000000..9573c7d0b8
--- /dev/null
+++ b/vendor/github.com/miekg/dns/singleinflight.go
@@ -0,0 +1,57 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Adapted for dns package usage by Miek Gieben.
+
+package dns
+
+import "sync"
+import "time"
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+ val *Msg
+ rtt time.Duration
+ err error
+ dups int
+}
+
+// singleflight represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type singleflight struct {
+ sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
+ g.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.Unlock()
+ c.wg.Wait()
+ return c.val, c.rtt, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.Unlock()
+
+ c.val, c.rtt, c.err = fn()
+ c.wg.Done()
+
+ g.Lock()
+ delete(g.m, key)
+ g.Unlock()
+
+ return c.val, c.rtt, c.err, c.dups > 0
+}
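+
+// exampleSingleflight is a minimal illustrative sketch: concurrent callers
+// asking for the same key share one execution of lookup; shared reports
+// whether this caller received a duplicated result.
+func exampleSingleflight(g *singleflight, lookup func() (*Msg, time.Duration, error)) (*Msg, bool) {
+ msg, _, _, shared := g.Do("example.org. IN A", lookup)
+ return msg, shared
+}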
diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go
new file mode 100644
index 0000000000..f027787df3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/tlsa.go
@@ -0,0 +1,86 @@
+package dns
+
+import (
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/x509"
+ "encoding/hex"
+ "errors"
+ "io"
+ "net"
+ "strconv"
+)
+
+// CertificateToDANE converts a certificate to a hex string as used in the TLSA record.
+func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
+ switch matchingType {
+ case 0:
+ switch selector {
+ case 0:
+ return hex.EncodeToString(cert.Raw), nil
+ case 1:
+ return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
+ }
+ case 1:
+ h := sha256.New()
+ switch selector {
+ case 0:
+ io.WriteString(h, string(cert.Raw))
+ return hex.EncodeToString(h.Sum(nil)), nil
+ case 1:
+ io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
+ return hex.EncodeToString(h.Sum(nil)), nil
+ }
+ case 2:
+ h := sha512.New()
+ switch selector {
+ case 0:
+ io.WriteString(h, string(cert.Raw))
+ return hex.EncodeToString(h.Sum(nil)), nil
+ case 1:
+ io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
+ return hex.EncodeToString(h.Sum(nil)), nil
+ }
+ }
+ return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector")
+}
+
+// Sign creates a TLSA record from an SSL certificate.
+func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
+ r.Hdr.Rrtype = TypeTLSA
+ r.Usage = uint8(usage)
+ r.Selector = uint8(selector)
+ r.MatchingType = uint8(matchingType)
+
+ r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Verify verifies a TLSA record against an SSL certificate. If it is OK
+// a nil error is returned.
+func (r *TLSA) Verify(cert *x509.Certificate) error {
+ c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
+ if err != nil {
+ return err // Not also ErrSig?
+ }
+ if r.Certificate == c {
+ return nil
+ }
+ return ErrSig // ErrSig, really?
+}
+
+// TLSAName returns the ownername of a TLSA resource record as per the
+// rules specified in RFC 6698, Section 3.
+func TLSAName(name, service, network string) (string, error) {
+ if !IsFqdn(name) {
+ return "", ErrFqdn
+ }
+ p, e := net.LookupPort(network, service)
+ if e != nil {
+ return "", e
+ }
+ return "_" + strconv.Itoa(p) + "_" + network + "." + name, nil
+}
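+
+// exampleTLSAName is a minimal illustrative sketch: for the "https" service
+// over "tcp" the port lookup resolves to 443, so the generated owner name is
+// "_443._tcp.www.example.org.".
+func exampleTLSAName() (string, error) {
+ return TLSAName("www.example.org.", "https", "tcp")
+}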
diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go
new file mode 100644
index 0000000000..d7bc25056c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/tsig.go
@@ -0,0 +1,333 @@
+package dns
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/hex"
+ "hash"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// HMAC hashing codes. These are transmitted as domain names.
+const (
+ HmacMD5 = "hmac-md5.sig-alg.reg.int."
+ HmacSHA1 = "hmac-sha1."
+ HmacSHA256 = "hmac-sha256."
+ HmacSHA512 = "hmac-sha512."
+)
+
+// TSIG is the RR that holds the transaction signature of a message.
+// See RFC 2845 and RFC 4635.
+type TSIG struct {
+ Hdr RR_Header
+ Algorithm string `dns:"domain-name"`
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+ MACSize uint16
+ MAC string `dns:"size-hex"`
+ OrigId uint16
+ Error uint16
+ OtherLen uint16
+ OtherData string `dns:"size-hex"`
+}
+
+func (rr *TSIG) Header() *RR_Header {
+ return &rr.Hdr
+}
+
+// TSIG has no official presentation format, but this will suffice.
+
+func (rr *TSIG) String() string {
+ s := "\n;; TSIG PSEUDOSECTION:\n"
+ s += rr.Hdr.String() +
+ " " + rr.Algorithm +
+ " " + tsigTimeToString(rr.TimeSigned) +
+ " " + strconv.Itoa(int(rr.Fudge)) +
+ " " + strconv.Itoa(int(rr.MACSize)) +
+ " " + strings.ToUpper(rr.MAC) +
+ " " + strconv.Itoa(int(rr.OrigId)) +
+ " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
+ " " + strconv.Itoa(int(rr.OtherLen)) +
+ " " + rr.OtherData
+ return s
+}
+
+func (rr *TSIG) len() int {
+ return rr.Hdr.len() + len(rr.Algorithm) + 1 + 6 +
+ 4 + len(rr.MAC)/2 + 1 + 6 + len(rr.OtherData)/2 + 1
+}
+
+func (rr *TSIG) copy() RR {
+ return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
+}
+
+// The following values must be put in wireformat, so that the MAC can be calculated.
+// RFC 2845, section 3.4.2. TSIG Variables.
+type tsigWireFmt struct {
+ // From RR_Header
+ Name string `dns:"domain-name"`
+ Class uint16
+ Ttl uint32
+ // Rdata of the TSIG
+ Algorithm string `dns:"domain-name"`
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+ // MACSize, MAC and OrigId excluded
+ Error uint16
+ OtherLen uint16
+ OtherData string `dns:"size-hex"`
+}
+
+// If we have the MAC use this type to convert it to wiredata.
+// Section 3.4.3. Request MAC
+type macWireFmt struct {
+ MACSize uint16
+ MAC string `dns:"size-hex"`
+}
+
+// 3.3. Time values used in TSIG calculations
+type timerWireFmt struct {
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+}
+
+// TsigGenerate fills out the TSIG record attached to the message.
+// The message should contain a "stub" TSIG RR with the algorithm, key name
+// (owner name of the RR), time fudge (defaults to 300 seconds) and the
+// current time. The TSIG MAC is saved in that TSIG RR.
+// When TsigGenerate is called for the first time requestMAC is set to the
+// empty string and timersOnly is false.
+// If something goes wrong an error is returned, otherwise it is nil.
+func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
+ if m.IsTsig() == nil {
+ panic("dns: TSIG not last RR in additional")
+ }
+ // If we barf here, the caller is to blame
+ rawsecret, err := fromBase64([]byte(secret))
+ if err != nil {
+ return nil, "", err
+ }
+
+ rr := m.Extra[len(m.Extra)-1].(*TSIG)
+ m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
+ mbuf, err := m.Pack()
+ if err != nil {
+ return nil, "", err
+ }
+ buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
+
+ t := new(TSIG)
+ var h hash.Hash
+ switch rr.Algorithm {
+ case HmacMD5:
+ h = hmac.New(md5.New, []byte(rawsecret))
+ case HmacSHA1:
+ h = hmac.New(sha1.New, []byte(rawsecret))
+ case HmacSHA256:
+ h = hmac.New(sha256.New, []byte(rawsecret))
+ case HmacSHA512:
+ h = hmac.New(sha512.New, []byte(rawsecret))
+ default:
+ return nil, "", ErrKeyAlg
+ }
+ io.WriteString(h, string(buf))
+ t.MAC = hex.EncodeToString(h.Sum(nil))
+ t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
+
+ t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
+ t.Fudge = rr.Fudge
+ t.TimeSigned = rr.TimeSigned
+ t.Algorithm = rr.Algorithm
+ t.OrigId = m.Id
+
+ tbuf := make([]byte, t.len())
+ if off, err := PackRR(t, tbuf, 0, nil, false); err == nil {
+ tbuf = tbuf[:off] // reset to actual size used
+ } else {
+ return nil, "", err
+ }
+ mbuf = append(mbuf, tbuf...)
+ rawSetExtraLen(mbuf, uint16(len(m.Extra)+1))
+ return mbuf, t.MAC, nil
+}
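+
+// exampleTsig is a minimal illustrative sketch of the caller-side setup
+// TsigGenerate expects: a stub TSIG RR appended via SetTsig plus the shared
+// secret in base64. The key name "axfr." and the secret are example values.
+func exampleTsig(m *Msg) ([]byte, string, error) {
+ m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix())
+ return TsigGenerate(m, "so6ZGir4GPAqINNh9U5c3A==", "", false)
+}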
+
+// TsigVerify verifies the TSIG on a message.
+// If the signature does not validate err contains the
+// error, otherwise it is nil.
+func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
+ rawsecret, err := fromBase64([]byte(secret))
+ if err != nil {
+ return err
+ }
+ // Strip the TSIG from the incoming msg
+ stripped, tsig, err := stripTsig(msg)
+ if err != nil {
+ return err
+ }
+
+ msgMAC, err := hex.DecodeString(tsig.MAC)
+ if err != nil {
+ return err
+ }
+
+ buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
+
+ // Fudge factor works both ways. A message can arrive before it was signed because
+ // of clock skew.
+ now := uint64(time.Now().Unix())
+ ti := now - tsig.TimeSigned
+ if now < tsig.TimeSigned {
+ ti = tsig.TimeSigned - now
+ }
+ if uint64(tsig.Fudge) < ti {
+ return ErrTime
+ }
+
+ var h hash.Hash
+ switch tsig.Algorithm {
+ case HmacMD5:
+ h = hmac.New(md5.New, rawsecret)
+ case HmacSHA1:
+ h = hmac.New(sha1.New, rawsecret)
+ case HmacSHA256:
+ h = hmac.New(sha256.New, rawsecret)
+ case HmacSHA512:
+ h = hmac.New(sha512.New, rawsecret)
+ default:
+ return ErrKeyAlg
+ }
+ h.Write(buf)
+ if !hmac.Equal(h.Sum(nil), msgMAC) {
+ return ErrSig
+ }
+ return nil
+}
+
+// Create a wire-format buffer for the MAC calculation.
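+// The digested data is laid out as: the request MAC (when one was passed in),
+// then the unsigned DNS message, then the TSIG "variables" (or only the timer
+// values when timersOnly is set); see RFC 2845, sections 3.4 and 4.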
+func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
+ var buf []byte
+ if rr.TimeSigned == 0 {
+ rr.TimeSigned = uint64(time.Now().Unix())
+ }
+ if rr.Fudge == 0 {
+ rr.Fudge = 300 // Standard (RFC) default.
+ }
+
+ if requestMAC != "" {
+ m := new(macWireFmt)
+ m.MACSize = uint16(len(requestMAC) / 2)
+ m.MAC = requestMAC
+ buf = make([]byte, len(requestMAC)) // long enough
+ n, _ := PackStruct(m, buf, 0)
+ buf = buf[:n]
+ }
+
+ tsigvar := make([]byte, DefaultMsgSize)
+ if timersOnly {
+ tsig := new(timerWireFmt)
+ tsig.TimeSigned = rr.TimeSigned
+ tsig.Fudge = rr.Fudge
+ n, _ := PackStruct(tsig, tsigvar, 0)
+ tsigvar = tsigvar[:n]
+ } else {
+ tsig := new(tsigWireFmt)
+ tsig.Name = strings.ToLower(rr.Hdr.Name)
+ tsig.Class = ClassANY
+ tsig.Ttl = rr.Hdr.Ttl
+ tsig.Algorithm = strings.ToLower(rr.Algorithm)
+ tsig.TimeSigned = rr.TimeSigned
+ tsig.Fudge = rr.Fudge
+ tsig.Error = rr.Error
+ tsig.OtherLen = rr.OtherLen
+ tsig.OtherData = rr.OtherData
+ n, _ := PackStruct(tsig, tsigvar, 0)
+ tsigvar = tsigvar[:n]
+ }
+
+ if requestMAC != "" {
+ x := append(buf, msgbuf...)
+ buf = append(x, tsigvar...)
+ } else {
+ buf = append(msgbuf, tsigvar...)
+ }
+ return buf
+}
+
+// Strip the TSIG from the raw message.
+func stripTsig(msg []byte) ([]byte, *TSIG, error) {
+ // Copied from msg.go's Unpack()
+ // Header.
+ var dh Header
+ var err error
+ dns := new(Msg)
+ var rr *TSIG // stays nil unless a TSIG RR is found; new(TSIG) would defeat the nil check below
+ off := 0
+ tsigoff := 0
+ if off, err = UnpackStruct(&dh, msg, off); err != nil {
+ return nil, nil, err
+ }
+ if dh.Arcount == 0 {
+ return nil, nil, ErrNoSig
+ }
+ // Rcode, see msg.go Unpack()
+ if int(dh.Bits&0xF) == RcodeNotAuth {
+ return nil, nil, ErrAuth
+ }
+
+ // Arrays.
+ dns.Question = make([]Question, dh.Qdcount)
+ dns.Answer = make([]RR, dh.Ancount)
+ dns.Ns = make([]RR, dh.Nscount)
+ dns.Extra = make([]RR, dh.Arcount)
+
+ for i := 0; i < len(dns.Question); i++ {
+ off, err = UnpackStruct(&dns.Question[i], msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ for i := 0; i < len(dns.Answer); i++ {
+ dns.Answer[i], off, err = UnpackRR(msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ for i := 0; i < len(dns.Ns); i++ {
+ dns.Ns[i], off, err = UnpackRR(msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ for i := 0; i < len(dns.Extra); i++ {
+ tsigoff = off
+ dns.Extra[i], off, err = UnpackRR(msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ if dns.Extra[i].Header().Rrtype == TypeTSIG {
+ rr = dns.Extra[i].(*TSIG)
+ // Adjust Arcount.
+ arcount, _ := unpackUint16(msg, 10)
+ msg[10], msg[11] = packUint16(arcount - 1)
+ break
+ }
+ }
+ if rr == nil {
+ return nil, nil, ErrNoSig
+ }
+ return msg[:tsigoff], rr, nil
+}
+
+// Translate the TSIG time signed into a date. There is no
+// need for RFC1982 calculations as this date is 48 bits.
+func tsigTimeToString(t uint64) string {
+ ti := time.Unix(int64(t), 0).UTC()
+ return ti.Format("20060102150405")
+}
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
new file mode 100644
index 0000000000..55a50b811d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/types.go
@@ -0,0 +1,1741 @@
+package dns
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type (
+ // Type is a DNS type.
+ Type uint16
+ // Class is a DNS class.
+ Class uint16
+ // Name is a DNS domain name.
+ Name string
+)
+
+// Packet formats
+
+// Wire constants and supported types.
+const (
+ // valid RR_Header.Rrtype and Question.qtype
+
+ TypeNone uint16 = 0
+ TypeA uint16 = 1
+ TypeNS uint16 = 2
+ TypeMD uint16 = 3
+ TypeMF uint16 = 4
+ TypeCNAME uint16 = 5
+ TypeSOA uint16 = 6
+ TypeMB uint16 = 7
+ TypeMG uint16 = 8
+ TypeMR uint16 = 9
+ TypeNULL uint16 = 10
+ TypeWKS uint16 = 11
+ TypePTR uint16 = 12
+ TypeHINFO uint16 = 13
+ TypeMINFO uint16 = 14
+ TypeMX uint16 = 15
+ TypeTXT uint16 = 16
+ TypeRP uint16 = 17
+ TypeAFSDB uint16 = 18
+ TypeX25 uint16 = 19
+ TypeISDN uint16 = 20
+ TypeRT uint16 = 21
+ TypeNSAPPTR uint16 = 23
+ TypeSIG uint16 = 24
+ TypeKEY uint16 = 25
+ TypePX uint16 = 26
+ TypeGPOS uint16 = 27
+ TypeAAAA uint16 = 28
+ TypeLOC uint16 = 29
+ TypeNXT uint16 = 30
+ TypeEID uint16 = 31
+ TypeNIMLOC uint16 = 32
+ TypeSRV uint16 = 33
+ TypeATMA uint16 = 34
+ TypeNAPTR uint16 = 35
+ TypeKX uint16 = 36
+ TypeCERT uint16 = 37
+ TypeDNAME uint16 = 39
+ TypeOPT uint16 = 41 // EDNS
+ TypeDS uint16 = 43
+ TypeSSHFP uint16 = 44
+ TypeIPSECKEY uint16 = 45
+ TypeRRSIG uint16 = 46
+ TypeNSEC uint16 = 47
+ TypeDNSKEY uint16 = 48
+ TypeDHCID uint16 = 49
+ TypeNSEC3 uint16 = 50
+ TypeNSEC3PARAM uint16 = 51
+ TypeTLSA uint16 = 52
+ TypeHIP uint16 = 55
+ TypeNINFO uint16 = 56
+ TypeRKEY uint16 = 57
+ TypeTALINK uint16 = 58
+ TypeCDS uint16 = 59
+ TypeCDNSKEY uint16 = 60
+ TypeOPENPGPKEY uint16 = 61
+ TypeSPF uint16 = 99
+ TypeUINFO uint16 = 100
+ TypeUID uint16 = 101
+ TypeGID uint16 = 102
+ TypeUNSPEC uint16 = 103
+ TypeNID uint16 = 104
+ TypeL32 uint16 = 105
+ TypeL64 uint16 = 106
+ TypeLP uint16 = 107
+ TypeEUI48 uint16 = 108
+ TypeEUI64 uint16 = 109
+ TypeURI uint16 = 256
+ TypeCAA uint16 = 257
+
+ TypeTKEY uint16 = 249
+ TypeTSIG uint16 = 250
+
+ // valid Question.Qtype only
+ TypeIXFR uint16 = 251
+ TypeAXFR uint16 = 252
+ TypeMAILB uint16 = 253
+ TypeMAILA uint16 = 254
+ TypeANY uint16 = 255
+
+ TypeTA uint16 = 32768
+ TypeDLV uint16 = 32769
+ TypeReserved uint16 = 65535
+
+ // valid Question.Qclass
+ ClassINET = 1
+ ClassCSNET = 2
+ ClassCHAOS = 3
+ ClassHESIOD = 4
+ ClassNONE = 254
+ ClassANY = 255
+
+ // Message Response Codes.
+ RcodeSuccess = 0
+ RcodeFormatError = 1
+ RcodeServerFailure = 2
+ RcodeNameError = 3
+ RcodeNotImplemented = 4
+ RcodeRefused = 5
+ RcodeYXDomain = 6
+ RcodeYXRrset = 7
+ RcodeNXRrset = 8
+ RcodeNotAuth = 9
+ RcodeNotZone = 10
+ RcodeBadSig = 16 // TSIG
+ RcodeBadVers = 16 // EDNS0
+ RcodeBadKey = 17
+ RcodeBadTime = 18
+ RcodeBadMode = 19 // TKEY
+ RcodeBadName = 20
+ RcodeBadAlg = 21
+ RcodeBadTrunc = 22 // TSIG
+
+ // Message Opcodes. There is no 3.
+ OpcodeQuery = 0
+ OpcodeIQuery = 1
+ OpcodeStatus = 2
+ OpcodeNotify = 4
+ OpcodeUpdate = 5
+)
+
+// Header is the wire format for the DNS packet header.
+type Header struct {
+ Id uint16
+ Bits uint16
+ Qdcount, Ancount, Nscount, Arcount uint16
+}
+
+const (
+ headerSize = 12
+
+ // Header.Bits
+ _QR = 1 << 15 // query/response (response=1)
+ _AA = 1 << 10 // authoritative
+ _TC = 1 << 9 // truncated
+ _RD = 1 << 8 // recursion desired
+ _RA = 1 << 7 // recursion available
+ _Z = 1 << 6 // Z
+ _AD = 1 << 5 // authenticated data
+ _CD = 1 << 4 // checking disabled
+
+ LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2.
+ LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
+
+ LOC_HOURS = 60 * 1000
+ LOC_DEGREES = 60 * LOC_HOURS
+
+ LOC_ALTITUDEBASE = 100000
+)
+
+// Different Certificate Types, see RFC 4398, Section 2.1
+const (
+ CertPKIX = 1 + iota
+ CertSPKI
+ CertPGP
+ CertIPIX
+ CertISPKI
+ CertIPGP
+ CertACPKIX
+ CertIACPKIX
+ CertURI = 253
+ CertOID = 254
+)
+
+// CertTypeToString converts the Cert Type to its string representation.
+// See RFC 4398 and RFC 6944.
+var CertTypeToString = map[uint16]string{
+ CertPKIX: "PKIX",
+ CertSPKI: "SPKI",
+ CertPGP: "PGP",
+ CertIPIX: "IPIX",
+ CertISPKI: "ISPKI",
+ CertIPGP: "IPGP",
+ CertACPKIX: "ACPKIX",
+ CertIACPKIX: "IACPKIX",
+ CertURI: "URI",
+ CertOID: "OID",
+}
+
+// StringToCertType is the reverse of CertTypeToString.
+var StringToCertType = reverseInt16(CertTypeToString)
+
+// Question holds a DNS question. There can be multiple questions in the
+// question section of a message. Usually there is just one.
+type Question struct {
+ Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed)
+ Qtype uint16
+ Qclass uint16
+}
+
+func (q *Question) String() (s string) {
+ // prefix with ; (as in dig)
+ s = ";" + sprintName(q.Name) + "\t"
+ s += Class(q.Qclass).String() + "\t"
+ s += " " + Type(q.Qtype).String()
+ return s
+}
+
+func (q *Question) len() int {
+ l := len(q.Name) + 1
+ return l + 4
+}
+
+// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY
+// is named "*" there.
+type ANY struct {
+ Hdr RR_Header
+ // Does not have any rdata
+}
+
+func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
+func (rr *ANY) copy() RR { return &ANY{*rr.Hdr.copyHeader()} }
+func (rr *ANY) String() string { return rr.Hdr.String() }
+func (rr *ANY) len() int { return rr.Hdr.len() }
+
+type CNAME struct {
+ Hdr RR_Header
+ Target string `dns:"cdomain-name"`
+}
+
+func (rr *CNAME) Header() *RR_Header { return &rr.Hdr }
+func (rr *CNAME) copy() RR { return &CNAME{*rr.Hdr.copyHeader(), rr.Target} }
+func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) }
+func (rr *CNAME) len() int { return rr.Hdr.len() + len(rr.Target) + 1 }
+
+type HINFO struct {
+ Hdr RR_Header
+ Cpu string
+ Os string
+}
+
+func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *HINFO) copy() RR { return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} }
+func (rr *HINFO) String() string {
+ return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os})
+}
+func (rr *HINFO) len() int { return rr.Hdr.len() + len(rr.Cpu) + len(rr.Os) }
+
+type MB struct {
+ Hdr RR_Header
+ Mb string `dns:"cdomain-name"`
+}
+
+func (rr *MB) Header() *RR_Header { return &rr.Hdr }
+func (rr *MB) copy() RR { return &MB{*rr.Hdr.copyHeader(), rr.Mb} }
+
+func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) }
+func (rr *MB) len() int { return rr.Hdr.len() + len(rr.Mb) + 1 }
+
+type MG struct {
+ Hdr RR_Header
+ Mg string `dns:"cdomain-name"`
+}
+
+func (rr *MG) Header() *RR_Header { return &rr.Hdr }
+func (rr *MG) copy() RR { return &MG{*rr.Hdr.copyHeader(), rr.Mg} }
+func (rr *MG) len() int { l := len(rr.Mg) + 1; return rr.Hdr.len() + l }
+func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) }
+
+type MINFO struct {
+ Hdr RR_Header
+ Rmail string `dns:"cdomain-name"`
+ Email string `dns:"cdomain-name"`
+}
+
+func (rr *MINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *MINFO) copy() RR { return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} }
+
+func (rr *MINFO) String() string {
+ return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email)
+}
+
+func (rr *MINFO) len() int {
+ l := len(rr.Rmail) + 1
+ n := len(rr.Email) + 1
+ return rr.Hdr.len() + l + n
+}
+
+type MR struct {
+ Hdr RR_Header
+ Mr string `dns:"cdomain-name"`
+}
+
+func (rr *MR) Header() *RR_Header { return &rr.Hdr }
+func (rr *MR) copy() RR { return &MR{*rr.Hdr.copyHeader(), rr.Mr} }
+func (rr *MR) len() int { l := len(rr.Mr) + 1; return rr.Hdr.len() + l }
+
+func (rr *MR) String() string {
+ return rr.Hdr.String() + sprintName(rr.Mr)
+}
+
+type MF struct {
+ Hdr RR_Header
+ Mf string `dns:"cdomain-name"`
+}
+
+func (rr *MF) Header() *RR_Header { return &rr.Hdr }
+func (rr *MF) copy() RR { return &MF{*rr.Hdr.copyHeader(), rr.Mf} }
+func (rr *MF) len() int { return rr.Hdr.len() + len(rr.Mf) + 1 }
+
+func (rr *MF) String() string {
+ return rr.Hdr.String() + sprintName(rr.Mf)
+}
+
+type MD struct {
+ Hdr RR_Header
+ Md string `dns:"cdomain-name"`
+}
+
+func (rr *MD) Header() *RR_Header { return &rr.Hdr }
+func (rr *MD) copy() RR { return &MD{*rr.Hdr.copyHeader(), rr.Md} }
+func (rr *MD) len() int { return rr.Hdr.len() + len(rr.Md) + 1 }
+
+func (rr *MD) String() string {
+ return rr.Hdr.String() + sprintName(rr.Md)
+}
+
+type MX struct {
+ Hdr RR_Header
+ Preference uint16
+ Mx string `dns:"cdomain-name"`
+}
+
+func (rr *MX) Header() *RR_Header { return &rr.Hdr }
+func (rr *MX) copy() RR { return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} }
+func (rr *MX) len() int { l := len(rr.Mx) + 1; return rr.Hdr.len() + l + 2 }
+
+func (rr *MX) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx)
+}
+
+type AFSDB struct {
+ Hdr RR_Header
+ Subtype uint16
+ Hostname string `dns:"cdomain-name"`
+}
+
+func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
+func (rr *AFSDB) copy() RR { return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} }
+func (rr *AFSDB) len() int { l := len(rr.Hostname) + 1; return rr.Hdr.len() + l + 2 }
+
+func (rr *AFSDB) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname)
+}
+
+type X25 struct {
+ Hdr RR_Header
+ PSDNAddress string
+}
+
+func (rr *X25) Header() *RR_Header { return &rr.Hdr }
+func (rr *X25) copy() RR { return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} }
+func (rr *X25) len() int { return rr.Hdr.len() + len(rr.PSDNAddress) + 1 }
+
+func (rr *X25) String() string {
+ return rr.Hdr.String() + rr.PSDNAddress
+}
+
+type RT struct {
+ Hdr RR_Header
+ Preference uint16
+ Host string `dns:"cdomain-name"`
+}
+
+func (rr *RT) Header() *RR_Header { return &rr.Hdr }
+func (rr *RT) copy() RR { return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} }
+func (rr *RT) len() int { l := len(rr.Host) + 1; return rr.Hdr.len() + l + 2 }
+
+func (rr *RT) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host)
+}
+
+type NS struct {
+ Hdr RR_Header
+ Ns string `dns:"cdomain-name"`
+}
+
+func (rr *NS) Header() *RR_Header { return &rr.Hdr }
+func (rr *NS) len() int { l := len(rr.Ns) + 1; return rr.Hdr.len() + l }
+func (rr *NS) copy() RR { return &NS{*rr.Hdr.copyHeader(), rr.Ns} }
+
+func (rr *NS) String() string {
+ return rr.Hdr.String() + sprintName(rr.Ns)
+}
+
+type PTR struct {
+ Hdr RR_Header
+ Ptr string `dns:"cdomain-name"`
+}
+
+func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
+func (rr *PTR) copy() RR { return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} }
+func (rr *PTR) len() int { l := len(rr.Ptr) + 1; return rr.Hdr.len() + l }
+
+func (rr *PTR) String() string {
+ return rr.Hdr.String() + sprintName(rr.Ptr)
+}
+
+type RP struct {
+ Hdr RR_Header
+ Mbox string `dns:"domain-name"`
+ Txt string `dns:"domain-name"`
+}
+
+func (rr *RP) Header() *RR_Header { return &rr.Hdr }
+func (rr *RP) copy() RR { return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} }
+func (rr *RP) len() int { return rr.Hdr.len() + len(rr.Mbox) + 1 + len(rr.Txt) + 1 }
+
+func (rr *RP) String() string {
+ return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt})
+}
+
+type SOA struct {
+ Hdr RR_Header
+ Ns string `dns:"cdomain-name"`
+ Mbox string `dns:"cdomain-name"`
+ Serial uint32
+ Refresh uint32
+ Retry uint32
+ Expire uint32
+ Minttl uint32
+}
+
+func (rr *SOA) Header() *RR_Header { return &rr.Hdr }
+func (rr *SOA) copy() RR {
+ return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
+}
+
+func (rr *SOA) String() string {
+ return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) +
+ " " + strconv.FormatInt(int64(rr.Serial), 10) +
+ " " + strconv.FormatInt(int64(rr.Refresh), 10) +
+ " " + strconv.FormatInt(int64(rr.Retry), 10) +
+ " " + strconv.FormatInt(int64(rr.Expire), 10) +
+ " " + strconv.FormatInt(int64(rr.Minttl), 10)
+}
+
+func (rr *SOA) len() int {
+ l := len(rr.Ns) + 1
+ n := len(rr.Mbox) + 1
+ return rr.Hdr.len() + l + n + 20
+}
+
+type TXT struct {
+ Hdr RR_Header
+ Txt []string `dns:"txt"`
+}
+
+func (rr *TXT) Header() *RR_Header { return &rr.Hdr }
+func (rr *TXT) copy() RR {
+ cp := make([]string, len(rr.Txt), cap(rr.Txt))
+ copy(cp, rr.Txt)
+ return &TXT{*rr.Hdr.copyHeader(), cp}
+}
+
+func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
+
+func sprintName(s string) string {
+ src := []byte(s)
+ dst := make([]byte, 0, len(src))
+ for i := 0; i < len(src); {
+ if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
+ dst = append(dst, src[i:i+2]...)
+ i += 2
+ } else {
+ b, n := nextByte(src, i)
+ if n == 0 {
+ i++ // dangling back slash
+ } else if b == '.' {
+ dst = append(dst, b)
+ } else {
+ dst = appendDomainNameByte(dst, b)
+ }
+ i += n
+ }
+ }
+ return string(dst)
+}
+
+func sprintTxtOctet(s string) string {
+ src := []byte(s)
+ dst := make([]byte, 0, len(src))
+ dst = append(dst, '"')
+ for i := 0; i < len(src); {
+ if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
+ dst = append(dst, src[i:i+2]...)
+ i += 2
+ } else {
+ b, n := nextByte(src, i)
+ if n == 0 {
+ i++ // dangling back slash
+ } else if b == '.' {
+ dst = append(dst, b)
+ } else {
+ if b < ' ' || b > '~' {
+ dst = appendByte(dst, b)
+ } else {
+ dst = append(dst, b)
+ }
+ }
+ i += n
+ }
+ }
+ dst = append(dst, '"')
+ return string(dst)
+}
+
+func sprintTxt(txt []string) string {
+ var out []byte
+ for i, s := range txt {
+ if i > 0 {
+ out = append(out, ` "`...)
+ } else {
+ out = append(out, '"')
+ }
+ bs := []byte(s)
+ for j := 0; j < len(bs); {
+ b, n := nextByte(bs, j)
+ if n == 0 {
+ break
+ }
+ out = appendTXTStringByte(out, b)
+ j += n
+ }
+ out = append(out, '"')
+ }
+ return string(out)
+}
+
+func appendDomainNameByte(s []byte, b byte) []byte {
+ switch b {
+ case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape
+ return append(s, '\\', b)
+ }
+ return appendTXTStringByte(s, b)
+}
+
+func appendTXTStringByte(s []byte, b byte) []byte {
+ switch b {
+ case '\t':
+ return append(s, '\\', 't')
+ case '\r':
+ return append(s, '\\', 'r')
+ case '\n':
+ return append(s, '\\', 'n')
+ case '"', '\\':
+ return append(s, '\\', b)
+ }
+ if b < ' ' || b > '~' {
+ return appendByte(s, b)
+ }
+ return append(s, b)
+}
+
+func appendByte(s []byte, b byte) []byte {
+ var buf [3]byte
+ bufs := strconv.AppendInt(buf[:0], int64(b), 10)
+ s = append(s, '\\')
+ for i := 0; i < 3-len(bufs); i++ {
+ s = append(s, '0')
+ }
+ for _, r := range bufs {
+ s = append(s, r)
+ }
+ return s
+}
+
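+// nextByte returns the (unescaped) byte at offset in b and the number of
+// input bytes it consumed. For example, reading `\065` at its backslash
+// yields ('A', 4), reading `\t` yields ('\t', 2), and reading past the end
+// of b yields (0, 0).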
+func nextByte(b []byte, offset int) (byte, int) {
+ if offset >= len(b) {
+ return 0, 0
+ }
+ if b[offset] != '\\' {
+ // not an escape sequence
+ return b[offset], 1
+ }
+ switch len(b) - offset {
+ case 1: // dangling escape
+ return 0, 0
+ case 2, 3: // too short to be \ddd
+ default: // maybe \ddd
+ if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) {
+ return dddToByte(b[offset+1:]), 4
+ }
+ }
+ // not \ddd, maybe a control char
+ switch b[offset+1] {
+ case 't':
+ return '\t', 2
+ case 'r':
+ return '\r', 2
+ case 'n':
+ return '\n', 2
+ default:
+ return b[offset+1], 2
+ }
+}
+
+func (rr *TXT) len() int {
+ l := rr.Hdr.len()
+ for _, t := range rr.Txt {
+ l += len(t) + 1
+ }
+ return l
+}
+
+type SPF struct {
+ Hdr RR_Header
+ Txt []string `dns:"txt"`
+}
+
+func (rr *SPF) Header() *RR_Header { return &rr.Hdr }
+func (rr *SPF) copy() RR {
+ cp := make([]string, len(rr.Txt), cap(rr.Txt))
+ copy(cp, rr.Txt)
+ return &SPF{*rr.Hdr.copyHeader(), cp}
+}
+
+func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
+
+func (rr *SPF) len() int {
+ l := rr.Hdr.len()
+ for _, t := range rr.Txt {
+ l += len(t) + 1
+ }
+ return l
+}
+
+type SRV struct {
+ Hdr RR_Header
+ Priority uint16
+ Weight uint16
+ Port uint16
+ Target string `dns:"domain-name"`
+}
+
+func (rr *SRV) Header() *RR_Header { return &rr.Hdr }
+func (rr *SRV) len() int { l := len(rr.Target) + 1; return rr.Hdr.len() + l + 6 }
+func (rr *SRV) copy() RR {
+ return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target}
+}
+
+func (rr *SRV) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Priority)) + " " +
+ strconv.Itoa(int(rr.Weight)) + " " +
+ strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target)
+}
+
+type NAPTR struct {
+ Hdr RR_Header
+ Order uint16
+ Preference uint16
+ Flags string
+ Service string
+ Regexp string
+ Replacement string `dns:"domain-name"`
+}
+
+func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr }
+func (rr *NAPTR) copy() RR {
+ return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
+}
+
+func (rr *NAPTR) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Order)) + " " +
+ strconv.Itoa(int(rr.Preference)) + " " +
+ "\"" + rr.Flags + "\" " +
+ "\"" + rr.Service + "\" " +
+ "\"" + rr.Regexp + "\" " +
+ rr.Replacement
+}
+
+func (rr *NAPTR) len() int {
+ return rr.Hdr.len() + 4 + len(rr.Flags) + 1 + len(rr.Service) + 1 +
+ len(rr.Regexp) + 1 + len(rr.Replacement) + 1
+}
+
+// The CERT resource record, see RFC 4398.
+type CERT struct {
+ Hdr RR_Header
+ Type uint16
+ KeyTag uint16
+ Algorithm uint8
+ Certificate string `dns:"base64"`
+}
+
+func (rr *CERT) Header() *RR_Header { return &rr.Hdr }
+func (rr *CERT) copy() RR {
+ return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
+}
+
+func (rr *CERT) String() string {
+ var (
+ ok bool
+ certtype, algorithm string
+ )
+ if certtype, ok = CertTypeToString[rr.Type]; !ok {
+ certtype = strconv.Itoa(int(rr.Type))
+ }
+ if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok {
+ algorithm = strconv.Itoa(int(rr.Algorithm))
+ }
+ return rr.Hdr.String() + certtype +
+ " " + strconv.Itoa(int(rr.KeyTag)) +
+ " " + algorithm +
+ " " + rr.Certificate
+}
+
+func (rr *CERT) len() int {
+ return rr.Hdr.len() + 5 +
+ base64.StdEncoding.DecodedLen(len(rr.Certificate))
+}
+
+// The DNAME resource record, see RFC 2672.
+type DNAME struct {
+ Hdr RR_Header
+ Target string `dns:"domain-name"`
+}
+
+func (rr *DNAME) Header() *RR_Header { return &rr.Hdr }
+func (rr *DNAME) copy() RR { return &DNAME{*rr.Hdr.copyHeader(), rr.Target} }
+func (rr *DNAME) len() int { l := len(rr.Target) + 1; return rr.Hdr.len() + l }
+
+func (rr *DNAME) String() string {
+ return rr.Hdr.String() + sprintName(rr.Target)
+}
+
+type A struct {
+ Hdr RR_Header
+ A net.IP `dns:"a"`
+}
+
+func (rr *A) Header() *RR_Header { return &rr.Hdr }
+func (rr *A) copy() RR { return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} }
+func (rr *A) len() int { return rr.Hdr.len() + net.IPv4len }
+
+func (rr *A) String() string {
+ if rr.A == nil {
+ return rr.Hdr.String()
+ }
+ return rr.Hdr.String() + rr.A.String()
+}
+
+type AAAA struct {
+ Hdr RR_Header
+ AAAA net.IP `dns:"aaaa"`
+}
+
+func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
+func (rr *AAAA) copy() RR { return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} }
+func (rr *AAAA) len() int { return rr.Hdr.len() + net.IPv6len }
+
+func (rr *AAAA) String() string {
+ if rr.AAAA == nil {
+ return rr.Hdr.String()
+ }
+ return rr.Hdr.String() + rr.AAAA.String()
+}
+
+type PX struct {
+ Hdr RR_Header
+ Preference uint16
+ Map822 string `dns:"domain-name"`
+ Mapx400 string `dns:"domain-name"`
+}
+
+func (rr *PX) Header() *RR_Header { return &rr.Hdr }
+func (rr *PX) copy() RR { return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} }
+func (rr *PX) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400)
+}
+func (rr *PX) len() int { return rr.Hdr.len() + 2 + len(rr.Map822) + 1 + len(rr.Mapx400) + 1 }
+
+type GPOS struct {
+ Hdr RR_Header
+ Longitude string
+ Latitude string
+ Altitude string
+}
+
+func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
+func (rr *GPOS) copy() RR { return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} }
+func (rr *GPOS) len() int {
+ return rr.Hdr.len() + len(rr.Longitude) + len(rr.Latitude) + len(rr.Altitude) + 3
+}
+func (rr *GPOS) String() string {
+ return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude
+}
+
+type LOC struct {
+ Hdr RR_Header
+ Version uint8
+ Size uint8
+ HorizPre uint8
+ VertPre uint8
+ Latitude uint32
+ Longitude uint32
+ Altitude uint32
+}
+
+func (rr *LOC) Header() *RR_Header { return &rr.Hdr }
+func (rr *LOC) len() int { return rr.Hdr.len() + 4 + 12 }
+func (rr *LOC) copy() RR {
+ return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
+}
+
+// cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent
+// format and returns a string in meters (two decimals for the cm part).
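+//
+// For example:
+//
+//  cmToM(5, 0) == "0.05" // 5 cm
+//  cmToM(5, 1) == "0.50" // 50 cm
+//  cmToM(1, 2) == "1"    // 100 cm = 1 m
+//  cmToM(1, 3) == "10"   // 1000 cm = 10 m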
+func cmToM(m, e uint8) string {
+ if e < 2 {
+ if e == 1 {
+ m *= 10
+ }
+
+ return fmt.Sprintf("0.%02d", m)
+ }
+
+ s := fmt.Sprintf("%d", m)
+ for e > 2 {
+ s += "0"
+ e--
+ }
+ return s
+}
+
+func (rr *LOC) String() string {
+ s := rr.Hdr.String()
+
+ lat := rr.Latitude
+ ns := "N"
+ if lat > LOC_EQUATOR {
+ lat = lat - LOC_EQUATOR
+ } else {
+ ns = "S"
+ lat = LOC_EQUATOR - lat
+ }
+ h := lat / LOC_DEGREES
+ lat = lat % LOC_DEGREES
+ m := lat / LOC_HOURS
+ lat = lat % LOC_HOURS
+ s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns)
+
+ lon := rr.Longitude
+ ew := "E"
+ if lon > LOC_PRIMEMERIDIAN {
+ lon = lon - LOC_PRIMEMERIDIAN
+ } else {
+ ew = "W"
+ lon = LOC_PRIMEMERIDIAN - lon
+ }
+ h = lon / LOC_DEGREES
+ lon = lon % LOC_DEGREES
+ m = lon / LOC_HOURS
+ lon = lon % LOC_HOURS
+ s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew)
+
+ var alt = float64(rr.Altitude) / 100
+ alt -= LOC_ALTITUDEBASE
+ if rr.Altitude%100 != 0 {
+ s += fmt.Sprintf("%.2fm ", alt)
+ } else {
+ s += fmt.Sprintf("%.0fm ", alt)
+ }
+
+ s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m "
+ s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m "
+ s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m"
+
+ return s
+}
+
+// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931.
+type SIG struct {
+ RRSIG
+}
+
+type RRSIG struct {
+ Hdr RR_Header
+ TypeCovered uint16
+ Algorithm uint8
+ Labels uint8
+ OrigTtl uint32
+ Expiration uint32
+ Inception uint32
+ KeyTag uint16
+ SignerName string `dns:"domain-name"`
+ Signature string `dns:"base64"`
+}
+
+func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr }
+func (rr *RRSIG) copy() RR {
+ return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
+}
+
+func (rr *RRSIG) String() string {
+ s := rr.Hdr.String()
+ s += Type(rr.TypeCovered).String()
+ s += " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.Labels)) +
+ " " + strconv.FormatInt(int64(rr.OrigTtl), 10) +
+ " " + TimeToString(rr.Expiration) +
+ " " + TimeToString(rr.Inception) +
+ " " + strconv.Itoa(int(rr.KeyTag)) +
+ " " + sprintName(rr.SignerName) +
+ " " + rr.Signature
+ return s
+}
+
+func (rr *RRSIG) len() int {
+ return rr.Hdr.len() + len(rr.SignerName) + 1 +
+ base64.StdEncoding.DecodedLen(len(rr.Signature)) + 18
+}
+
+type NSEC struct {
+ Hdr RR_Header
+ NextDomain string `dns:"domain-name"`
+ TypeBitMap []uint16 `dns:"nsec"`
+}
+
+func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSEC) copy() RR {
+ cp := make([]uint16, len(rr.TypeBitMap), cap(rr.TypeBitMap))
+ copy(cp, rr.TypeBitMap)
+ return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, cp}
+}
+
+func (rr *NSEC) String() string {
+ s := rr.Hdr.String() + sprintName(rr.NextDomain)
+ for i := 0; i < len(rr.TypeBitMap); i++ {
+ s += " " + Type(rr.TypeBitMap[i]).String()
+ }
+ return s
+}
+
+func (rr *NSEC) len() int {
+ l := rr.Hdr.len() + len(rr.NextDomain) + 1
+ lastwindow := uint32(1 << 16) // sentinel: larger than any window value (t / 256 <= 255); note Go's ^ is XOR, not exponentiation
+ for _, t := range rr.TypeBitMap {
+ window := t / 256
+ if uint32(window) != lastwindow {
+ l += 1 + 32
+ }
+ lastwindow = uint32(window)
+ }
+ return l
+}
+
+type DLV struct {
+ DS
+}
+
+type CDS struct {
+ DS
+}
+
+type DS struct {
+ Hdr RR_Header
+ KeyTag uint16
+ Algorithm uint8
+ DigestType uint8
+ Digest string `dns:"hex"`
+}
+
+func (rr *DS) Header() *RR_Header { return &rr.Hdr }
+func (rr *DS) len() int { return rr.Hdr.len() + 4 + len(rr.Digest)/2 }
+func (rr *DS) copy() RR {
+ return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+}
+
+func (rr *DS) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.DigestType)) +
+ " " + strings.ToUpper(rr.Digest)
+}
+
+type KX struct {
+ Hdr RR_Header
+ Preference uint16
+ Exchanger string `dns:"domain-name"`
+}
+
+func (rr *KX) Header() *RR_Header { return &rr.Hdr }
+func (rr *KX) len() int { return rr.Hdr.len() + 2 + len(rr.Exchanger) + 1 }
+func (rr *KX) copy() RR { return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} }
+
+func (rr *KX) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
+ " " + sprintName(rr.Exchanger)
+}
+
+type TA struct {
+ Hdr RR_Header
+ KeyTag uint16
+ Algorithm uint8
+ DigestType uint8
+ Digest string `dns:"hex"`
+}
+
+func (rr *TA) Header() *RR_Header { return &rr.Hdr }
+func (rr *TA) len() int { return rr.Hdr.len() + 4 + len(rr.Digest)/2 }
+func (rr *TA) copy() RR {
+ return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+}
+
+func (rr *TA) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.DigestType)) +
+ " " + strings.ToUpper(rr.Digest)
+}
+
+type TALINK struct {
+ Hdr RR_Header
+ PreviousName string `dns:"domain-name"`
+ NextName string `dns:"domain-name"`
+}
+
+func (rr *TALINK) Header() *RR_Header { return &rr.Hdr }
+func (rr *TALINK) copy() RR { return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} }
+func (rr *TALINK) len() int { return rr.Hdr.len() + len(rr.PreviousName) + len(rr.NextName) + 2 }
+
+func (rr *TALINK) String() string {
+ return rr.Hdr.String() +
+ sprintName(rr.PreviousName) + " " + sprintName(rr.NextName)
+}
+
+type SSHFP struct {
+ Hdr RR_Header
+ Algorithm uint8
+ Type uint8
+ FingerPrint string `dns:"hex"`
+}
+
+func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr }
+func (rr *SSHFP) len() int { return rr.Hdr.len() + 2 + len(rr.FingerPrint)/2 }
+func (rr *SSHFP) copy() RR {
+ return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint}
+}
+
+func (rr *SSHFP) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.Type)) +
+ " " + strings.ToUpper(rr.FingerPrint)
+}
+
+type IPSECKEY struct {
+ Hdr RR_Header
+ Precedence uint8
+ // GatewayType: 1: A record, 2: AAAA record, 3: domain name.
+ // 0 is used for no gateway, in which case GatewayName should be ".".
+ GatewayType uint8
+ Algorithm uint8
+ // Gateway can be an A record, AAAA record or a domain name.
+ GatewayA net.IP `dns:"a"`
+ GatewayAAAA net.IP `dns:"aaaa"`
+ GatewayName string `dns:"domain-name"`
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *IPSECKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *IPSECKEY) copy() RR {
+ return &IPSECKEY{*rr.Hdr.copyHeader(), rr.Precedence, rr.GatewayType, rr.Algorithm, rr.GatewayA, rr.GatewayAAAA, rr.GatewayName, rr.PublicKey}
+}
+
+func (rr *IPSECKEY) String() string {
+ s := rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) +
+ " " + strconv.Itoa(int(rr.GatewayType)) +
+ " " + strconv.Itoa(int(rr.Algorithm))
+ switch rr.GatewayType {
+ case 0:
+ fallthrough
+ case 3:
+ s += " " + rr.GatewayName
+ case 1:
+ s += " " + rr.GatewayA.String()
+ case 2:
+ s += " " + rr.GatewayAAAA.String()
+ default:
+ s += " ."
+ }
+ s += " " + rr.PublicKey
+ return s
+}
+
+func (rr *IPSECKEY) len() int {
+ l := rr.Hdr.len() + 3 + 1
+ switch rr.GatewayType {
+ default:
+ fallthrough
+ case 0:
+ fallthrough
+ case 3:
+ l += len(rr.GatewayName)
+ case 1:
+ l += 4
+ case 2:
+ l += 16
+ }
+ return l + base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+}
+
+type KEY struct {
+ DNSKEY
+}
+
+type CDNSKEY struct {
+ DNSKEY
+}
+
+type DNSKEY struct {
+ Hdr RR_Header
+ Flags uint16
+ Protocol uint8
+ Algorithm uint8
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *DNSKEY) len() int {
+ return rr.Hdr.len() + 4 + base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+}
+func (rr *DNSKEY) copy() RR {
+ return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+}
+
+func (rr *DNSKEY) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Protocol)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + rr.PublicKey
+}
+
+type RKEY struct {
+ Hdr RR_Header
+ Flags uint16
+ Protocol uint8
+ Algorithm uint8
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *RKEY) len() int { return rr.Hdr.len() + 4 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) }
+func (rr *RKEY) copy() RR {
+ return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+}
+
+func (rr *RKEY) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Protocol)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + rr.PublicKey
+}
+
+type NSAPPTR struct {
+ Hdr RR_Header
+ Ptr string `dns:"domain-name"`
+}
+
+func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSAPPTR) copy() RR { return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} }
+func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) }
+func (rr *NSAPPTR) len() int { return rr.Hdr.len() + len(rr.Ptr) }
+
+type NSEC3 struct {
+ Hdr RR_Header
+ Hash uint8
+ Flags uint8
+ Iterations uint16
+ SaltLength uint8
+ Salt string `dns:"size-hex"`
+ HashLength uint8
+ NextDomain string `dns:"size-base32"`
+ TypeBitMap []uint16 `dns:"nsec"`
+}
+
+func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSEC3) copy() RR {
+ cp := make([]uint16, len(rr.TypeBitMap), cap(rr.TypeBitMap))
+ copy(cp, rr.TypeBitMap)
+ return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, cp}
+}
+
+func (rr *NSEC3) String() string {
+ s := rr.Hdr.String()
+ s += strconv.Itoa(int(rr.Hash)) +
+ " " + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Iterations)) +
+ " " + saltToString(rr.Salt) +
+ " " + rr.NextDomain
+ for i := 0; i < len(rr.TypeBitMap); i++ {
+ s += " " + Type(rr.TypeBitMap[i]).String()
+ }
+ return s
+}
+
+func (rr *NSEC3) len() int {
+ l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1
+ lastwindow := uint32(1 << 16) // sentinel: larger than any window value (t / 256 <= 255); note Go's ^ is XOR, not exponentiation
+ for _, t := range rr.TypeBitMap {
+ window := t / 256
+ if uint32(window) != lastwindow {
+ l += 1 + 32
+ }
+ lastwindow = uint32(window)
+ }
+ return l
+}
+
+type NSEC3PARAM struct {
+ Hdr RR_Header
+ Hash uint8
+ Flags uint8
+ Iterations uint16
+ SaltLength uint8
+ Salt string `dns:"hex"`
+}
+
+func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSEC3PARAM) len() int { return rr.Hdr.len() + 2 + 4 + 1 + len(rr.Salt)/2 }
+func (rr *NSEC3PARAM) copy() RR {
+ return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
+}
+
+func (rr *NSEC3PARAM) String() string {
+ s := rr.Hdr.String()
+ s += strconv.Itoa(int(rr.Hash)) +
+ " " + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Iterations)) +
+ " " + saltToString(rr.Salt)
+ return s
+}
+
+type TKEY struct {
+ Hdr RR_Header
+ Algorithm string `dns:"domain-name"`
+ Inception uint32
+ Expiration uint32
+ Mode uint16
+ Error uint16
+ KeySize uint16
+ Key string
+ OtherLen uint16
+ OtherData string
+}
+
+func (rr *TKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *TKEY) copy() RR {
+ return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
+}
+
+func (rr *TKEY) String() string {
+ // It has no presentation format
+ return ""
+}
+
+func (rr *TKEY) len() int {
+ return rr.Hdr.len() + len(rr.Algorithm) + 1 + 4 + 4 + 6 +
+ len(rr.Key) + 2 + len(rr.OtherData)
+}
+
+// RFC3597 represents an unknown/generic RR.
+type RFC3597 struct {
+ Hdr RR_Header
+ Rdata string `dns:"hex"`
+}
+
+func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
+func (rr *RFC3597) copy() RR { return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} }
+func (rr *RFC3597) len() int { return rr.Hdr.len() + len(rr.Rdata)/2 + 2 }
+
+func (rr *RFC3597) String() string {
+ // Let's call it a hack
+ s := rfc3597Header(rr.Hdr)
+
+ s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata
+ return s
+}
+
+func rfc3597Header(h RR_Header) string {
+ var s string
+
+ s += sprintName(h.Name) + "\t"
+ s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
+ s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t"
+ s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t"
+ return s
+}
+
+type URI struct {
+ Hdr RR_Header
+ Priority uint16
+ Weight uint16
+ Target string `dns:"octet"`
+}
+
+func (rr *URI) Header() *RR_Header { return &rr.Hdr }
+func (rr *URI) copy() RR { return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} }
+func (rr *URI) len() int { return rr.Hdr.len() + 4 + len(rr.Target) }
+func (rr *URI) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) +
+ " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target)
+}
+
+type DHCID struct {
+ Hdr RR_Header
+ Digest string `dns:"base64"`
+}
+
+func (rr *DHCID) Header() *RR_Header { return &rr.Hdr }
+func (rr *DHCID) copy() RR { return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} }
+func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest }
+func (rr *DHCID) len() int { return rr.Hdr.len() + base64.StdEncoding.DecodedLen(len(rr.Digest)) }
+
+type TLSA struct {
+ Hdr RR_Header
+ Usage uint8
+ Selector uint8
+ MatchingType uint8
+ Certificate string `dns:"hex"`
+}
+
+func (rr *TLSA) Header() *RR_Header { return &rr.Hdr }
+func (rr *TLSA) len() int { return rr.Hdr.len() + 3 + len(rr.Certificate)/2 }
+
+func (rr *TLSA) copy() RR {
+ return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
+}
+
+func (rr *TLSA) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Usage)) +
+ " " + strconv.Itoa(int(rr.Selector)) +
+ " " + strconv.Itoa(int(rr.MatchingType)) +
+ " " + rr.Certificate
+}
+
+type HIP struct {
+ Hdr RR_Header
+ HitLength uint8
+ PublicKeyAlgorithm uint8
+ PublicKeyLength uint16
+ Hit string `dns:"hex"`
+ PublicKey string `dns:"base64"`
+ RendezvousServers []string `dns:"domain-name"`
+}
+
+func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
+func (rr *HIP) copy() RR {
+ cp := make([]string, len(rr.RendezvousServers), cap(rr.RendezvousServers))
+ copy(cp, rr.RendezvousServers)
+ return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, cp}
+}
+
+func (rr *HIP) String() string {
+ s := rr.Hdr.String() +
+ strconv.Itoa(int(rr.PublicKeyAlgorithm)) +
+ " " + rr.Hit +
+ " " + rr.PublicKey
+ for _, d := range rr.RendezvousServers {
+ s += " " + sprintName(d)
+ }
+ return s
+}
+
+func (rr *HIP) len() int {
+ l := rr.Hdr.len() + 4 +
+ len(rr.Hit)/2 +
+ base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ for _, d := range rr.RendezvousServers {
+ l += len(d) + 1
+ }
+ return l
+}
+
+type NINFO struct {
+ Hdr RR_Header
+ ZSData []string `dns:"txt"`
+}
+
+func (rr *NINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *NINFO) copy() RR {
+ cp := make([]string, len(rr.ZSData), cap(rr.ZSData))
+ copy(cp, rr.ZSData)
+ return &NINFO{*rr.Hdr.copyHeader(), cp}
+}
+
+func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) }
+
+func (rr *NINFO) len() int {
+ l := rr.Hdr.len()
+ for _, t := range rr.ZSData {
+ l += len(t) + 1
+ }
+ return l
+}
+
+type WKS struct {
+ Hdr RR_Header
+ Address net.IP `dns:"a"`
+ Protocol uint8
+ BitMap []uint16 `dns:"wks"`
+}
+
+func (rr *WKS) Header() *RR_Header { return &rr.Hdr }
+func (rr *WKS) len() int { return rr.Hdr.len() + net.IPv4len + 1 }
+
+func (rr *WKS) copy() RR {
+ cp := make([]uint16, len(rr.BitMap), cap(rr.BitMap))
+ copy(cp, rr.BitMap)
+ return &WKS{*rr.Hdr.copyHeader(), copyIP(rr.Address), rr.Protocol, cp}
+}
+
+func (rr *WKS) String() (s string) {
+ s = rr.Hdr.String()
+ if rr.Address != nil {
+ s += rr.Address.String()
+ }
+ // TODO(miek): missing protocol here, see /etc/protocols
+ for i := 0; i < len(rr.BitMap); i++ {
+ // should lookup the port
+ s += " " + strconv.Itoa(int(rr.BitMap[i]))
+ }
+ return s
+}
+
+type NID struct {
+ Hdr RR_Header
+ Preference uint16
+ NodeID uint64
+}
+
+func (rr *NID) Header() *RR_Header { return &rr.Hdr }
+func (rr *NID) copy() RR { return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} }
+func (rr *NID) len() int { return rr.Hdr.len() + 2 + 8 }
+
+func (rr *NID) String() string {
+ s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
+ node := fmt.Sprintf("%0.16x", rr.NodeID)
+ s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
+ return s
+}
+
+type L32 struct {
+ Hdr RR_Header
+ Preference uint16
+ Locator32 net.IP `dns:"a"`
+}
+
+func (rr *L32) Header() *RR_Header { return &rr.Hdr }
+func (rr *L32) copy() RR { return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} }
+func (rr *L32) len() int { return rr.Hdr.len() + net.IPv4len }
+
+func (rr *L32) String() string {
+ if rr.Locator32 == nil {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
+ }
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
+ " " + rr.Locator32.String()
+}
+
+type L64 struct {
+ Hdr RR_Header
+ Preference uint16
+ Locator64 uint64
+}
+
+func (rr *L64) Header() *RR_Header { return &rr.Hdr }
+func (rr *L64) copy() RR { return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} }
+func (rr *L64) len() int { return rr.Hdr.len() + 2 + 8 }
+
+func (rr *L64) String() string {
+ s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
+ node := fmt.Sprintf("%0.16X", rr.Locator64)
+ s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
+ return s
+}
+
+type LP struct {
+ Hdr RR_Header
+ Preference uint16
+ Fqdn string `dns:"domain-name"`
+}
+
+func (rr *LP) Header() *RR_Header { return &rr.Hdr }
+func (rr *LP) copy() RR { return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} }
+func (rr *LP) len() int { return rr.Hdr.len() + 2 + len(rr.Fqdn) + 1 }
+
+func (rr *LP) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn)
+}
+
+type EUI48 struct {
+ Hdr RR_Header
+ Address uint64 `dns:"uint48"`
+}
+
+func (rr *EUI48) Header() *RR_Header { return &rr.Hdr }
+func (rr *EUI48) copy() RR { return &EUI48{*rr.Hdr.copyHeader(), rr.Address} }
+func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) }
+func (rr *EUI48) len() int { return rr.Hdr.len() + 6 }
+
+type EUI64 struct {
+ Hdr RR_Header
+ Address uint64
+}
+
+func (rr *EUI64) Header() *RR_Header { return &rr.Hdr }
+func (rr *EUI64) copy() RR { return &EUI64{*rr.Hdr.copyHeader(), rr.Address} }
+func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) }
+func (rr *EUI64) len() int { return rr.Hdr.len() + 8 }
+
+type CAA struct {
+ Hdr RR_Header
+ Flag uint8
+ Tag string
+ Value string `dns:"octet"`
+}
+
+func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
+func (rr *CAA) copy() RR { return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} }
+func (rr *CAA) len() int { return rr.Hdr.len() + 2 + len(rr.Tag) + len(rr.Value) }
+func (rr *CAA) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value)
+}
+
+type UID struct {
+ Hdr RR_Header
+ Uid uint32
+}
+
+func (rr *UID) Header() *RR_Header { return &rr.Hdr }
+func (rr *UID) copy() RR { return &UID{*rr.Hdr.copyHeader(), rr.Uid} }
+func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) }
+func (rr *UID) len() int { return rr.Hdr.len() + 4 }
+
+type GID struct {
+ Hdr RR_Header
+ Gid uint32
+}
+
+func (rr *GID) Header() *RR_Header { return &rr.Hdr }
+func (rr *GID) copy() RR { return &GID{*rr.Hdr.copyHeader(), rr.Gid} }
+func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) }
+func (rr *GID) len() int { return rr.Hdr.len() + 4 }
+
+type UINFO struct {
+ Hdr RR_Header
+ Uinfo string
+}
+
+func (rr *UINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *UINFO) copy() RR { return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} }
+func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) }
+func (rr *UINFO) len() int { return rr.Hdr.len() + len(rr.Uinfo) + 1 }
+
+type EID struct {
+ Hdr RR_Header
+ Endpoint string `dns:"hex"`
+}
+
+func (rr *EID) Header() *RR_Header { return &rr.Hdr }
+func (rr *EID) copy() RR { return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} }
+func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) }
+func (rr *EID) len() int { return rr.Hdr.len() + len(rr.Endpoint)/2 }
+
+type NIMLOC struct {
+ Hdr RR_Header
+ Locator string `dns:"hex"`
+}
+
+func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr }
+func (rr *NIMLOC) copy() RR { return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} }
+func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) }
+func (rr *NIMLOC) len() int { return rr.Hdr.len() + len(rr.Locator)/2 }
+
+type OPENPGPKEY struct {
+ Hdr RR_Header
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *OPENPGPKEY) copy() RR { return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} }
+func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey }
+func (rr *OPENPGPKEY) len() int {
+ return rr.Hdr.len() + base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+}
+
+// TimeToString translates the RRSIG's inception and expiration times to the
+// string representation used when printing the record.
+// It takes serial arithmetic (RFC 1982) into account.
+func TimeToString(t uint32) string {
+ mod := ((int64(t) - time.Now().Unix()) / year68) - 1
+ if mod < 0 {
+ mod = 0
+ }
+ ti := time.Unix(int64(t)-(mod*year68), 0).UTC()
+ return ti.Format("20060102150405")
+}
+
+// StringToTime translates the RRSIG's inception and expiration times from
+// string values like "20110403154150" to a 32 bit integer.
+// It takes serial arithmetic (RFC 1982) into account.
+func StringToTime(s string) (uint32, error) {
+ t, e := time.Parse("20060102150405", s)
+ if e != nil {
+ return 0, e
+ }
+ mod := (t.Unix() / year68) - 1
+ if mod < 0 {
+ mod = 0
+ }
+ return uint32(t.Unix() - (mod * year68)), nil
+}
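+
+// For example, in the common case where no RFC 1982 wrap-around applies,
+// TimeToString(1420070400) yields "20150101000000" and
+// StringToTime("20150101000000") round-trips back to 1420070400.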
+
+// saltToString converts a NSECX salt to uppercase and
+// returns "-" when it is empty
+func saltToString(s string) string {
+ if len(s) == 0 {
+ return "-"
+ }
+ return strings.ToUpper(s)
+}
+
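+// euiToString formats an EUI-48 or EUI-64 address as dash-separated hex
+// octets; for example euiToString(0x00005e005301, 48) yields
+// "00-00-5e-00-53-01".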
+func euiToString(eui uint64, bits int) (hex string) {
+ switch bits {
+ case 64:
+ hex = fmt.Sprintf("%16.16x", eui)
+ hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
+ "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16]
+ case 48:
+ hex = fmt.Sprintf("%12.12x", eui)
+ hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
+ "-" + hex[8:10] + "-" + hex[10:12]
+ }
+ return
+}
+
+// copyIP returns a copy of ip.
+func copyIP(ip net.IP) net.IP {
+ p := make(net.IP, len(ip))
+ copy(p, ip)
+ return p
+}
+
+// Map of constructors for each RR type.
+var typeToRR = map[uint16]func() RR{
+ TypeA: func() RR { return new(A) },
+ TypeAAAA: func() RR { return new(AAAA) },
+ TypeAFSDB: func() RR { return new(AFSDB) },
+ TypeCAA: func() RR { return new(CAA) },
+ TypeCDS: func() RR { return new(CDS) },
+ TypeCERT: func() RR { return new(CERT) },
+ TypeCNAME: func() RR { return new(CNAME) },
+ TypeDHCID: func() RR { return new(DHCID) },
+ TypeDLV: func() RR { return new(DLV) },
+ TypeDNAME: func() RR { return new(DNAME) },
+ TypeKEY: func() RR { return new(KEY) },
+ TypeDNSKEY: func() RR { return new(DNSKEY) },
+ TypeDS: func() RR { return new(DS) },
+ TypeEUI48: func() RR { return new(EUI48) },
+ TypeEUI64: func() RR { return new(EUI64) },
+ TypeGID: func() RR { return new(GID) },
+ TypeGPOS: func() RR { return new(GPOS) },
+ TypeEID: func() RR { return new(EID) },
+ TypeHINFO: func() RR { return new(HINFO) },
+ TypeHIP: func() RR { return new(HIP) },
+ TypeIPSECKEY: func() RR { return new(IPSECKEY) },
+ TypeKX: func() RR { return new(KX) },
+ TypeL32: func() RR { return new(L32) },
+ TypeL64: func() RR { return new(L64) },
+ TypeLOC: func() RR { return new(LOC) },
+ TypeLP: func() RR { return new(LP) },
+ TypeMB: func() RR { return new(MB) },
+ TypeMD: func() RR { return new(MD) },
+ TypeMF: func() RR { return new(MF) },
+ TypeMG: func() RR { return new(MG) },
+ TypeMINFO: func() RR { return new(MINFO) },
+ TypeMR: func() RR { return new(MR) },
+ TypeMX: func() RR { return new(MX) },
+ TypeNAPTR: func() RR { return new(NAPTR) },
+ TypeNID: func() RR { return new(NID) },
+ TypeNINFO: func() RR { return new(NINFO) },
+ TypeNIMLOC: func() RR { return new(NIMLOC) },
+ TypeNS: func() RR { return new(NS) },
+ TypeNSAPPTR: func() RR { return new(NSAPPTR) },
+ TypeNSEC3: func() RR { return new(NSEC3) },
+ TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
+ TypeNSEC: func() RR { return new(NSEC) },
+ TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
+ TypeOPT: func() RR { return new(OPT) },
+ TypePTR: func() RR { return new(PTR) },
+ TypeRKEY: func() RR { return new(RKEY) },
+ TypeRP: func() RR { return new(RP) },
+ TypePX: func() RR { return new(PX) },
+ TypeSIG: func() RR { return new(SIG) },
+ TypeRRSIG: func() RR { return new(RRSIG) },
+ TypeRT: func() RR { return new(RT) },
+ TypeSOA: func() RR { return new(SOA) },
+ TypeSPF: func() RR { return new(SPF) },
+ TypeSRV: func() RR { return new(SRV) },
+ TypeSSHFP: func() RR { return new(SSHFP) },
+ TypeTA: func() RR { return new(TA) },
+ TypeTALINK: func() RR { return new(TALINK) },
+ TypeTKEY: func() RR { return new(TKEY) },
+ TypeTLSA: func() RR { return new(TLSA) },
+ TypeTSIG: func() RR { return new(TSIG) },
+ TypeTXT: func() RR { return new(TXT) },
+ TypeUID: func() RR { return new(UID) },
+ TypeUINFO: func() RR { return new(UINFO) },
+ TypeURI: func() RR { return new(URI) },
+ TypeWKS: func() RR { return new(WKS) },
+ TypeX25: func() RR { return new(X25) },
+}
diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
new file mode 100644
index 0000000000..fc86563744
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp.go
@@ -0,0 +1,58 @@
+// +build !windows
+
+package dns
+
+import (
+ "net"
+ "syscall"
+)
+
+// SessionUDP holds the remote address and the associated
+// out-of-band data.
+type SessionUDP struct {
+ raddr *net.UDPAddr
+ context []byte
+}
+
+// RemoteAddr returns the remote network address.
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// setUDPSocketOptions sets the UDP socket options.
+// This function is implemented on a per platform basis. See udp_*.go for more details
+func setUDPSocketOptions(conn *net.UDPConn) error {
+ sa, err := getUDPSocketName(conn)
+ if err != nil {
+ return err
+ }
+ switch sa.(type) {
+ case *syscall.SockaddrInet6:
+ v6only, err := getUDPSocketOptions6Only(conn)
+ if err != nil {
+ return err
+ }
+ setUDPSocketOptions6(conn)
+ if !v6only {
+ setUDPSocketOptions4(conn)
+ }
+ case *syscall.SockaddrInet4:
+ setUDPSocketOptions4(conn)
+ }
+ return nil
+}
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+ oob := make([]byte, 40)
+ n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
+ if err != nil {
+ return n, nil, err
+ }
+ return n, &SessionUDP{raddr, oob[:oobn]}, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+ n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
+ return n, err
+}
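+
+// A sketch of how these helpers fit together in a UDP server loop (reply and
+// its construction are assumed): the session returned by ReadFromSessionUDP
+// carries the out-of-band data needed so the reply leaves from the same
+// address the query arrived on:
+//
+//  b := make([]byte, 65535)
+//  n, session, err := ReadFromSessionUDP(conn, b)
+//  if err == nil {
+//      // handle b[:n] and build reply ...
+//      WriteToSessionUDP(conn, reply, session)
+//  }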
diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go
new file mode 100644
index 0000000000..7a107857e1
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_linux.go
@@ -0,0 +1,63 @@
+// +build linux
+
+package dns
+
+// See:
+// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and
+// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/
+//
+// Why do we need this: when listening on 0.0.0.0 with UDP, the kernel decides which
+// interface an outgoing packet uses, and that might not always be the correct one. This
+// code makes sure the egress packet's interface matches the ingress packet's interface.
+
+import (
+ "net"
+ "syscall"
+)
+
+// setUDPSocketOptions4 prepares the v4 socket for sessions.
+func setUDPSocketOptions4(conn *net.UDPConn) error {
+ file, err := conn.File()
+ if err != nil {
+ return err
+ }
+ if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
+ return err
+ }
+ return nil
+}
+
+// setUDPSocketOptions6 prepares the v6 socket for sessions.
+func setUDPSocketOptions6(conn *net.UDPConn) error {
+ file, err := conn.File()
+ if err != nil {
+ return err
+ }
+ if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
+ return err
+ }
+ return nil
+}
+
+// getUDPSocketOptions6Only returns true if the socket is v6 only and false when it is
+// a combined v4/v6 (dual-stack) socket.
+func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) {
+ file, err := conn.File()
+ if err != nil {
+ return false, err
+ }
+ // dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections
+ v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY)
+ if err != nil {
+ return false, err
+ }
+ return v6only == 1, nil
+}
+
+func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) {
+ file, err := conn.File()
+ if err != nil {
+ return nil, err
+ }
+ return syscall.Getsockname(int(file.Fd()))
+}
diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go
new file mode 100644
index 0000000000..c38dd3e7f0
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_other.go
@@ -0,0 +1,17 @@
+// +build !linux
+
+package dns
+
+import (
+ "net"
+ "syscall"
+)
+
+// These do nothing. See udp_linux.go for an example of how to implement this.
+
+// We tried to adhere to some kind of naming scheme.
+
+func setUDPSocketOptions4(conn *net.UDPConn) error { return nil }
+func setUDPSocketOptions6(conn *net.UDPConn) error { return nil }
+func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil }
+func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil }
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
new file mode 100644
index 0000000000..2ce4b33002
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_windows.go
@@ -0,0 +1,34 @@
+// +build windows
+
+package dns
+
+import "net"
+
+type SessionUDP struct {
+ raddr *net.UDPAddr
+}
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+ n, raddr, err := conn.ReadFrom(b)
+ if err != nil {
+ return n, nil, err
+ }
+ session := &SessionUDP{raddr.(*net.UDPAddr)}
+ return n, session, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+ n, err := conn.WriteTo(b, session.raddr)
+ return n, err
+}
+
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// setUDPSocketOptions sets the UDP socket options.
+// This function is implemented on a per platform basis. See udp_*.go for more details
+func setUDPSocketOptions(conn *net.UDPConn) error {
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go
new file mode 100644
index 0000000000..3539987ccb
--- /dev/null
+++ b/vendor/github.com/miekg/dns/update.go
@@ -0,0 +1,94 @@
+package dns
+
+// NameUsed sets the RRs in the prereq section to
+// "Name is in use" RRs. RFC 2136 section 2.4.4.
+func (u *Msg) NameUsed(rr []RR) {
+ u.Answer = make([]RR, len(rr))
+ for i, r := range rr {
+ u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}
+ }
+}
+
+// NameNotUsed sets the RRs in the prereq section to
+// "Name is not in use" RRs. RFC 2136 section 2.4.5.
+func (u *Msg) NameNotUsed(rr []RR) {
+ u.Answer = make([]RR, len(rr))
+ for i, r := range rr {
+ u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}
+ }
+}
+
+// Used sets the RRs in the prereq section to
+// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
+func (u *Msg) Used(rr []RR) {
+ if len(u.Question) == 0 {
+ panic("dns: empty question section")
+ }
+ u.Answer = make([]RR, len(rr))
+ for i, r := range rr {
+ u.Answer[i] = r
+ u.Answer[i].Header().Class = u.Question[0].Qclass
+ }
+}
+
+// RRsetUsed sets the RRs in the prereq section to
+// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
+func (u *Msg) RRsetUsed(rr []RR) {
+ u.Answer = make([]RR, len(rr))
+ for i, r := range rr {
+ u.Answer[i] = r
+ u.Answer[i].Header().Class = ClassANY
+ u.Answer[i].Header().Ttl = 0
+ u.Answer[i].Header().Rdlength = 0
+ }
+}
+
+// RRsetNotUsed sets the RRs in the prereq section to
+// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
+func (u *Msg) RRsetNotUsed(rr []RR) {
+ u.Answer = make([]RR, len(rr))
+ for i, r := range rr {
+ u.Answer[i] = r
+ u.Answer[i].Header().Class = ClassNONE
+ u.Answer[i].Header().Rdlength = 0
+ u.Answer[i].Header().Ttl = 0
+ }
+}
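+
+// An illustrative sketch (the names are hypothetical) of attaching a
+// prerequisite to an update message prepared with SetUpdate:
+//
+// m := new(Msg)
+// m.SetUpdate("example.org.")
+// nx, _ := NewRR("host.example.org. 0 IN A 0.0.0.0")
+// m.NameNotUsed([]RR{nx}) // only apply the update if the name is free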
+
+// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
+func (u *Msg) Insert(rr []RR) {
+ if len(u.Question) == 0 {
+ panic("dns: empty question section")
+ }
+ u.Ns = make([]RR, len(rr))
+ for i, r := range rr {
+ u.Ns[i] = r
+ u.Ns[i].Header().Class = u.Question[0].Qclass
+ }
+}
+
+// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
+func (u *Msg) RemoveRRset(rr []RR) {
+ u.Ns = make([]RR, len(rr))
+ for i, r := range rr {
+ u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}
+ }
+}
+
+// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3.
+func (u *Msg) RemoveName(rr []RR) {
+ u.Ns = make([]RR, len(rr))
+ for i, r := range rr {
+ u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}
+ }
+}
+
+// Remove creates a dynamic update packet that deletes an RR from an RRset, see RFC 2136 section 2.5.4.
+func (u *Msg) Remove(rr []RR) {
+ u.Ns = make([]RR, len(rr))
+ for i, r := range rr {
+ u.Ns[i] = r
+ u.Ns[i].Header().Class = ClassNONE
+ u.Ns[i].Header().Ttl = 0
+ }
+}
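+
+// A minimal sketch of a complete RFC 2136 update, assuming the hypothetical
+// zone "example.org.":
+//
+// m := new(Msg)
+// m.SetUpdate("example.org.")
+// rr, _ := NewRR("host.example.org. 300 IN A 10.0.0.1")
+// m.Insert([]RR{rr}) // add the RRset to the zone
+// // ... then send m with a Client or a custom Conn.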
diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go
new file mode 100644
index 0000000000..2da75931e7
--- /dev/null
+++ b/vendor/github.com/miekg/dns/xfr.go
@@ -0,0 +1,232 @@
+package dns
+
+import (
+ "time"
+)
+
+// Envelope is used when doing a zone transfer with a remote server.
+type Envelope struct {
+ RR []RR // The set of RRs in the answer section of the xfr reply message.
+ Error error // If something went wrong, this contains the error.
+}
+
+// A Transfer defines parameters that are used during a zone transfer.
+type Transfer struct {
+ *Conn
+ DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
+ ReadTimeout time.Duration // net.Conn.SetReadDeadline value for connections, defaults to 2 seconds
+ WriteTimeout time.Duration // net.Conn.SetWriteDeadline value for connections, defaults to 2 seconds
+ TsigSecret map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
+ tsigTimersOnly bool
+}
+
+// Think we need a way to stop the transfer.
+
+// In performs an incoming transfer with the server at address a.
+func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
+ timeout := dnsTimeout
+ if t.DialTimeout != 0 {
+ timeout = t.DialTimeout
+ }
+ t.Conn, err = DialTimeout("tcp", a, timeout)
+ if err != nil {
+ return nil, err
+ }
+ if err := t.WriteMsg(q); err != nil {
+ return nil, err
+ }
+ env = make(chan *Envelope)
+ go func() {
+ if q.Question[0].Qtype == TypeAXFR {
+ go t.inAxfr(q.Id, env)
+ return
+ }
+ if q.Question[0].Qtype == TypeIXFR {
+ go t.inIxfr(q.Id, env)
+ return
+ }
+ }()
+ return env, nil
+}
+
+func (t *Transfer) inAxfr(id uint16, c chan *Envelope) {
+ first := true
+ defer t.Close()
+ defer close(c)
+ timeout := dnsTimeout
+ if t.ReadTimeout != 0 {
+ timeout = t.ReadTimeout
+ }
+ for {
+ t.Conn.SetReadDeadline(time.Now().Add(timeout))
+ in, err := t.ReadMsg()
+ if err != nil {
+ c <- &Envelope{nil, err}
+ return
+ }
+ if id != in.Id {
+ c <- &Envelope{in.Answer, ErrId}
+ return
+ }
+ if first {
+ if !isSOAFirst(in) {
+ c <- &Envelope{in.Answer, ErrSoa}
+ return
+ }
+ first = !first
+ // only one answer that is SOA, receive more
+ if len(in.Answer) == 1 {
+ t.tsigTimersOnly = true
+ c <- &Envelope{in.Answer, nil}
+ continue
+ }
+ }
+
+ if !first {
+ t.tsigTimersOnly = true // Subsequent envelopes use this.
+ if isSOALast(in) {
+ c <- &Envelope{in.Answer, nil}
+ return
+ }
+ c <- &Envelope{in.Answer, nil}
+ }
+ }
+}
+
+func (t *Transfer) inIxfr(id uint16, c chan *Envelope) {
+ serial := uint32(0) // The first serial seen is the current server serial
+ first := true
+ defer t.Close()
+ defer close(c)
+ timeout := dnsTimeout
+ if t.ReadTimeout != 0 {
+ timeout = t.ReadTimeout
+ }
+ for {
+ t.SetReadDeadline(time.Now().Add(timeout))
+ in, err := t.ReadMsg()
+ if err != nil {
+ c <- &Envelope{nil, err}
+ return
+ }
+ if id != in.Id {
+ c <- &Envelope{in.Answer, ErrId}
+ return
+ }
+ if first {
+ // A single SOA RR signals "no changes"
+ if len(in.Answer) == 1 && isSOAFirst(in) {
+ c <- &Envelope{in.Answer, nil}
+ return
+ }
+
+ // Check if the returned answer is ok
+ if !isSOAFirst(in) {
+ c <- &Envelope{in.Answer, ErrSoa}
+ return
+ }
+ // This serial is important
+ serial = in.Answer[0].(*SOA).Serial
+ first = !first
+ }
+
+ // Now we need to check each message for SOA records, to see what we need to do
+ if !first {
+ t.tsigTimersOnly = true
+ // If the last record in the IXFR contains the server's SOA, we should quit
+ if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok {
+ if v.Serial == serial {
+ c <- &Envelope{in.Answer, nil}
+ return
+ }
+ }
+ c <- &Envelope{in.Answer, nil}
+ }
+ }
+}
+
+// Out performs an outgoing transfer with the client connecting in w.
+// Basic use pattern:
+//
+// ch := make(chan *dns.Envelope)
+// tr := new(dns.Transfer)
+// tr.Out(w, r, ch)
+// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
+// close(ch)
+// w.Hijack()
+// // w.Close() // Client closes connection
+//
+// The server is responsible for sending the correct sequence of RRs through the
+// channel ch.
+func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
+ for x := range ch {
+ r := new(Msg)
+ // Compress?
+ r.SetReply(q)
+ r.Authoritative = true
+ // assume it fits TODO(miek): fix
+ r.Answer = append(r.Answer, x.RR...)
+ if err := w.WriteMsg(r); err != nil {
+ return err
+ }
+ }
+ w.TsigTimersOnly(true)
+ return nil
+}
+
+// ReadMsg reads a message from the transfer connection t.
+func (t *Transfer) ReadMsg() (*Msg, error) {
+ m := new(Msg)
+ p := make([]byte, MaxMsgSize)
+ n, err := t.Read(p)
+ if err != nil && n == 0 {
+ return nil, err
+ }
+ p = p[:n]
+ if err := m.Unpack(p); err != nil {
+ return nil, err
+ }
+ if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
+ if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
+ return m, ErrSecret
+ }
+ // Need to work on the original message p, as that was used to calculate the tsig.
+ err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
+ t.tsigRequestMAC = ts.MAC
+ }
+ return m, err
+}
+
+// WriteMsg writes a message through the transfer connection t.
+func (t *Transfer) WriteMsg(m *Msg) (err error) {
+ var out []byte
+ if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
+ if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
+ return ErrSecret
+ }
+ out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
+ } else {
+ out, err = m.Pack()
+ }
+ if err != nil {
+ return err
+ }
+ if _, err = t.Write(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+func isSOAFirst(in *Msg) bool {
+ if len(in.Answer) > 0 {
+ return in.Answer[0].Header().Rrtype == TypeSOA
+ }
+ return false
+}
+
+func isSOALast(in *Msg) bool {
+ if len(in.Answer) > 0 {
+ return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA
+ }
+ return false
+}
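+
+// A minimal sketch of an incoming AXFR using In; the server address is a
+// placeholder:
+//
+// tr := new(Transfer)
+// m := new(Msg)
+// m.SetAxfr("example.org.")
+// ch, err := tr.In(m, "ns1.example.org:53")
+// if err == nil {
+// for env := range ch {
+// if env.Error != nil {
+// break
+// }
+// // env.RR holds the next batch of records.
+// }
+// }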
diff --git a/vendor/github.com/miekg/dns/zgenerate.go b/vendor/github.com/miekg/dns/zgenerate.go
new file mode 100644
index 0000000000..c506e96266
--- /dev/null
+++ b/vendor/github.com/miekg/dns/zgenerate.go
@@ -0,0 +1,158 @@
+package dns
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Parse the $GENERATE statement as used in BIND9 zones.
+// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
+// We are called after '$GENERATE '. After that we expect:
+// * the range (12-24/2)
+// * lhs (ownername)
+// * [[ttl][class]]
+// * type
+// * rhs (rdata)
+// But we are lazy here, only the range is parsed; *all* occurrences
+// of $ after that are interpreted.
+// Any errors are returned as a string value; the empty string signals
+// "no error".
+func generate(l lex, c chan lex, t chan *Token, o string) string {
+ step := 1
+ if i := strings.IndexAny(l.token, "/"); i != -1 {
+ if i+1 == len(l.token) {
+ return "bad step in $GENERATE range"
+ }
+ if s, e := strconv.Atoi(l.token[i+1:]); e == nil {
+ if s < 0 {
+ return "bad step in $GENERATE range"
+ }
+ step = s
+ } else {
+ return "bad step in $GENERATE range"
+ }
+ l.token = l.token[:i]
+ }
+ sx := strings.SplitN(l.token, "-", 2)
+ if len(sx) != 2 {
+ return "bad start-stop in $GENERATE range"
+ }
+ start, err := strconv.Atoi(sx[0])
+ if err != nil {
+ return "bad start in $GENERATE range"
+ }
+ end, err := strconv.Atoi(sx[1])
+ if err != nil {
+ return "bad stop in $GENERATE range"
+ }
+ if end < 0 || start < 0 || end < start {
+ return "bad range in $GENERATE range"
+ }
+
+ <-c // _BLANK
+ // Create a complete new string, which we then parse again.
+ s := ""
+BuildRR:
+ l = <-c
+ if l.value != zNewline && l.value != zEOF {
+ s += l.token
+ goto BuildRR
+ }
+ for i := start; i <= end; i += step {
+ var (
+ escape bool
+ dom bytes.Buffer
+ mod string
+ err string
+ offset int
+ )
+
+ for j := 0; j < len(s); j++ { // No 'range' because we need to jump around
+ switch s[j] {
+ case '\\':
+ if escape {
+ dom.WriteByte('\\')
+ escape = false
+ continue
+ }
+ escape = true
+ case '$':
+ mod = "%d"
+ offset = 0
+ if escape {
+ dom.WriteByte('$')
+ escape = false
+ continue
+ }
+ escape = false
+ if j+1 >= len(s) { // End of the string
+ dom.WriteString(fmt.Sprintf(mod, i+offset))
+ continue
+ } else {
+ if s[j+1] == '$' {
+ dom.WriteByte('$')
+ j++
+ continue
+ }
+ }
+ // Search for { and }
+ if s[j+1] == '{' { // Modifier block
+ sep := strings.Index(s[j+2:], "}")
+ if sep == -1 {
+ return "bad modifier in $GENERATE"
+ }
+ mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
+ if err != "" {
+ return err
+ }
+ j += 2 + sep // Jump to it
+ }
+ dom.WriteString(fmt.Sprintf(mod, i+offset))
+ default:
+ if escape { // Pretty useless here
+ escape = false
+ continue
+ }
+ dom.WriteByte(s[j])
+ }
+ }
+ // Re-parse the RR and send it on the current channel t
+ rx, e := NewRR("$ORIGIN " + o + "\n" + dom.String())
+ if e != nil {
+ return e.(*ParseError).err
+ }
+ t <- &Token{RR: rx}
+ // It's more efficient to first build the rrlist and then parse it in
+ // one go! But is this a problem?
+ }
+ return ""
+}
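+
+// An illustrative sketch (the owner names are hypothetical): under an origin
+// of example.org. the statement
+//
+// $GENERATE 1-3 host-${0,2,d} A 10.0.0.$
+//
+// expands to host-01 through host-03 with addresses 10.0.0.1 through
+// 10.0.0.3; every unescaped $ is replaced by the iterator value.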
+
+// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
+func modToPrintf(s string) (string, int, string) {
+ xs := strings.SplitN(s, ",", 3)
+ if len(xs) != 3 {
+ return "", 0, "bad modifier in $GENERATE"
+ }
+ // xs[0] is offset, xs[1] is width, xs[2] is base
+ if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
+ return "", 0, "bad base in $GENERATE"
+ }
+ offset, err := strconv.Atoi(xs[0])
+ if err != nil || offset > 255 {
+ return "", 0, "bad offset in $GENERATE"
+ }
+ width, err := strconv.Atoi(xs[1])
+ if err != nil || width > 255 {
+ return "", offset, "bad width in $GENERATE"
+ }
+ switch {
+ case width < 0:
+ return "", offset, "bad width in $GENERATE"
+ case width == 0:
+ return "%" + xs[1] + xs[2], offset, ""
+ }
+ return "%0" + xs[1] + xs[2], offset, ""
+}
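+
+// A small worked example of the conversion above: the modifier "0,2,d"
+// yields the format string "%02d" with offset 0, so an iterator value of 7
+// prints as "07"; "20,3,x" yields "%03x" with offset 20, so 7 prints as "01b".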
diff --git a/vendor/github.com/miekg/dns/zscan.go b/vendor/github.com/miekg/dns/zscan.go
new file mode 100644
index 0000000000..40ba35c36d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/zscan.go
@@ -0,0 +1,972 @@
+package dns
+
+import (
+ "io"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+)
+
+type debugging bool
+
+const debug debugging = false
+
+func (d debugging) Printf(format string, args ...interface{}) {
+ if d {
+ log.Printf(format, args...)
+ }
+}
+
+const maxTok = 2048 // Largest token we can return.
+const maxUint16 = 1<<16 - 1
+
+// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
+// * Add ownernames if they are left blank;
+// * Suppress sequences of spaces;
+// * Make each RR fit on one line (zNewline is sent as last)
+// * Handle comments: ;
+// * Handle braces - anywhere.
+const (
+ // Zonefile
+ zEOF = iota
+ zString
+ zBlank
+ zQuote
+ zNewline
+ zRrtpe
+ zOwner
+ zClass
+ zDirOrigin // $ORIGIN
+ zDirTtl // $TTL
+ zDirInclude // $INCLUDE
+ zDirGenerate // $GENERATE
+
+ // Privatekey file
+ zValue
+ zKey
+
+ zExpectOwnerDir // Ownername
+ zExpectOwnerBl // Whitespace after the ownername
+ zExpectAny // Expect rrtype, ttl or class
+ zExpectAnyNoClass // Expect rrtype or ttl
+ zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS
+ zExpectAnyNoTtl // Expect rrtype or class
+ zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL
+ zExpectRrtype // Expect rrtype
+ zExpectRrtypeBl // Whitespace BEFORE rrtype
+ zExpectRdata // The first element of the rdata
+ zExpectDirTtlBl // Space after directive $TTL
+ zExpectDirTtl // Directive $TTL
+ zExpectDirOriginBl // Space after directive $ORIGIN
+ zExpectDirOrigin // Directive $ORIGIN
+ zExpectDirIncludeBl // Space after directive $INCLUDE
+ zExpectDirInclude // Directive $INCLUDE
+ zExpectDirGenerate // Directive $GENERATE
+ zExpectDirGenerateBl // Space after directive $GENERATE
+)
+
+// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
+// where the error occurred.
+type ParseError struct {
+ file string
+ err string
+ lex lex
+}
+
+func (e *ParseError) Error() (s string) {
+ if e.file != "" {
+ s = e.file + ": "
+ }
+ s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
+ strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
+ return
+}
+
+type lex struct {
+ token string // text of the token
+ tokenUpper string // uppercase text of the token
+ length int // length of the token
+ err bool // when true, token text has lexer error
+ value uint8 // value: zString, _BLANK, etc.
+ line int // line in the file
+ column int // column in the file
+ torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
+ comment string // any comment text seen
+}
+
+// Token holds the tokens that are returned when a zone file is parsed.
+type Token struct {
+ // The scanned resource record when Error is nil.
+ RR
+ // When an error occurred, this has the error specifics.
+ Error *ParseError
+ // A potential comment positioned after the RR and on the same line.
+ Comment string
+}
+
+// NewRR reads the RR contained in the string s. Only the first RR is
+// returned. If s contains no RR, return nil with no error. The class
+// defaults to IN and TTL defaults to 3600. The full zone file syntax
+// like $TTL, $ORIGIN, etc. is supported. All fields of the returned
+// RR are set, except RR.Header().Rdlength which is set to 0.
+func NewRR(s string) (RR, error) {
+ if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
+ return ReadRR(strings.NewReader(s+"\n"), "")
+ }
+ return ReadRR(strings.NewReader(s), "")
+}
+
+// ReadRR reads the RR contained in q.
+// See NewRR for more documentation.
+func ReadRR(q io.Reader, filename string) (RR, error) {
+ r := <-parseZoneHelper(q, ".", filename, 1)
+ if r == nil {
+ return nil, nil
+ }
+
+ if r.Error != nil {
+ return nil, r.Error
+ }
+ return r.RR, nil
+}
+
+// ParseZone reads an RFC 1035 style zonefile from r. It returns *Tokens on the
+// returned channel, which consist of the parsed RR, a potential comment, or an error.
+// If there is an error the RR is nil. The string file is only used
+// in error reporting. The string origin is used as the initial origin, as
+// if the file would start with: $ORIGIN origin .
+// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
+// The channel t is closed by ParseZone when the end of r is reached.
+//
+// Basic usage pattern when reading from a string (z) containing the
+// zone data:
+//
+// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
+// if x.Error == nil {
+// // Do something with x.RR
+// }
+// }
+//
+// Comments specified after an RR (and on the same line!) are returned too:
+//
+// foo. IN A 10.0.0.1 ; this is a comment
+//
+// The text "; this is comment" is returned in Token.Comment. Comments inside the
+// RR are discarded. Comments on a line by themselves are discarded too.
+func ParseZone(r io.Reader, origin, file string) chan *Token {
+ return parseZoneHelper(r, origin, file, 10000)
+}
+
+func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token {
+ t := make(chan *Token, chansize)
+ go parseZone(r, origin, file, t, 0)
+ return t
+}
+
+func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
+ defer func() {
+ if include == 0 {
+ close(t)
+ }
+ }()
+ s := scanInit(r)
+ c := make(chan lex)
+ // Start the lexer
+ go zlexer(s, c)
+ // 6 possible beginnings of a line, _ is a space
+ // 0. zRRTYPE -> all omitted until the rrtype
+ // 1. zOwner _ zRrtype -> class/ttl omitted
+ // 2. zOwner _ zString _ zRrtype -> class omitted
+ // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
+ // 4. zOwner _ zClass _ zRrtype -> ttl omitted
+ // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
+ // After detecting these, we know the zRrtype so we can jump to functions
+ // handling the rdata for each of these types.
+
+ if origin == "" {
+ origin = "."
+ }
+ origin = Fqdn(origin)
+ if _, ok := IsDomainName(origin); !ok {
+ t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
+ return
+ }
+
+ st := zExpectOwnerDir // initial state
+ var h RR_Header
+ var defttl uint32 = defaultTtl
+ var prevName string
+ for l := range c {
+ // Lexer spotted an error already
+ if l.err {
+ t <- &Token{Error: &ParseError{f, l.token, l}}
+ return
+ }
+ switch st {
+ case zExpectOwnerDir:
+ // We can also expect a directive, like $TTL or $ORIGIN
+ h.Ttl = defttl
+ h.Class = ClassINET
+ switch l.value {
+ case zNewline:
+ st = zExpectOwnerDir
+ case zOwner:
+ h.Name = l.token
+ if l.token[0] == '@' {
+ h.Name = origin
+ prevName = h.Name
+ st = zExpectOwnerBl
+ break
+ }
+ if h.Name[l.length-1] != '.' {
+ h.Name = appendOrigin(h.Name, origin)
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "bad owner name", l}}
+ return
+ }
+ prevName = h.Name
+ st = zExpectOwnerBl
+ case zDirTtl:
+ st = zExpectDirTtlBl
+ case zDirOrigin:
+ st = zExpectDirOriginBl
+ case zDirInclude:
+ st = zExpectDirIncludeBl
+ case zDirGenerate:
+ st = zExpectDirGenerateBl
+ case zRrtpe:
+ h.Name = prevName
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ case zClass:
+ h.Name = prevName
+ h.Class = l.torc
+ st = zExpectAnyNoClassBl
+ case zBlank:
+ // Discard, can happen when there is nothing on the
+ // line except the RR type
+ case zString:
+ ttl, ok := stringToTtl(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "not a TTL", l}}
+ return
+ }
+ h.Ttl = ttl
+ // Don't touch the defttl; we should take the $TTL value
+ // defttl = ttl
+ st = zExpectAnyNoTtlBl
+
+ default:
+ t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
+ return
+ }
+ case zExpectDirIncludeBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
+ return
+ }
+ st = zExpectDirInclude
+ case zExpectDirInclude:
+ if l.value != zString {
+ t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
+ return
+ }
+ neworigin := origin // Optionally a new origin may be set after the filename; if not, use the current one
+ l := <-c
+ switch l.value {
+ case zBlank:
+ l := <-c
+ if l.value == zString {
+ if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err {
+ t <- &Token{Error: &ParseError{f, "bad origin name", l}}
+ return
+ }
+ // a new origin is specified.
+ if l.token[l.length-1] != '.' {
+ if origin != "." { // Prevent .. endings
+ neworigin = l.token + "." + origin
+ } else {
+ neworigin = l.token + origin
+ }
+ } else {
+ neworigin = l.token
+ }
+ }
+ case zNewline, zEOF:
+ // Ok
+ default:
+ t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}}
+ return
+ }
+ // Start with the new file
+ r1, e1 := os.Open(l.token)
+ if e1 != nil {
+ t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}}
+ return
+ }
+ if include+1 > 7 {
+ t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
+ return
+ }
+ parseZone(r1, neworigin, l.token, t, include+1) // origin before filename, matching parseZone's signature
+ st = zExpectOwnerDir
+ case zExpectDirTtlBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
+ return
+ }
+ st = zExpectDirTtl
+ case zExpectDirTtl:
+ if l.value != zString {
+ t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
+ return
+ }
+ if e, _ := slurpRemainder(c, f); e != nil {
+ t <- &Token{Error: e}
+ return
+ }
+ ttl, ok := stringToTtl(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
+ return
+ }
+ defttl = ttl
+ st = zExpectOwnerDir
+ case zExpectDirOriginBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
+ return
+ }
+ st = zExpectDirOrigin
+ case zExpectDirOrigin:
+ if l.value != zString {
+ t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
+ return
+ }
+ if e, _ := slurpRemainder(c, f); e != nil {
+ t <- &Token{Error: e}
+ return
+ }
+ if _, ok := IsDomainName(l.token); !ok {
+ t <- &Token{Error: &ParseError{f, "bad origin name", l}}
+ return
+ }
+ if l.token[l.length-1] != '.' {
+ if origin != "." { // Prevent .. endings
+ origin = l.token + "." + origin
+ } else {
+ origin = l.token + origin
+ }
+ } else {
+ origin = l.token
+ }
+ st = zExpectOwnerDir
+ case zExpectDirGenerateBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}}
+ return
+ }
+ st = zExpectDirGenerate
+ case zExpectDirGenerate:
+ if l.value != zString {
+ t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
+ return
+ }
+ if e := generate(l, c, t, origin); e != "" {
+ t <- &Token{Error: &ParseError{f, e, l}}
+ return
+ }
+ st = zExpectOwnerDir
+ case zExpectOwnerBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank after owner", l}}
+ return
+ }
+ st = zExpectAny
+ case zExpectAny:
+ switch l.value {
+ case zRrtpe:
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ case zClass:
+ h.Class = l.torc
+ st = zExpectAnyNoClassBl
+ case zString:
+ ttl, ok := stringToTtl(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "not a TTL", l}}
+ return
+ }
+ h.Ttl = ttl
+ // defttl = ttl // don't set the defttl here
+ st = zExpectAnyNoTtlBl
+ default:
+ t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
+ return
+ }
+ case zExpectAnyNoClassBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank before class", l}}
+ return
+ }
+ st = zExpectAnyNoClass
+ case zExpectAnyNoTtlBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
+ return
+ }
+ st = zExpectAnyNoTtl
+ case zExpectAnyNoTtl:
+ switch l.value {
+ case zClass:
+ h.Class = l.torc
+ st = zExpectRrtypeBl
+ case zRrtpe:
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ default:
+ t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
+ return
+ }
+ case zExpectAnyNoClass:
+ switch l.value {
+ case zString:
+ ttl, ok := stringToTtl(l.token)
+ if !ok {
+ t <- &Token{Error: &ParseError{f, "not a TTL", l}}
+ return
+ }
+ h.Ttl = ttl
+ // defttl = ttl // don't set the def ttl anymore
+ st = zExpectRrtypeBl
+ case zRrtpe:
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ default:
+ t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
+ return
+ }
+ case zExpectRrtypeBl:
+ if l.value != zBlank {
+ t <- &Token{Error: &ParseError{f, "no blank before RR type", l}}
+ return
+ }
+ st = zExpectRrtype
+ case zExpectRrtype:
+ if l.value != zRrtpe {
+ t <- &Token{Error: &ParseError{f, "unknown RR type", l}}
+ return
+ }
+ h.Rrtype = l.torc
+ st = zExpectRdata
+ case zExpectRdata:
+ r, e, c1 := setRR(h, c, origin, f)
+ if e != nil {
+ // If e.lex is empty, then we have encountered an unknown RR type;
+ // in that case we substitute our current lex token
+ if e.lex.token == "" && e.lex.value == 0 {
+ e.lex = l // Uh, dirty
+ }
+ t <- &Token{Error: e}
+ return
+ }
+ t <- &Token{RR: r, Comment: c1}
+ st = zExpectOwnerDir
+ }
+ }
+ // If we get here and h.Rrtype is still zero, we haven't parsed anything; this
+ // is not an error, because an empty zone file is still a zone file.
+}
+
+// zlexer scans the sourcefile and returns tokens on the channel c.
+func zlexer(s *scan, c chan lex) {
+ var l lex
+ str := make([]byte, maxTok) // Should be enough for any token
+ stri := 0 // Offset in str (0 means empty)
+ com := make([]byte, maxTok) // Hold comment text
+ comi := 0
+ quote := false
+ escape := false
+ space := false
+ commt := false
+ rrtype := false
+ owner := true
+ brace := 0
+ x, err := s.tokenText()
+ defer close(c)
+ for err == nil {
+ l.column = s.position.Column
+ l.line = s.position.Line
+ if stri >= maxTok {
+ l.token = "token length insufficient for parsing"
+ l.err = true
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ return
+ }
+ if comi >= maxTok {
+ l.token = "comment length insufficient for parsing"
+ l.err = true
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ return
+ }
+
+ switch x {
+ case ' ', '\t':
+ if escape {
+ escape = false
+ str[stri] = x
+ stri++
+ break
+ }
+ if quote {
+ // Inside quotes this is legal
+ str[stri] = x
+ stri++
+ break
+ }
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ if stri == 0 {
+ // Space directly in the beginning, handled in the grammar
+ } else if owner {
+ // If we have a string and it's the first, make it an owner
+ l.value = zOwner
+ l.token = string(str[:stri])
+ l.tokenUpper = strings.ToUpper(l.token)
+ l.length = stri
+ // escaped $... starts with a \ not a $, so this will work
+ switch l.tokenUpper {
+ case "$TTL":
+ l.value = zDirTtl
+ case "$ORIGIN":
+ l.value = zDirOrigin
+ case "$INCLUDE":
+ l.value = zDirInclude
+ case "$GENERATE":
+ l.value = zDirGenerate
+ }
+ debug.Printf("[7 %+v]", l.token)
+ c <- l
+ } else {
+ l.value = zString
+ l.token = string(str[:stri])
+ l.tokenUpper = strings.ToUpper(l.token)
+ l.length = stri
+ if !rrtype {
+ if t, ok := StringToType[l.tokenUpper]; ok {
+ l.value = zRrtpe
+ l.torc = t
+ rrtype = true
+ } else {
+ if strings.HasPrefix(l.tokenUpper, "TYPE") {
+ t, ok := typeToInt(l.token)
+ if !ok {
+ l.token = "unknown RR type"
+ l.err = true
+ c <- l
+ return
+ }
+ l.value = zRrtpe
+ l.torc = t
+ }
+ }
+ if t, ok := StringToClass[l.tokenUpper]; ok {
+ l.value = zClass
+ l.torc = t
+ } else {
+ if strings.HasPrefix(l.tokenUpper, "CLASS") {
+ t, ok := classToInt(l.token)
+ if !ok {
+ l.token = "unknown class"
+ l.err = true
+ c <- l
+ return
+ }
+ l.value = zClass
+ l.torc = t
+ }
+ }
+ }
+ debug.Printf("[6 %+v]", l.token)
+ c <- l
+ }
+ stri = 0
+ // I reverse space stuff here
+ if !space && !commt {
+ l.value = zBlank
+ l.token = " "
+ l.length = 1
+ debug.Printf("[5 %+v]", l.token)
+ c <- l
+ }
+ owner = false
+ space = true
+ case ';':
+ if escape {
+ escape = false
+ str[stri] = x
+ stri++
+ break
+ }
+ if quote {
+ // Inside quotes this is legal
+ str[stri] = x
+ stri++
+ break
+ }
+ if stri > 0 {
+ l.value = zString
+ l.token = string(str[:stri])
+ l.length = stri
+ debug.Printf("[4 %+v]", l.token)
+ c <- l
+ stri = 0
+ }
+ commt = true
+ com[comi] = ';'
+ comi++
+ case '\r':
+ escape = false
+ if quote {
+ str[stri] = x
+ stri++
+ break
+ }
+ // discard if outside of quotes
+ case '\n':
+ escape = false
+ // Escaped newline
+ if quote {
+ str[stri] = x
+ stri++
+ break
+ }
+ // inside quotes this is legal
+ if commt {
+ // Reset a comment
+ commt = false
+ rrtype = false
+ stri = 0
+ // If not in a brace this ends the comment AND the RR
+ if brace == 0 {
+ owner = true
+ l.value = zNewline
+ l.token = "\n"
+ l.length = 1
+ l.comment = string(com[:comi])
+ debug.Printf("[3 %+v %+v]", l.token, l.comment)
+ c <- l
+ l.comment = ""
+ comi = 0
+ break
+ }
+ com[comi] = ' ' // convert newline to space
+ comi++
+ break
+ }
+
+ if brace == 0 {
+ // If there is previous text, we should output it here
+ if stri != 0 {
+ l.value = zString
+ l.token = string(str[:stri])
+ l.tokenUpper = strings.ToUpper(l.token)
+
+ l.length = stri
+ if !rrtype {
+ if t, ok := StringToType[l.tokenUpper]; ok {
+ l.value = zRrtpe
+ l.torc = t
+ rrtype = true
+ }
+ }
+ debug.Printf("[2 %+v]", l.token)
+ c <- l
+ }
+ l.value = zNewline
+ l.token = "\n"
+ l.length = 1
+ debug.Printf("[1 %+v]", l.token)
+ c <- l
+ stri = 0
+ commt = false
+ rrtype = false
+ owner = true
+ comi = 0
+ }
+ case '\\':
+ // comments do not get escaped chars, everything is copied
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ // something already escaped must be in string
+ if escape {
+ str[stri] = x
+ stri++
+ escape = false
+ break
+ }
+ // something escaped outside of string gets added to string
+ str[stri] = x
+ stri++
+ escape = true
+ case '"':
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ if escape {
+ str[stri] = x
+ stri++
+ escape = false
+ break
+ }
+ space = false
+ // send previous gathered text and the quote
+ if stri != 0 {
+ l.value = zString
+ l.token = string(str[:stri])
+ l.length = stri
+
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ stri = 0
+ }
+
+ // send quote itself as separate token
+ l.value = zQuote
+ l.token = "\""
+ l.length = 1
+ c <- l
+ quote = !quote
+ case '(', ')':
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ if escape {
+ str[stri] = x
+ stri++
+ escape = false
+ break
+ }
+ if quote {
+ str[stri] = x
+ stri++
+ break
+ }
+ switch x {
+ case ')':
+ brace--
+ if brace < 0 {
+ l.token = "extra closing brace"
+ l.err = true
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ return
+ }
+ case '(':
+ brace++
+ }
+ default:
+ escape = false
+ if commt {
+ com[comi] = x
+ comi++
+ break
+ }
+ str[stri] = x
+ stri++
+ space = false
+ }
+ x, err = s.tokenText()
+ }
+ if stri > 0 {
+ // Send remainder
+ l.token = string(str[:stri])
+ l.length = stri
+ l.value = zString
+ debug.Printf("[%+v]", l.token)
+ c <- l
+ }
+}
+
+// Extract the class number from CLASSxx
+func classToInt(token string) (uint16, bool) {
+ offset := 5
+ if len(token) < offset+1 {
+ return 0, false
+ }
+ class, err := strconv.Atoi(token[offset:])
+ if err != nil || class > maxUint16 {
+ return 0, false
+ }
+ return uint16(class), true
+}
+
+// Extract the rr number from TYPExxx
+func typeToInt(token string) (uint16, bool) {
+ offset := 4
+ if len(token) < offset+1 {
+ return 0, false
+ }
+ typ, err := strconv.Atoi(token[offset:])
+ if err != nil || typ > maxUint16 {
+ return 0, false
+ }
+ return uint16(typ), true
+}
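+
+// For example, classToInt("CLASS255") yields (255, true) and
+// typeToInt("TYPE65534") yields (65534, true); a missing number or a value
+// above maxUint16 yields (0, false).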
+
+// Parse things like 2w, 2m, etc. Return the time in seconds.
+func stringToTtl(token string) (uint32, bool) {
+ s := uint32(0)
+ i := uint32(0)
+ for _, c := range token {
+ switch c {
+ case 's', 'S':
+ s += i
+ i = 0
+ case 'm', 'M':
+ s += i * 60
+ i = 0
+ case 'h', 'H':
+ s += i * 60 * 60
+ i = 0
+ case 'd', 'D':
+ s += i * 60 * 60 * 24
+ i = 0
+ case 'w', 'W':
+ s += i * 60 * 60 * 24 * 7
+ i = 0
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ i *= 10
+ i += uint32(c) - '0'
+ default:
+ return 0, false
+ }
+ }
+ return s + i, true
+}
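+
+// For example, stringToTtl("1h30m") yields (5400, true) and a bare "3600"
+// yields (3600, true); any other character yields (0, false).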
+
+// Parse LOC records' <digits>[.<digits>][mM] into a
+// mantissa exponent format. Token should contain the entire
+// string (i.e. no spaces allowed).
+func stringToCm(token string) (e, m uint8, ok bool) {
+ if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
+ token = token[0 : len(token)-1]
+ }
+ s := strings.SplitN(token, ".", 2)
+ var meters, cmeters, val int
+ var err error
+ switch len(s) {
+ case 2:
+ if cmeters, err = strconv.Atoi(s[1]); err != nil {
+ return
+ }
+ fallthrough
+ case 1:
+ if meters, err = strconv.Atoi(s[0]); err != nil {
+ return
+ }
+ case 0:
+ // huh?
+ return 0, 0, false
+ }
+ ok = true
+ if meters > 0 {
+ e = 2
+ val = meters
+ } else {
+ e = 0
+ val = cmeters
+ }
+ for val > 10 {
+ e++
+ val /= 10
+ }
+ if e > 9 {
+ ok = false
+ }
+ m = uint8(val)
+ return
+}
+
+func appendOrigin(name, origin string) string {
+ if origin == "." {
+ return name + origin
+ }
+ return name + "." + origin
+}
+
+// LOC record helper function
+func locCheckNorth(token string, latitude uint32) (uint32, bool) {
+ switch token {
+ case "n", "N":
+ return LOC_EQUATOR + latitude, true
+ case "s", "S":
+ return LOC_EQUATOR - latitude, true
+ }
+ return latitude, false
+}
+
+// LOC record helper function
+func locCheckEast(token string, longitude uint32) (uint32, bool) {
+ switch token {
+ case "e", "E":
+ return LOC_EQUATOR + longitude, true
+ case "w", "W":
+ return LOC_EQUATOR - longitude, true
+ }
+ return longitude, false
+}
+
+// "Eat" the rest of the "line". Return potential comments
+func slurpRemainder(c chan lex, f string) (*ParseError, string) {
+ l := <-c
+ com := ""
+ switch l.value {
+ case zBlank:
+ l = <-c
+ com = l.comment
+ if l.value != zNewline && l.value != zEOF {
+ return &ParseError{f, "garbage after rdata", l}, ""
+ }
+ case zNewline:
+ com = l.comment
+ case zEOF:
+ default:
+ return &ParseError{f, "garbage after rdata", l}, ""
+ }
+ return nil, com
+}
+
+// Parse a 64 bit-like IPv6 address: "0014:4fff:ff20:ee64".
+// Used for NID and L64 records.
+func stringToNodeID(l lex) (uint64, *ParseError) {
+ if len(l.token) < 19 {
+ return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
+ }
+ // There must be three colons at fixed positions; if not, it's a parse error.
+ if l.token[4] != ':' || l.token[9] != ':' || l.token[14] != ':' {
+ return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
+ }
+ s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
+ u, e := strconv.ParseUint(s, 16, 64)
+ if e != nil {
+ return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
+ }
+ return u, nil
+}
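+
+// A minimal sketch of the entry points above; the record text is
+// illustrative:
+//
+// rr, err := NewRR("mx.example.org. 3600 IN MX 10 mail.example.org.")
+// if err == nil {
+// fmt.Println(rr.Header().Name, rr.Header().Ttl) // mx.example.org. 3600
+// }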
diff --git a/vendor/github.com/miekg/dns/zscan_rr.go b/vendor/github.com/miekg/dns/zscan_rr.go
new file mode 100644
index 0000000000..a2db008fa9
--- /dev/null
+++ b/vendor/github.com/miekg/dns/zscan_rr.go
@@ -0,0 +1,2270 @@
+package dns
+
+import (
+ "encoding/base64"
+ "net"
+ "strconv"
+ "strings"
+)
+
+type parserFunc struct {
+ // Func defines the function that parses the tokens and returns the RR
+ // or an error. The last string contains any comments in the line as
+ // they are returned by the lexer as well.
+ Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string)
+ // Variable signals if the RR ending is of variable length, like TXT or records
+ // that have Hexadecimal or Base64 as their last element in the Rdata. Records
+ // that have a fixed ending include, for instance, A, AAAA and SOA.
+ Variable bool
+}
+
+// Parse the rdata of each rrtype.
+// All data from the channel c is either zString or zBlank.
+// After the rdata there may come a zBlank and then a zNewline
+// or immediately a zNewline. If this is not the case we flag
+// a *ParseError: garbage after rdata.
+func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ parserfunc, ok := typeToparserFunc[h.Rrtype]
+ if ok {
+ r, e, cm := parserfunc.Func(h, c, o, f)
+ if parserfunc.Variable {
+ return r, e, cm
+ }
+ if e != nil {
+ return nil, e, ""
+ }
+ e, cm = slurpRemainder(c, f)
+ if e != nil {
+ return nil, e, ""
+ }
+ return r, nil, cm
+ }
+ // RFC 3597 RR (Unknown RR handling)
+ return setRFC3597(h, c, o, f)
+}
+
+// Parse a remainder of the rdata with embedded spaces, and return the parsed string
+// (sans the spaces) or an error.
+func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) {
+ s := ""
+ l := <-c // zString
+ for l.value != zNewline && l.value != zEOF {
+ if l.err {
+ return s, &ParseError{f, errstr, l}, ""
+ }
+ switch l.value {
+ case zString:
+ s += l.token
+ case zBlank: // Ok
+ default:
+ return "", &ParseError{f, errstr, l}, ""
+ }
+ l = <-c
+ }
+ return s, nil, l.comment
+}
+
+// Parse a remainder of the rdata with embedded spaces, and return the parsed string
+// slice (sans the spaces) or an error.
+func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) {
+ // Get the remaining data until we see a zNewline
+ quote := false
+ l := <-c
+ var s []string
+ if l.err {
+ return s, &ParseError{f, errstr, l}, ""
+ }
+ switch l.value == zQuote {
+ case true: // A number of quoted strings
+ s = make([]string, 0)
+ empty := true
+ for l.value != zNewline && l.value != zEOF {
+ if l.err {
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+ switch l.value {
+ case zString:
+ empty = false
+ if len(l.token) > 255 {
+ // split up tokens that are larger than 255 bytes into 255-byte chunks
+ sx := []string{}
+ p, i := 0, 255
+ for {
+ if i <= len(l.token) {
+ sx = append(sx, l.token[p:i])
+ } else {
+ sx = append(sx, l.token[p:])
+ break
+ }
+ p, i = p+255, i+255
+ }
+ s = append(s, sx...)
+ break
+ }
+
+ s = append(s, l.token)
+ case zBlank:
+ if quote {
+ // zBlank can only be seen in between txt parts.
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+ case zQuote:
+ if empty && quote {
+ s = append(s, "")
+ }
+ quote = !quote
+ empty = true
+ default:
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+ l = <-c
+ }
+ if quote {
+ return nil, &ParseError{f, errstr, l}, ""
+ }
+ case false: // Unquoted text record
+ s = make([]string, 1)
+ for l.value != zNewline && l.value != zEOF {
+ if l.err {
+ return s, &ParseError{f, errstr, l}, ""
+ }
+ s[0] += l.token
+ l = <-c
+ }
+ }
+ return s, nil, l.comment
+}
+
+func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(A)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 { // Dynamic updates.
+ return rr, nil, ""
+ }
+ rr.A = net.ParseIP(l.token)
+ if rr.A == nil || l.err {
+ return nil, &ParseError{f, "bad A A", l}, ""
+ }
+ return rr, nil, ""
+}
+
+func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(AAAA)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ rr.AAAA = net.ParseIP(l.token)
+ if rr.AAAA == nil || l.err {
+ return nil, &ParseError{f, "bad AAAA AAAA", l}, ""
+ }
+ return rr, nil, ""
+}
+
+func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NS)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Ns = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Ns = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad NS Ns", l}, ""
+ }
+ if rr.Ns[l.length-1] != '.' {
+ rr.Ns = appendOrigin(rr.Ns, o)
+ }
+ return rr, nil, ""
+}
+
+func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(PTR)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Ptr = l.token
+ if l.length == 0 { // dynamic update rr.
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Ptr = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad PTR Ptr", l}, ""
+ }
+ if rr.Ptr[l.length-1] != '.' {
+ rr.Ptr = appendOrigin(rr.Ptr, o)
+ }
+ return rr, nil, ""
+}
+
+func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NSAPPTR)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Ptr = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Ptr = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, ""
+ }
+ if rr.Ptr[l.length-1] != '.' {
+ rr.Ptr = appendOrigin(rr.Ptr, o)
+ }
+ return rr, nil, ""
+}
+
+func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RP)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mbox = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mbox = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad RP Mbox", l}, ""
+ }
+ if rr.Mbox[l.length-1] != '.' {
+ rr.Mbox = appendOrigin(rr.Mbox, o)
+ }
+ }
+ <-c // zBlank
+ l = <-c
+ rr.Txt = l.token
+ if l.token == "@" {
+ rr.Txt = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad RP Txt", l}, ""
+ }
+ if rr.Txt[l.length-1] != '.' {
+ rr.Txt = appendOrigin(rr.Txt, o)
+ }
+ return rr, nil, ""
+}
+
+func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MR)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mr = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mr = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MR Mr", l}, ""
+ }
+ if rr.Mr[l.length-1] != '.' {
+ rr.Mr = appendOrigin(rr.Mr, o)
+ }
+ return rr, nil, ""
+}
+
+func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MB)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mb = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mb = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MB Mb", l}, ""
+ }
+ if rr.Mb[l.length-1] != '.' {
+ rr.Mb = appendOrigin(rr.Mb, o)
+ }
+ return rr, nil, ""
+}
+
+func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MG)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mg = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mg = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MG Mg", l}, ""
+ }
+ if rr.Mg[l.length-1] != '.' {
+ rr.Mg = appendOrigin(rr.Mg, o)
+ }
+ return rr, nil, ""
+}
+
+func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(HINFO)
+ rr.Hdr = h
+
+ chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f)
+ if e != nil {
+ return nil, e, c1
+ }
+
+ if ln := len(chunks); ln == 0 {
+ return rr, nil, ""
+ } else if ln == 1 {
+ // Can we split it?
+ if out := strings.Fields(chunks[0]); len(out) > 1 {
+ chunks = out
+ } else {
+ chunks = append(chunks, "")
+ }
+ }
+
+ rr.Cpu = chunks[0]
+ rr.Os = strings.Join(chunks[1:], " ")
+
+ return rr, nil, ""
+}
+
+func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MINFO)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Rmail = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Rmail = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MINFO Rmail", l}, ""
+ }
+ if rr.Rmail[l.length-1] != '.' {
+ rr.Rmail = appendOrigin(rr.Rmail, o)
+ }
+ }
+ <-c // zBlank
+ l = <-c
+ rr.Email = l.token
+ if l.token == "@" {
+ rr.Email = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MINFO Email", l}, ""
+ }
+ if rr.Email[l.length-1] != '.' {
+ rr.Email = appendOrigin(rr.Email, o)
+ }
+ return rr, nil, ""
+}
+
+func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MF)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Mf = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Mf = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MF Mf", l}, ""
+ }
+ if rr.Mf[l.length-1] != '.' {
+ rr.Mf = appendOrigin(rr.Mf, o)
+ }
+ return rr, nil, ""
+}
+
+func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MD)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Md = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Md = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MD Md", l}, ""
+ }
+ if rr.Md[l.length-1] != '.' {
+ rr.Md = appendOrigin(rr.Md, o)
+ }
+ return rr, nil, ""
+}
+
+func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(MX)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad MX Pref", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Mx = l.token
+ if l.token == "@" {
+ rr.Mx = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad MX Mx", l}, ""
+ }
+ if rr.Mx[l.length-1] != '.' {
+ rr.Mx = appendOrigin(rr.Mx, o)
+ }
+ return rr, nil, ""
+}
+
+func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RT)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil {
+ return nil, &ParseError{f, "bad RT Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Host = l.token
+ if l.token == "@" {
+ rr.Host = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad RT Host", l}, ""
+ }
+ if rr.Host[l.length-1] != '.' {
+ rr.Host = appendOrigin(rr.Host, o)
+ }
+ return rr, nil, ""
+}
+
+func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(AFSDB)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad AFSDB Subtype", l}, ""
+ }
+ rr.Subtype = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Hostname = l.token
+ if l.token == "@" {
+ rr.Hostname = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad AFSDB Hostname", l}, ""
+ }
+ if rr.Hostname[l.length-1] != '.' {
+ rr.Hostname = appendOrigin(rr.Hostname, o)
+ }
+ return rr, nil, ""
+}
+
+func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(X25)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.err {
+ return nil, &ParseError{f, "bad X25 PSDNAddress", l}, ""
+ }
+ rr.PSDNAddress = l.token
+ return rr, nil, ""
+}
+
+func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(KX)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad KX Pref", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Exchanger = l.token
+ if l.token == "@" {
+ rr.Exchanger = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad KX Exchanger", l}, ""
+ }
+ if rr.Exchanger[l.length-1] != '.' {
+ rr.Exchanger = appendOrigin(rr.Exchanger, o)
+ }
+ return rr, nil, ""
+}
+
+func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(CNAME)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Target = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Target = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad CNAME Target", l}, ""
+ }
+ if rr.Target[l.length-1] != '.' {
+ rr.Target = appendOrigin(rr.Target, o)
+ }
+ return rr, nil, ""
+}
+
+func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(DNAME)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Target = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Target = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad CNAME Target", l}, ""
+ }
+ if rr.Target[l.length-1] != '.' {
+ rr.Target = appendOrigin(rr.Target, o)
+ }
+ return rr, nil, ""
+}
+
+func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SOA)
+ rr.Hdr = h
+
+ l := <-c
+ rr.Ns = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ <-c // zBlank
+ if l.token == "@" {
+ rr.Ns = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad SOA Ns", l}, ""
+ }
+ if rr.Ns[l.length-1] != '.' {
+ rr.Ns = appendOrigin(rr.Ns, o)
+ }
+ }
+
+ l = <-c
+ rr.Mbox = l.token
+ if l.token == "@" {
+ rr.Mbox = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad SOA Mbox", l}, ""
+ }
+ if rr.Mbox[l.length-1] != '.' {
+ rr.Mbox = appendOrigin(rr.Mbox, o)
+ }
+ }
+ <-c // zBlank
+
+ var (
+ v uint32
+ ok bool
+ )
+ for i := 0; i < 5; i++ {
+ l = <-c
+ if l.err {
+ return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
+ }
+ if j, e := strconv.Atoi(l.token); e != nil {
+ if i == 0 {
+ // Serial should be a number
+ return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
+ }
+ if v, ok = stringToTtl(l.token); !ok {
+ return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
+ }
+ } else {
+ v = uint32(j)
+ }
+ switch i {
+ case 0:
+ rr.Serial = v
+ <-c // zBlank
+ case 1:
+ rr.Refresh = v
+ <-c // zBlank
+ case 2:
+ rr.Retry = v
+ <-c // zBlank
+ case 3:
+ rr.Expire = v
+ <-c // zBlank
+ case 4:
+ rr.Minttl = v
+ }
+ }
+ return rr, nil, ""
+}
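+
+// For example, the illustrative record text below parses with setSOA; the
+// serial must be a plain number, while the remaining four fields may also use
+// the duration shortcuts handled by stringToTtl ("4h", "2w", ...):
+//
+// example.org. 3600 IN SOA ns.example.org. mbox.example.org. 2015091801 4h 1h 2w 300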
+
+func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SRV)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SRV Priority", l}, ""
+ }
+ rr.Priority = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SRV Weight", l}, ""
+ }
+ rr.Weight = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SRV Port", l}, ""
+ }
+ rr.Port = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Target = l.token
+ if l.token == "@" {
+ rr.Target = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad SRV Target", l}, ""
+ }
+ if rr.Target[l.length-1] != '.' {
+ rr.Target = appendOrigin(rr.Target, o)
+ }
+ return rr, nil, ""
+}
+
+func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NAPTR)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NAPTR Order", l}, ""
+ }
+ rr.Order = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NAPTR Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ // Flags
+ <-c // zBlank
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Flags", l}, ""
+ }
+ l = <-c // Either String or Quote
+ if l.value == zString {
+ rr.Flags = l.token
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Flags", l}, ""
+ }
+ } else if l.value == zQuote {
+ rr.Flags = ""
+ } else {
+ return nil, &ParseError{f, "bad NAPTR Flags", l}, ""
+ }
+
+ // Service
+ <-c // zBlank
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Service", l}, ""
+ }
+ l = <-c // Either String or Quote
+ if l.value == zString {
+ rr.Service = l.token
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Service", l}, ""
+ }
+ } else if l.value == zQuote {
+ rr.Service = ""
+ } else {
+ return nil, &ParseError{f, "bad NAPTR Service", l}, ""
+ }
+
+ // Regexp
+ <-c // zBlank
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
+ }
+ l = <-c // Either String or Quote
+ if l.value == zString {
+ rr.Regexp = l.token
+ l = <-c // _QUOTE
+ if l.value != zQuote {
+ return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
+ }
+ } else if l.value == zQuote {
+ rr.Regexp = ""
+ } else {
+ return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
+ }
+ // After quote no space??
+ <-c // zBlank
+ l = <-c // zString
+ rr.Replacement = l.token
+ if l.token == "@" {
+ rr.Replacement = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad NAPTR Replacement", l}, ""
+ }
+ if rr.Replacement[l.length-1] != '.' {
+ rr.Replacement = appendOrigin(rr.Replacement, o)
+ }
+ return rr, nil, ""
+}
+
+func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TALINK)
+ rr.Hdr = h
+
+ l := <-c
+ rr.PreviousName = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.PreviousName = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad TALINK PreviousName", l}, ""
+ }
+ if rr.PreviousName[l.length-1] != '.' {
+ rr.PreviousName = appendOrigin(rr.PreviousName, o)
+ }
+ }
+ <-c // zBlank
+ l = <-c
+ rr.NextName = l.token
+ if l.token == "@" {
+ rr.NextName = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad TALINK NextName", l}, ""
+ }
+ if rr.NextName[l.length-1] != '.' {
+ rr.NextName = appendOrigin(rr.NextName, o)
+ }
+ return rr, nil, ""
+}
+
+func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(LOC)
+ rr.Hdr = h
+ // Non zero defaults for LOC record, see RFC 1876, Section 3.
+ rr.HorizPre = 165 // 10000
+ rr.VertPre = 162 // 10
+ rr.Size = 18 // 1
+ ok := false
+ // North
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Latitude", l}, ""
+ }
+ rr.Latitude = 1000 * 60 * 60 * uint32(i)
+
+ <-c // zBlank
+ // Either number, 'N' or 'S'
+ l = <-c
+ if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
+ goto East
+ }
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Latitude minutes", l}, ""
+ }
+ rr.Latitude += 1000 * 60 * uint32(i)
+
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Latitude seconds", l}, ""
+ } else {
+ rr.Latitude += uint32(1000 * i)
+ }
+ <-c // zBlank
+ // Either number, 'N' or 'S'
+ l = <-c
+ if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
+ goto East
+ }
+ // If still alive, flag an error
+ return nil, &ParseError{f, "bad LOC Latitude North/South", l}, ""
+
+East:
+ // East
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.Atoi(l.token); e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Longitude", l}, ""
+ } else {
+ rr.Longitude = 1000 * 60 * 60 * uint32(i)
+ }
+ <-c // zBlank
+ // Either number, 'E' or 'W'
+ l = <-c
+ if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
+ goto Altitude
+ }
+ if i, e := strconv.Atoi(l.token); e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Longitude minutes", l}, ""
+ } else {
+ rr.Longitude += 1000 * 60 * uint32(i)
+ }
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
+ return nil, &ParseError{f, "bad LOC Longitude seconds", l}, ""
+ } else {
+ rr.Longitude += uint32(1000 * i)
+ }
+ <-c // zBlank
+ // Either number, 'E' or 'W'
+ l = <-c
+ if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
+ goto Altitude
+ }
+ // If still alive, flag an error
+ return nil, &ParseError{f, "bad LOC Longitude East/West", l}, ""
+
+Altitude:
+ <-c // zBlank
+ l = <-c
+ if l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad LOC Altitude", l}, ""
+ }
+ if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
+ l.token = l.token[0 : len(l.token)-1]
+ }
+ if i, e := strconv.ParseFloat(l.token, 32); e != nil {
+ return nil, &ParseError{f, "bad LOC Altitude", l}, ""
+ } else {
+ rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5)
+ }
+
+ // And now optionally the other values
+ l = <-c
+ count := 0
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zString:
+ switch count {
+ case 0: // Size
+ e, m, ok := stringToCm(l.token)
+ if !ok {
+ return nil, &ParseError{f, "bad LOC Size", l}, ""
+ }
+ rr.Size = (e & 0x0f) | (m << 4 & 0xf0)
+ case 1: // HorizPre
+ e, m, ok := stringToCm(l.token)
+ if !ok {
+ return nil, &ParseError{f, "bad LOC HorizPre", l}, ""
+ }
+ rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0)
+ case 2: // VertPre
+ e, m, ok := stringToCm(l.token)
+ if !ok {
+ return nil, &ParseError{f, "bad LOC VertPre", l}, ""
+ }
+ rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0)
+ }
+ count++
+ case zBlank:
+ // Ok
+ default:
+ return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, ""
+ }
+ l = <-c
+ }
+ return rr, nil, ""
+}
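+
+// An illustrative aside (not part of the original file): stringToCm
+// returns the base-10 exponent e and mantissa m of a size expressed in
+// centimeters, which the code above packs into a single byte with the
+// mantissa in the high nibble and the exponent in the low nibble:
+//
+// e, m, _ := stringToCm("10m") // 10 m = 1 * 10^3 cm, so m = 1, e = 3
+// b := (e & 0x0f) | (m << 4 & 0xf0) // 0x13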
+
+func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(HIP)
+ rr.Hdr = h
+
+ // HitLength is not part of the presentation format; it is derived below.
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, ""
+ }
+ rr.PublicKeyAlgorithm = uint8(i)
+ <-c // zBlank
+ l = <-c // zString
+ if l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad HIP Hit", l}, ""
+ }
+ rr.Hit = l.token // This cannot contain spaces, see RFC 5205 Section 6.
+ rr.HitLength = uint8(len(rr.Hit)) / 2
+
+ <-c // zBlank
+ l = <-c // zString
+ if l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad HIP PublicKey", l}, ""
+ }
+ rr.PublicKey = l.token // This cannot contain spaces
+ rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey)))
+
+ // RendezvousServers (if any)
+ l = <-c
+ var xs []string
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zString:
+ if l.token == "@" {
+ xs = append(xs, o)
+ l = <-c
+ continue
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad HIP RendezvousServers", l}, ""
+ }
+ if l.token[l.length-1] != '.' {
+ l.token = appendOrigin(l.token, o)
+ }
+ xs = append(xs, l.token)
+ case zBlank:
+ // Ok
+ default:
+ return nil, &ParseError{f, "bad HIP RendezvousServers", l}, ""
+ }
+ l = <-c
+ }
+ rr.RendezvousServers = xs
+ return rr, nil, l.comment
+}
+
+func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(CERT)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ if v, ok := StringToCertType[l.token]; ok {
+ rr.Type = v
+ } else if i, e := strconv.Atoi(l.token); e != nil {
+ return nil, &ParseError{f, "bad CERT Type", l}, ""
+ } else {
+ rr.Type = uint16(i)
+ }
+ <-c // zBlank
+ l = <-c // zString
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad CERT KeyTag", l}, ""
+ }
+ rr.KeyTag = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ if v, ok := StringToAlgorithm[l.token]; ok {
+ rr.Algorithm = v
+ } else if i, e := strconv.Atoi(l.token); e != nil {
+ return nil, &ParseError{f, "bad CERT Algorithm", l}, ""
+ } else {
+ rr.Algorithm = uint8(i)
+ }
+ s, e1, c1 := endingToString(c, "bad CERT Certificate", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.Certificate = s
+ return rr, nil, c1
+}
+
+func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(OPENPGPKEY)
+ rr.Hdr = h
+
+ s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.PublicKey = s
+ return rr, nil, c1
+}
+
+func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setRRSIG(h, c, o, f)
+ if r != nil {
+ return &SIG{*r.(*RRSIG)}, e, s
+ }
+ return nil, e, s
+}
+
+func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RRSIG)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ if t, ok := StringToType[l.tokenUpper]; !ok {
+ if strings.HasPrefix(l.tokenUpper, "TYPE") {
+ t, ok = typeToInt(l.tokenUpper)
+ if !ok {
+ return nil, &ParseError{f, "bad RRSIG Typecovered", l}, ""
+ }
+ rr.TypeCovered = t
+ } else {
+ return nil, &ParseError{f, "bad RRSIG Typecovered", l}, ""
+ }
+ } else {
+ rr.TypeCovered = t
+ }
+ <-c // zBlank
+ l = <-c
+ i, err := strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad RRSIG Algorithm", l}, ""
+ }
+ rr.Algorithm = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, err = strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad RRSIG Labels", l}, ""
+ }
+ rr.Labels = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, err = strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, ""
+ }
+ rr.OrigTtl = uint32(i)
+ <-c // zBlank
+ l = <-c
+ if i, err := StringToTime(l.token); err != nil {
+ // If the token is all numeric, fall back to treating it as a Unix epoch
+ if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
+ // TODO(miek): error out on > MAX_UINT32, same below
+ rr.Expiration = uint32(i)
+ } else {
+ return nil, &ParseError{f, "bad RRSIG Expiration", l}, ""
+ }
+ } else {
+ rr.Expiration = i
+ }
+ <-c // zBlank
+ l = <-c
+ if i, err := StringToTime(l.token); err != nil {
+ if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
+ rr.Inception = uint32(i)
+ } else {
+ return nil, &ParseError{f, "bad RRSIG Inception", l}, ""
+ }
+ } else {
+ rr.Inception = i
+ }
+ <-c // zBlank
+ l = <-c
+ i, err = strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad RRSIG KeyTag", l}, ""
+ }
+ rr.KeyTag = uint16(i)
+ <-c // zBlank
+ l = <-c
+ rr.SignerName = l.token
+ if l.token == "@" {
+ rr.SignerName = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad RRSIG SignerName", l}, ""
+ }
+ if rr.SignerName[l.length-1] != '.' {
+ rr.SignerName = appendOrigin(rr.SignerName, o)
+ }
+ }
+ s, e, c1 := endingToString(c, "bad RRSIG Signature", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.Signature = s
+ return rr, nil, c1
+}
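+
+// An illustrative note (not in the original file): StringToTime above
+// accepts the YYYYMMDDHHmmSS presentation format, e.g. "20150906112233",
+// while an all-numeric token such as "1441538553" falls back to being
+// read as a Unix epoch, as handled in the branches above.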
+
+func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NSEC)
+ rr.Hdr = h
+
+ l := <-c
+ rr.NextDomain = l.token
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ if l.token == "@" {
+ rr.NextDomain = o
+ } else {
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad NSEC NextDomain", l}, ""
+ }
+ if rr.NextDomain[l.length-1] != '.' {
+ rr.NextDomain = appendOrigin(rr.NextDomain, o)
+ }
+ }
+
+ rr.TypeBitMap = make([]uint16, 0)
+ var (
+ k uint16
+ ok bool
+ )
+ l = <-c
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zBlank:
+ // Ok
+ case zString:
+ if k, ok = StringToType[l.tokenUpper]; !ok {
+ if k, ok = typeToInt(l.tokenUpper); !ok {
+ return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, ""
+ }
+ }
+ rr.TypeBitMap = append(rr.TypeBitMap, k)
+ default:
+ return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, ""
+ }
+ l = <-c
+ }
+ return rr, nil, l.comment
+}
+
+func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NSEC3)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3 Hash", l}, ""
+ }
+ rr.Hash = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3 Flags", l}, ""
+ }
+ rr.Flags = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3 Iterations", l}, ""
+ }
+ rr.Iterations = uint16(i)
+ <-c
+ l = <-c
+ if len(l.token) == 0 || l.err {
+ return nil, &ParseError{f, "bad NSEC3 Salt", l}, ""
+ }
+ rr.SaltLength = uint8(len(l.token)) / 2
+ rr.Salt = l.token
+
+ <-c
+ l = <-c
+ if len(l.token) == 0 || l.err {
+ return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, ""
+ }
+ rr.HashLength = 20 // Fixed for NSEC3: a SHA-1 hash is 160 bits = 20 bytes
+ rr.NextDomain = l.token
+
+ rr.TypeBitMap = make([]uint16, 0)
+ var (
+ k uint16
+ ok bool
+ )
+ l = <-c
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zBlank:
+ // Ok
+ case zString:
+ if k, ok = StringToType[l.tokenUpper]; !ok {
+ if k, ok = typeToInt(l.tokenUpper); !ok {
+ return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, ""
+ }
+ }
+ rr.TypeBitMap = append(rr.TypeBitMap, k)
+ default:
+ return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, ""
+ }
+ l = <-c
+ }
+ return rr, nil, l.comment
+}
+
+func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NSEC3PARAM)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, ""
+ }
+ rr.Hash = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, ""
+ }
+ rr.Flags = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, ""
+ }
+ rr.Iterations = uint16(i)
+ <-c
+ l = <-c
+ rr.SaltLength = uint8(len(l.token))
+ rr.Salt = l.token
+ return rr, nil, ""
+}
+
+func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(EUI48)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.length != 17 || l.err {
+ return nil, &ParseError{f, "bad EUI48 Address", l}, ""
+ }
+ addr := make([]byte, 12)
+ dash := 0
+ for i := 0; i < 10; i += 2 {
+ addr[i] = l.token[i+dash]
+ addr[i+1] = l.token[i+1+dash]
+ dash++
+ if l.token[i+1+dash] != '-' {
+ return nil, &ParseError{f, "bad EUI48 Address", l}, ""
+ }
+ }
+ addr[10] = l.token[15]
+ addr[11] = l.token[16]
+
+ i, e := strconv.ParseUint(string(addr), 16, 48)
+ if e != nil {
+ return nil, &ParseError{f, "bad EUI48 Address", l}, ""
+ }
+ rr.Address = i
+ return rr, nil, ""
+}
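+
+// A worked example (not in the original file): the token
+// "00-0a-95-9d-68-16" has length 17. The loop above copies the five
+// leading hex pairs from indices (0,1), (3,4), (6,7), (9,10) and
+// (12,13) while checking the dashes at positions 2, 5, 8, 11 and 14;
+// the final pair is taken from positions 15 and 16, and the resulting
+// "000a959d6816" is parsed as a 48-bit unsigned integer.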
+
+func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(EUI64)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.length != 23 || l.err {
+ return nil, &ParseError{f, "bad EUI64 Address", l}, ""
+ }
+ addr := make([]byte, 16)
+ dash := 0
+ for i := 0; i < 14; i += 2 {
+ addr[i] = l.token[i+dash]
+ addr[i+1] = l.token[i+1+dash]
+ dash++
+ if l.token[i+1+dash] != '-' {
+ return nil, &ParseError{f, "bad EUI64 Address", l}, ""
+ }
+ }
+ addr[14] = l.token[21]
+ addr[15] = l.token[22]
+
+ i, e := strconv.ParseUint(string(addr), 16, 64)
+ if e != nil {
+ return nil, &ParseError{f, "bad EUI68 Address", l}, ""
+ }
+ rr.Address = uint64(i)
+ return rr, nil, ""
+}
+
+func setWKS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(WKS)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ rr.Address = net.ParseIP(l.token)
+ if rr.Address == nil || l.err {
+ return nil, &ParseError{f, "bad WKS Address", l}, ""
+ }
+
+ <-c // zBlank
+ l = <-c
+ proto := "tcp"
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad WKS Protocol", l}, ""
+ }
+ rr.Protocol = uint8(i)
+ switch rr.Protocol {
+ case 17:
+ proto = "udp"
+ case 6:
+ proto = "tcp"
+ default:
+ return nil, &ParseError{f, "bad WKS Protocol", l}, ""
+ }
+
+ <-c
+ l = <-c
+ rr.BitMap = make([]uint16, 0)
+ var (
+ k int
+ err error
+ )
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zBlank:
+ // Ok
+ case zString:
+ if k, err = net.LookupPort(proto, l.token); err != nil {
+ i, e := strconv.Atoi(l.token) // Not a known service name; try a plain port number.
+ if e != nil {
+ return nil, &ParseError{f, "bad WKS BitMap", l}, ""
+ }
+ k = i // Use the numeric port.
+ }
+ rr.BitMap = append(rr.BitMap, uint16(k))
+ default:
+ return nil, &ParseError{f, "bad WKS BitMap", l}, ""
+ }
+ l = <-c
+ }
+ return rr, nil, l.comment
+}
+
+func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SSHFP)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SSHFP Algorithm", l}, ""
+ }
+ rr.Algorithm = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SSHFP Type", l}, ""
+ }
+ rr.Type = uint8(i)
+ <-c // zBlank
+ s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.FingerPrint = s
+ return rr, nil, ""
+}
+
+func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) {
+ rr := new(DNSKEY)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " Flags", l}, ""
+ }
+ rr.Flags = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " Protocol", l}, ""
+ }
+ rr.Protocol = uint8(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
+ }
+ rr.Algorithm = uint8(i)
+ s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.PublicKey = s
+ return rr, nil, c1
+}
+
+func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDNSKEYs(h, c, o, f, "KEY")
+ if r != nil {
+ return &KEY{*r.(*DNSKEY)}, e, s
+ }
+ return nil, e, s
+}
+
+func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY")
+ return r, e, s
+}
+
+func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY")
+ if r != nil {
+ return &CDNSKEY{*r.(*DNSKEY)}, e, s
+ }
+ return nil, e, s
+}
+
+func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RKEY)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad RKEY Flags", l}, ""
+ }
+ rr.Flags = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad RKEY Protocol", l}, ""
+ }
+ rr.Protocol = uint8(i)
+ <-c // zBlank
+ l = <-c // zString
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad RKEY Algorithm", l}, ""
+ }
+ rr.Algorithm = uint8(i)
+ s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.PublicKey = s
+ return rr, nil, c1
+}
+
+func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(EID)
+ rr.Hdr = h
+ s, e, c1 := endingToString(c, "bad EID Endpoint", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.Endpoint = s
+ return rr, nil, c1
+}
+
+func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NIMLOC)
+ rr.Hdr = h
+ s, e, c1 := endingToString(c, "bad NIMLOC Locator", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.Locator = s
+ return rr, nil, c1
+}
+
+func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(GPOS)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ _, e := strconv.ParseFloat(l.token, 64)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad GPOS Longitude", l}, ""
+ }
+ rr.Longitude = l.token
+ <-c // zBlank
+ l = <-c
+ _, e = strconv.ParseFloat(l.token, 64)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad GPOS Latitude", l}, ""
+ }
+ rr.Latitude = l.token
+ <-c // zBlank
+ l = <-c
+ _, e = strconv.ParseFloat(l.token, 64)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad GPOS Altitude", l}, ""
+ }
+ rr.Altitude = l.token
+ return rr, nil, ""
+}
+
+func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) {
+ rr := new(DS)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, ""
+ }
+ rr.KeyTag = uint16(i)
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.Atoi(l.token); e != nil {
+ i, ok := StringToAlgorithm[l.tokenUpper]
+ if !ok || l.err {
+ return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
+ }
+ rr.Algorithm = i
+ } else {
+ rr.Algorithm = uint8(i)
+ }
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad " + typ + " DigestType", l}, ""
+ }
+ rr.DigestType = uint8(i)
+ s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.Digest = s
+ return rr, nil, c1
+}
+
+func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDSs(h, c, o, f, "DS")
+ return r, e, s
+}
+
+func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDSs(h, c, o, f, "DLV")
+ if r != nil {
+ return &DLV{*r.(*DS)}, e, s
+ }
+ return nil, e, s
+}
+
+func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ r, e, s := setDSs(h, c, o, f, "CDS")
+ if r != nil {
+ return &CDS{*r.(*DS)}, e, s
+ }
+ return nil, e, s
+}
+
+func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TA)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TA KeyTag", l}, ""
+ }
+ rr.KeyTag = uint16(i)
+ <-c // zBlank
+ l = <-c
+ if i, e := strconv.Atoi(l.token); e != nil {
+ i, ok := StringToAlgorithm[l.tokenUpper]
+ if !ok || l.err {
+ return nil, &ParseError{f, "bad TA Algorithm", l}, ""
+ }
+ rr.Algorithm = i
+ } else {
+ rr.Algorithm = uint8(i)
+ }
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TA DigestType", l}, ""
+ }
+ rr.DigestType = uint8(i)
+ // Use a fresh *ParseError variable: reusing the error-typed e above
+ // would make a nil *ParseError compare non-nil inside the interface.
+ s, e1, c1 := endingToString(c, "bad TA Digest", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ rr.Digest = s
+ return rr, nil, c1
+}
+
+func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TLSA)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TLSA Usage", l}, ""
+ }
+ rr.Usage = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TLSA Selector", l}, ""
+ }
+ rr.Selector = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TLSA MatchingType", l}, ""
+ }
+ rr.MatchingType = uint8(i)
+ // e2 must be a fresh variable: storing a nil *ParseError into the
+ // error-typed e above would make it compare non-nil.
+ s, e2, c1 := endingToString(c, "bad TLSA Certificate", f)
+ if e2 != nil {
+ return nil, e2, c1
+ }
+ rr.Certificate = s
+ return rr, nil, c1
+}
+
+func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RFC3597)
+ rr.Hdr = h
+ l := <-c
+ if l.token != "\\#" {
+ return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
+ }
+ <-c // zBlank
+ l = <-c
+ rdlength, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, ""
+ }
+
+ s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ if rdlength*2 != len(s) {
+ return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
+ }
+ rr.Rdata = s
+ return rr, nil, c1
+}
+
+func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SPF)
+ rr.Hdr = h
+
+ s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.Txt = s
+ return rr, nil, c1
+}
+
+func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TXT)
+ rr.Hdr = h
+
+ // No zBlank is read here, because the entire rdata is the TXT data.
+ s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.Txt = s
+ return rr, nil, c1
+}
+
+// identical to setTXT
+func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NINFO)
+ rr.Hdr = h
+
+ s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.ZSData = s
+ return rr, nil, c1
+}
+
+func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(URI)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 { // Dynamic updates.
+ return rr, nil, ""
+ }
+
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad URI Priority", l}, ""
+ }
+ rr.Priority = uint16(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad URI Weight", l}, ""
+ }
+ rr.Weight = uint16(i)
+
+ <-c // zBlank
+ s, err, c1 := endingToTxtSlice(c, "bad URI Target", f)
+ if err != nil {
+ return nil, err, ""
+ }
+ if len(s) > 1 {
+ return nil, &ParseError{f, "bad URI Target", l}, ""
+ }
+ rr.Target = s[0]
+ return rr, nil, c1
+}
+
+func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ // awesome record to parse!
+ rr := new(DHCID)
+ rr.Hdr = h
+
+ s, e, c1 := endingToString(c, "bad DHCID Digest", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.Digest = s
+ return rr, nil, c1
+}
+
+func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NID)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad NID Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ u, err := stringToNodeID(l)
+ if err != nil || l.err {
+ return nil, err, ""
+ }
+ rr.NodeID = u
+ return rr, nil, ""
+}
+
+func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(L32)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad L32 Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Locator32 = net.ParseIP(l.token)
+ if rr.Locator32 == nil || l.err {
+ return nil, &ParseError{f, "bad L32 Locator", l}, ""
+ }
+ return rr, nil, ""
+}
+
+func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(LP)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad LP Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Fqdn = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Fqdn = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad LP Fqdn", l}, ""
+ }
+ if rr.Fqdn[l.length-1] != '.' {
+ rr.Fqdn = appendOrigin(rr.Fqdn, o)
+ }
+ return rr, nil, ""
+}
+
+func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(L64)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad L64 Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ u, err := stringToNodeID(l)
+ if err != nil || l.err {
+ return nil, err, ""
+ }
+ rr.Locator64 = u
+ return rr, nil, ""
+}
+
+func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(UID)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad UID Uid", l}, ""
+ }
+ rr.Uid = uint32(i)
+ return rr, nil, ""
+}
+
+func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(GID)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad GID Gid", l}, ""
+ }
+ rr.Gid = uint32(i)
+ return rr, nil, ""
+}
+
+func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(UINFO)
+ rr.Hdr = h
+ s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.Uinfo = s[0] // Silently discard any strings beyond the first.
+ return rr, nil, c1
+}
+
+func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(PX)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ i, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad PX Preference", l}, ""
+ }
+ rr.Preference = uint16(i)
+ <-c // zBlank
+ l = <-c // zString
+ rr.Map822 = l.token
+ if l.length == 0 {
+ return rr, nil, ""
+ }
+ if l.token == "@" {
+ rr.Map822 = o
+ return rr, nil, ""
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad PX Map822", l}, ""
+ }
+ if rr.Map822[l.length-1] != '.' {
+ rr.Map822 = appendOrigin(rr.Map822, o)
+ }
+ <-c // zBlank
+ l = <-c // zString
+ rr.Mapx400 = l.token
+ if l.token == "@" {
+ rr.Mapx400 = o
+ return rr, nil, ""
+ }
+ _, ok = IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad PX Mapx400", l}, ""
+ }
+ if rr.Mapx400[l.length-1] != '.' {
+ rr.Mapx400 = appendOrigin(rr.Mapx400, o)
+ }
+ return rr, nil, ""
+}
+
+func setIPSECKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(IPSECKEY)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, err := strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad IPSECKEY Precedence", l}, ""
+ }
+ rr.Precedence = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, err = strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, ""
+ }
+ rr.GatewayType = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, err = strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad IPSECKEY Algorithm", l}, ""
+ }
+ rr.Algorithm = uint8(i)
+
+ // Depending on the GatewayType, a different gateway element follows here.
+ <-c // zBlank
+ l = <-c
+ switch rr.GatewayType {
+ case 0:
+ fallthrough
+ case 3:
+ rr.GatewayName = l.token
+ if l.token == "@" {
+ rr.GatewayName = o
+ }
+ _, ok := IsDomainName(l.token)
+ if !ok || l.length == 0 || l.err {
+ return nil, &ParseError{f, "bad IPSECKEY GatewayName", l}, ""
+ }
+ if rr.GatewayName[l.length-1] != '.' {
+ rr.GatewayName = appendOrigin(rr.GatewayName, o)
+ }
+ case 1:
+ rr.GatewayA = net.ParseIP(l.token)
+ if rr.GatewayA == nil {
+ return nil, &ParseError{f, "bad IPSECKEY GatewayA", l}, ""
+ }
+ case 2:
+ rr.GatewayAAAA = net.ParseIP(l.token)
+ if rr.GatewayAAAA == nil {
+ return nil, &ParseError{f, "bad IPSECKEY GatewayAAAA", l}, ""
+ }
+ default:
+ return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, ""
+ }
+
+ s, e, c1 := endingToString(c, "bad IPSECKEY PublicKey", f)
+ if e != nil {
+ return nil, e, c1
+ }
+ rr.PublicKey = s
+ return rr, nil, c1
+}
+
+func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(CAA)
+ rr.Hdr = h
+ l := <-c
+ if l.length == 0 {
+ return rr, nil, l.comment
+ }
+ i, err := strconv.Atoi(l.token)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad CAA Flag", l}, ""
+ }
+ rr.Flag = uint8(i)
+
+ <-c // zBlank
+ l = <-c // zString
+ if l.value != zString {
+ return nil, &ParseError{f, "bad CAA Tag", l}, ""
+ }
+ rr.Tag = l.token
+
+ <-c // zBlank
+ s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ if len(s) > 1 {
+ return nil, &ParseError{f, "bad CAA Value", l}, ""
+ }
+ rr.Value = s[0]
+ return rr, nil, c1
+}
+
+var typeToparserFunc = map[uint16]parserFunc{
+ TypeAAAA: parserFunc{setAAAA, false},
+ TypeAFSDB: parserFunc{setAFSDB, false},
+ TypeA: parserFunc{setA, false},
+ TypeCAA: parserFunc{setCAA, true},
+ TypeCDS: parserFunc{setCDS, true},
+ TypeCDNSKEY: parserFunc{setCDNSKEY, true},
+ TypeCERT: parserFunc{setCERT, true},
+ TypeCNAME: parserFunc{setCNAME, false},
+ TypeDHCID: parserFunc{setDHCID, true},
+ TypeDLV: parserFunc{setDLV, true},
+ TypeDNAME: parserFunc{setDNAME, false},
+ TypeKEY: parserFunc{setKEY, true},
+ TypeDNSKEY: parserFunc{setDNSKEY, true},
+ TypeDS: parserFunc{setDS, true},
+ TypeEID: parserFunc{setEID, true},
+ TypeEUI48: parserFunc{setEUI48, false},
+ TypeEUI64: parserFunc{setEUI64, false},
+ TypeGID: parserFunc{setGID, false},
+ TypeGPOS: parserFunc{setGPOS, false},
+ TypeHINFO: parserFunc{setHINFO, true},
+ TypeHIP: parserFunc{setHIP, true},
+ TypeIPSECKEY: parserFunc{setIPSECKEY, true},
+ TypeKX: parserFunc{setKX, false},
+ TypeL32: parserFunc{setL32, false},
+ TypeL64: parserFunc{setL64, false},
+ TypeLOC: parserFunc{setLOC, true},
+ TypeLP: parserFunc{setLP, false},
+ TypeMB: parserFunc{setMB, false},
+ TypeMD: parserFunc{setMD, false},
+ TypeMF: parserFunc{setMF, false},
+ TypeMG: parserFunc{setMG, false},
+ TypeMINFO: parserFunc{setMINFO, false},
+ TypeMR: parserFunc{setMR, false},
+ TypeMX: parserFunc{setMX, false},
+ TypeNAPTR: parserFunc{setNAPTR, false},
+ TypeNID: parserFunc{setNID, false},
+ TypeNIMLOC: parserFunc{setNIMLOC, true},
+ TypeNINFO: parserFunc{setNINFO, true},
+ TypeNSAPPTR: parserFunc{setNSAPPTR, false},
+ TypeNSEC3PARAM: parserFunc{setNSEC3PARAM, false},
+ TypeNSEC3: parserFunc{setNSEC3, true},
+ TypeNSEC: parserFunc{setNSEC, true},
+ TypeNS: parserFunc{setNS, false},
+ TypeOPENPGPKEY: parserFunc{setOPENPGPKEY, true},
+ TypePTR: parserFunc{setPTR, false},
+ TypePX: parserFunc{setPX, false},
+ TypeSIG: parserFunc{setSIG, true},
+ TypeRKEY: parserFunc{setRKEY, true},
+ TypeRP: parserFunc{setRP, false},
+ TypeRRSIG: parserFunc{setRRSIG, true},
+ TypeRT: parserFunc{setRT, false},
+ TypeSOA: parserFunc{setSOA, false},
+ TypeSPF: parserFunc{setSPF, true},
+ TypeSRV: parserFunc{setSRV, false},
+ TypeSSHFP: parserFunc{setSSHFP, true},
+ TypeTALINK: parserFunc{setTALINK, false},
+ TypeTA: parserFunc{setTA, true},
+ TypeTLSA: parserFunc{setTLSA, true},
+ TypeTXT: parserFunc{setTXT, true},
+ TypeUID: parserFunc{setUID, false},
+ TypeUINFO: parserFunc{setUINFO, true},
+ TypeURI: parserFunc{setURI, true},
+ TypeWKS: parserFunc{setWKS, true},
+ TypeX25: parserFunc{setX25, false},
+}
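+
+// A reading of the table above (inferred from the parsers, not stated in
+// the original): the boolean in each parserFunc marks record types whose
+// rdata has a variable-length ending, i.e. TXT-like strings or a trailing
+// base64/hex blob consumed via endingToString or endingToTxtSlice, while
+// fixed-layout rdata such as A, MX or SOA uses false.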
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000000..81032bed88
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1,53 @@
+# Overview
+This is the [Go](http://golang.org) client library for
+[Prometheus](http://www.prometheus.io) telemetric instrumentation. It
+enables authors to define process-space metrics for their servers and
+expose them through a web service interface for extraction,
+aggregation, and a whole slew of other post-processing techniques.
+
+# Installing
+ $ go get github.com/prometheus/client_golang/prometheus
+
+# Example
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ indexed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "my_company",
+ Subsystem: "indexer",
+ Name: "documents_indexed",
+ Help: "The number of documents indexed.",
+ })
+ size = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "my_company",
+ Subsystem: "storage",
+ Name: "documents_total_size_bytes",
+ Help: "The total size of all documents in the storage.",
+ })
+)
+
+func main() {
+ http.Handle("/metrics", prometheus.Handler())
+
+ indexed.Inc()
+ size.Set(5)
+
+ http.ListenAndServe(":8080", nil)
+}
+
+func init() {
+ prometheus.MustRegister(indexed)
+ prometheus.MustRegister(size)
+}
+```
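+
+Assuming the example server above is running locally, the exposed
+metrics can then be inspected with any HTTP client, e.g.:
+
+ $ curl http://localhost:8080/metrics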
+
+# Documentation
+
+[GoDoc](https://godoc.org/github.com/prometheus/client_golang)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000000..c04688009f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
+//
+// The stock metrics provided by this package (like Gauge, Counter, Summary) are
+// also Collectors (which only ever collect one metric, namely itself). An
+// implementer of Collector may, however, collect multiple metrics in a
+// coordinated fashion and/or create metrics on the fly. Examples for collectors
+// already implemented in this library are the metric vectors (i.e. collection
+// of multiple instances of the same Metric but with different label values)
+// like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by Prometheus when collecting metrics. The
+ // implementation sends each collected metric via the provided channel
+ // and returns once the last metric has been sent. The descriptor of
+ // each sent metric is one of those returned by Describe. Returned
+ // metrics that share the same descriptor must differ in their variable
+ // label values. This method may be called concurrently and must
+ // therefore be implemented in a concurrency safe way. Blocking occurs
+ // at the expense of total performance of rendering all registered
+ // metrics. Ideally, Collector implementations support concurrent
+ // readers.
+ Collect(chan<- Metric)
+}
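+
+// As a hedged sketch (the type and field names here are invented, not
+// part of this file), a minimal custom Collector that reports a single
+// value at collect time could look like:
+//
+// type queueCollector struct {
+// 	desc   *Desc
+// 	length func() float64 // supplied by the caller
+// }
+//
+// func (q queueCollector) Describe(ch chan<- *Desc) { ch <- q.desc }
+// func (q queueCollector) Collect(ch chan<- Metric) {
+// 	ch <- MustNewConstMetric(q.desc, GaugeValue, q.length())
+// }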
+
+// SelfCollector implements Collector for a single Metric so that the
+// Metric collects itself. Add it as an anonymous field to a struct that
+// implements Metric, and call Init with the Metric itself as an argument.
+type SelfCollector struct {
+ self Metric
+}
+
+// Init provides the SelfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *SelfCollector) Init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *SelfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *SelfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
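+
+// A sketch of the intended usage (invented names; the constructors in
+// this package, e.g. NewCounter, follow the same pattern):
+//
+// type myMetric struct {
+// 	SelfCollector
+// 	// ... fields and methods implementing Metric ...
+// }
+//
+// func newMyMetric() *myMetric {
+// 	m := &myMetric{}
+// 	m.Init(m) // the metric now collects itself
+// 	return m
+// }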
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000000..a2952d1c88
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,175 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "hash/fnv"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Set is used to set the Counter to an arbitrary value. It is only used
+ // if you have to transfer a value from an external counter into this
+ // Prometheus metric. Do not use it for regular handling of a
+ // Prometheus counter (as it can be used to break the contract of
+ // monotonically increasing values).
+ Set(float64)
+ // Inc increments the counter by 1.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.Init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.Init(result) // Init self-collection.
+ return result
+ },
+ },
+ }
+}
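+
+// A brief usage sketch (metric and label names invented):
+//
+// httpReqs := NewCounterVec(
+// 	CounterOpts{Name: "http_requests_total", Help: "Count of HTTP requests."},
+// 	[]string{"code", "method"},
+// )
+// httpReqs.WithLabelValues("200", "GET").Inc()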
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
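+
+// A hedged example of NewCounterFunc; the atomic counter here is an
+// invented stand-in for any external, monotonically increasing value
+// (requires "sync/atomic"):
+//
+// var handled uint64 // incremented elsewhere via atomic.AddUint64
+// cf := NewCounterFunc(
+// 	CounterOpts{Name: "requests_handled_total", Help: "Handled requests."},
+// 	func() float64 { return float64(atomic.LoadUint64(&handled)) },
+// )
+// MustRegister(cf)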
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000000..fcde784d64
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,201 @@
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+ labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // variableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported
+ // at registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !metricNameRE.MatchString(fqName) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ h := fnv.New64a()
+ var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
+ for _, val := range labelValues {
+ b.Reset()
+ b.WriteString(val)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.id = h.Sum64()
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ h.Reset()
+ b.Reset()
+ b.WriteString(help)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ for _, labelName := range labelNames {
+ b.Reset()
+ b.WriteString(labelName)
+ b.WriteByte(separatorByte)
+ h.Write(b.Bytes())
+ }
+ d.dimHash = h.Sum64()
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
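+
+// For illustration (names invented), a Desc with one variable and one
+// constant label:
+//
+// desc := NewDesc(
+// 	"worker_pool_tasks_completed_total",
+// 	"Total number of tasks completed by the worker pool.",
+// 	[]string{"worker"},        // variable label
+// 	Labels{"pool": "default"}, // constant label
+// )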
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return labelNameRE.MatchString(l) &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000000..425fe8793c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides embeddable metric primitives for servers and
+// standardized exposition of telemetry through a web services interface.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// To expose metrics registered with the Prometheus registry, an HTTP server
+// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
+//
+// http.Handle("/metrics", prometheus.Handler())
+//
+// As a starting point a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// })
+// )
+//
+// func init() {
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.Inc()
+//
+// http.Handle("/metrics", prometheus.Handler())
+// http.ListenAndServe(":8080", nil)
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter.
+// It also exports some stats about the HTTP usage of the /metrics
+// endpoint. (See the Handler function for more detail.)
+//
+// Two more advanced metric types are the Summary and Histogram.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec.
+//
+// Those are all the parts needed for basic usage. Detailed documentation and
+// examples are provided below.
+//
+// Everything else this package offers is essentially for "power users" only. A
+// few pointers to "power user features":
+//
+// All the various ...Opts structs have a ConstLabels field for labels that
+// never change their value (which is only useful under special circumstances,
+// see documentation of the Opts type).
+//
+// The Untyped metric behaves like a Gauge, but signals the Prometheus server
+// not to assume anything about its type.
+//
+// Functions to fine-tune how the metric registry works: EnableCollectChecks,
+// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
+//
+// For custom metric collection, there are two entry points: Custom Metric
+// implementations and custom Collector implementations. A Metric is the
+// fundamental unit in the Prometheus data model: a sample at a point in time
+// together with its meta-data (like its fully-qualified name and any number of
+// pairs of label name and label value) that knows how to marshal itself into a
+// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
+// gets registered with the Prometheus registry and manages the collection of
+// one or more Metrics. Many parts of this package are building blocks for
+// Metrics and Collectors. Desc is the metric descriptor, actually used by all
+// metrics under the hood, and by Collectors to describe the Metrics to be
+// collected, but only to be dealt with by users if they implement their own
+// Metrics or Collectors. To create a Desc, the BuildFQName function will come
+// in handy. Other useful components for Metric and Collector implementation
+// include: LabelPairSorter to sort the DTO version of label pairs,
+// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
+// collection time, MetricVec to bundle custom Metrics into a metric vector
+// Collector, SelfCollector to make a custom Metric collect itself.
+//
+// A good example for a custom Collector is the ExpVarCollector included in this
+// package, which exports variables exported via the "expvar" package as
+// Prometheus metrics.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
new file mode 100644
index 0000000000..0f7630d53f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+// ExpvarCollector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the ExpvarCollector is inherently
+// slow. Thus, the ExpvarCollector is probably great for experiments and
+// prototyping, but you should seriously consider a more direct implementation of
+// Prometheus metrics for monitoring production systems.
+//
+// Use NewExpvarCollector to create new instances.
+type ExpvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
+// to be registered with the Prometheus registry.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as a Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must again be numbers or
+// bools, as above.
+//
+// For descriptors with more than one variable label, the expvar value must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
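+//
+// A usage sketch (the expvar key and metric name are illustrative):
+//
+// expvar.NewInt("memstats_alloc_bytes")
+// c := NewExpvarCollector(map[string]*Desc{
+// "memstats_alloc_bytes": NewDesc(
+// "expvar_memstats_alloc_bytes",
+// "Currently allocated bytes, proxied from expvar.",
+// nil, nil,
+// ),
+// })
+// MustRegister(c)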
+func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
+ return &ExpvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *ExpvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000000..ba8a402caf
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1.
+ Inc()
+ // Dec decrements the Gauge by 1.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be
+ // negative, resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
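+
+// A usage sketch (the metric name is illustrative):
+//
+// queueDepth := NewGauge(GaugeOpts{
+// Name: "queue_depth",
+// Help: "Current number of items in the work queue.",
+// })
+// MustRegister(queueDepth)
+// queueDepth.Set(17)
+// queueDepth.Inc()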
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
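+
+// A sketch of NewGaugeFunc usage (runtime.NumGoroutine is from the standard
+// library; the metric name is illustrative):
+//
+// g := NewGaugeFunc(GaugeOpts{
+// Name: "goroutines_current",
+// Help: "Number of goroutines currently running.",
+// }, func() float64 {
+// return float64(runtime.NumGoroutine())
+// })
+// MustRegister(g)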
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000000..85fa20be45
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,50 @@
+package prometheus
+
+import (
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// Go process.
+func NewGoCollector() *goCollector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Name: "go_goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ }
+}
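+
+// Typically, this collector is registered once at program startup, e.g.:
+//
+// MustRegister(NewGoCollector())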
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
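+ // With 5 requested quantiles, ReadGCStats fills PauseQuantiles with the
+ // minimum pause (index 0) and the 25th, 50th, 75th, and 100th
+ // percentiles. The loop below maps indices 1..4 to the quantile keys
+ // 0.25, 0.5, 0.75, and 1.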
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000000..f98a41bc89
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,450 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+var (
+ // DefBuckets are the default Histogram buckets. The default buckets are
+ // tailored to broadly measure the response time (in seconds) of a
+ // network service. Most likely, however, you will be required to define
+ // buckets customized to your use case.
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
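+
+// For illustration, per the definitions above:
+//
+// LinearBuckets(0, 5, 4) // => []float64{0, 5, 10, 15}
+// ExponentialBuckets(1, 2, 4) // => []float64{1, 2, 4, 8}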
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+// name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
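+
+// A usage sketch (name and buckets are illustrative):
+//
+// latency := NewHistogram(HistogramOpts{
+// Name: "request_duration_seconds",
+// Help: "Distribution of request latencies.",
+// Buckets: ExponentialBuckets(0.001, 2, 10),
+// })
+// MustRegister(latency)
+// latency.Observe(0.042)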
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.Init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ SelfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
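+ // Lock-free update of the float64 sum: read the current bits, add v,
+ // and publish via compare-and-swap, retrying if another goroutine got
+ // in between.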
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
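+
+// A sketch of use inside a custom Collector's Collect method (desc and all
+// numbers are illustrative):
+//
+// ch <- MustNewConstHistogram(desc, 4711, 403.34,
+// map[float64]uint64{25: 121, 50: 2403, 100: 3221},
+// )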
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 0000000000..eabe602468
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,361 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
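+// nowSeries returns a nower that returns the given times in the order
+// provided, one per call to Now.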
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
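+
+// A usage sketch (apiHandler is an assumed application handler):
+//
+// http.Handle("/metrics", Handler())
+// http.Handle("/api", InstrumentHandler("api", apiHandler))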
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
+// flexibility (at the cost of a more complex call syntax). Like
+// InstrumentHandler, this function registers four metric collectors, but it
+// uses the provided SummaryOpts to create them. However, the fields "Name" and
+// "Help" in the SummaryOpts are ignored. "Name" is replaced by
+// "requests_total", "request_duration_microseconds", "request_size_bytes", and
+// "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
+// more flexibility (at the cost of a more complex call syntax). See
+// InstrumentHandlerWithOpts for details on how the provided SummaryOpts are used.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+
+ regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
+ regReqDur := MustRegisterOrGet(reqDur).(Summary)
+ regReqSz := MustRegisterOrGet(reqSz).(Summary)
+ regResSz := MustRegisterOrGet(resSz).(Summary)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := make(chan int)
+ urlLen := 0
+ if r.URL != nil {
+ urlLen = len(r.URL.String())
+ }
+ go computeApproximateRequestSize(r, out, urlLen)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ regReqCnt.WithLabelValues(method, code).Inc()
+ regReqDur.Observe(elapsed)
+ regResSz.Observe(float64(delegate.written))
+ regReqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000000..86fd81c108
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta-data being exported to
+// Prometheus. Implementers of Metric in this package include Gauge, Counter,
+// Untyped, and Summary. Users can implement their own Metric types, but that
+// should be rarely needed. See the example for SelfCollector, which is also an
+// example for a user-implemented Metric.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Implementers of custom Metric types must observe concurrency safety
+ // as reads of this metric may occur at any time, and any blocking
+ // occurs at the expense of total performance of rendering all
+ // registered metrics. Ideally Metric implementations should support
+ // concurrent readers.
+ //
+ // The Prometheus client library attempts to minimize memory allocations
+ // and will provide a pre-existing reset dto.Metric pointer. Prometheus
+ // may recycle the dto.Metric proto message, so Metric implementations
+ // should just populate the provided dto.Metric and then should not keep
+ // any reference to it.
+ //
+ // While populating dto.Metric, labels must be sorted lexicographically.
+ // (Implementers may find LabelPairSorter useful for that.)
+ Write(*dto.Metric) error
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
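+
+// For illustration:
+//
+// BuildFQName("client", "http", "requests_total") // "client_http_requests_total"
+// BuildFQName("", "socket", "errors") // "socket_errors"
+// BuildFQName("ns", "", "") // ""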
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000000..d8cf0eda34
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal Counter
+ openFDs, maxFDs Gauge
+ vsize, rss Gauge
+ startTime Gauge
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including cpu, memory and file descriptor usage as well as
+// the process start time for the given process ID under the given namespace.
+func NewProcessCollector(pid int, namespace string) *processCollector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
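+
+// A typical registration (os.Getpid is from the standard library; the empty
+// namespace is illustrative):
+//
+// MustRegister(NewProcessCollector(os.Getpid(), ""))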
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) *processCollector {
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewCounter(CounterOpts{
+ Namespace: namespace,
+ Name: "process_cpu_seconds_total",
+ Help: "Total user and system CPU time spent in seconds.",
+ }),
+ openFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_open_fds",
+ Help: "Number of open file descriptors.",
+ }),
+ maxFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_max_fds",
+ Help: "Maximum number of open file descriptors.",
+ }),
+ vsize: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_virtual_memory_bytes",
+ Help: "Virtual memory size in bytes.",
+ }),
+ rss: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_resident_memory_bytes",
+ Help: "Resident memory size in bytes.",
+ }),
+ startTime: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_start_time_seconds",
+ Help: "Start time of the process since unix epoch in seconds.",
+ }),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal.Desc()
+ ch <- c.openFDs.Desc()
+ ch <- c.maxFDs.Desc()
+ ch <- c.vsize.Desc()
+ ch <- c.rss.Desc()
+ ch <- c.startTime.Desc()
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ c.cpuTotal.Set(stat.CPUTime())
+ ch <- c.cpuTotal
+ c.vsize.Set(float64(stat.VirtualMemory()))
+ ch <- c.vsize
+ c.rss.Set(float64(stat.ResidentMemory()))
+ ch <- c.rss
+
+ if startTime, err := stat.StartTime(); err == nil {
+ c.startTime.Set(startTime)
+ ch <- c.startTime
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ c.openFDs.Set(float64(fds))
+ ch <- c.openFDs
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ c.maxFDs.Set(float64(limits.OpenFiles))
+ ch <- c.maxFDs
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push.go
new file mode 100644
index 0000000000..1c33848a35
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/push.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+// Push triggers a metric collection by the default registry and pushes all
+// collected metrics to the Pushgateway specified by addr. See the Pushgateway
+// documentation for the detailed implications of the job and instance
+// parameters. instance can be left empty. You can use just host:port or
+// ip:port as the url, in which case 'http://' is added automatically. You can
+// also include the scheme in the URL. However, do not include the
+// '/metrics/jobs/...' part.
+//
+// Note that all previously pushed metrics with the same job and instance will
+// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
+// to push to the Pushgateway.)
+func Push(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "PUT")
+}
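+
+// For example (job, instance, and address are illustrative):
+//
+// if err := Push("my_batch_job", "worker-1", "pushgateway:9091"); err != nil {
+// // handle the error, e.g. log and retry
+// }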
+
+// PushAdd works like Push, but only previously pushed metrics with the same
+// name (and the same job and instance) will be replaced. (It uses HTTP method
+// 'POST' to push to the Pushgateway.)
+func PushAdd(job, instance, url string) error {
+ return defRegistry.Push(job, instance, url, "POST")
+}
+
+// PushCollectors works like Push, but it does not collect from the default
+// registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "PUT", collectors...)
+}
+
+// PushAddCollectors works like PushAdd, but it does not collect from the
+// default registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
+ return pushCollectors(job, instance, url, "POST", collectors...)
+}
+
+func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
+ r := newRegistry()
+ for _, collector := range collectors {
+ if _, err := r.Register(collector); err != nil {
+ return err
+ }
+ }
+ return r.Push(job, instance, url, method)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000000..871558fa1c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,759 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ "bitbucket.org/ww/goautoneg"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/text"
+)
+
+var (
+ defRegistry = newDefaultRegistry()
+ errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
+)
+
+// Constants relevant to the HTTP interface.
+const (
+ // APIVersion is the version of the format of the exported data. This
+ // will match this library's version, which subscribes to the Semantic
+ // Versioning scheme.
+ APIVersion = "0.0.4"
+
+ // DelimitedTelemetryContentType is the content type set on telemetry
+ // data responses in delimited protobuf format.
+ DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
+ // TextTelemetryContentType is the content type set on telemetry data
+ // responses in text format.
+ TextTelemetryContentType = `text/plain; version=` + APIVersion
+ // ProtoTextTelemetryContentType is the content type set on telemetry
+ // data responses in protobuf text format. (Only used for debugging.)
+ ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
+ // ProtoCompactTextTelemetryContentType is the content type set on
+ // telemetry data responses in protobuf compact text format. (Only used
+ // for debugging.)
+ ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`
+
+ // Constants for object pools.
+ numBufs = 4
+ numMetricFamilies = 1000
+ numMetrics = 10000
+
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+
+ acceptEncodingHeader = "Accept-Encoding"
+ acceptHeader = "Accept"
+)
+
+// Handler returns the HTTP handler for the global Prometheus registry. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name). Usually the handler is used to handle the "/metrics" endpoint.
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", defRegistry)
+}
+
+// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
+// handler is not instrumented. This is useful if no instrumentation is desired
+// (for whatever reason) or if the instrumentation has to happen with a
+// different handler name (or with a different instrumentation approach
+// altogether). See the InstrumentHandler example.
+func UninstrumentedHandler() http.Handler {
+ return defRegistry
+}
+
+// Register registers a new Collector to be included in metrics collection. It
+// returns an error if the descriptors provided by the Collector are invalid or
+// if they - in combination with descriptors of already registered Collectors -
+// do not fulfill the consistency and uniqueness criteria described in the Desc
+// documentation.
+//
+// Do not register the same Collector multiple times concurrently. (Registering
+// the same Collector twice would result in an error anyway, but on top of that,
+// it is not safe to do so concurrently.)
+func Register(m Collector) error {
+ _, err := defRegistry.Register(m)
+ return err
+}
+
+// MustRegister works like Register but panics where Register would have
+// returned an error.
+func MustRegister(m Collector) {
+ err := Register(m)
+ if err != nil {
+ panic(err)
+ }
+}
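+
+// A common pattern is to register metrics during package initialization (a
+// sketch; names are illustrative):
+//
+// var opsProcessed = NewCounter(CounterOpts{
+// Name: "ops_processed_total",
+// Help: "Total number of processed operations.",
+// })
+//
+// func init() {
+// MustRegister(opsProcessed)
+// }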
+
+// RegisterOrGet works like Register but does not return an error if a Collector
+// is registered that equals a previously registered Collector. (Two Collectors
+// are considered equal if their Describe method yields the same set of
+// descriptors.) Instead, the previously registered Collector is returned (which
+// is helpful if the new and previously registered Collectors are equal but not
+// identical, i.e. not pointers to the same object).
+//
+// As for Register, it is still not safe to call RegisterOrGet with the same
+// Collector multiple times concurrently.
+func RegisterOrGet(m Collector) (Collector, error) {
+ return defRegistry.RegisterOrGet(m)
+}
+
+// MustRegisterOrGet works like Register but panics where RegisterOrGet would
+// have returned an error.
+func MustRegisterOrGet(m Collector) Collector {
+ existing, err := RegisterOrGet(m)
+ if err != nil {
+ panic(err)
+ }
+ return existing
+}
+
+// Unregister unregisters the Collector that equals the Collector passed in as
+// an argument. (Two Collectors are considered equal if their Describe method
+// yields the same set of descriptors.) The function returns whether a Collector
+// was unregistered.
+func Unregister(c Collector) bool {
+ return defRegistry.Unregister(c)
+}
+
+// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
+// are collected. The hook function must be set before metrics collection begins
+// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
+// MetricFamily protobufs returned by the hook function are merged with the
+// metrics collected in the usual way.
+//
+// This is a way to directly inject MetricFamily protobufs managed and owned by
+// the caller. The caller has full responsibility. As no registration of the
+// injected metrics has happened, there is no descriptor to check against, and
+// there are no registration-time checks. If collect-time checks are disabled
+// (see function EnableCollectChecks), no sanity checks are performed on the
+// returned protobufs at all. If collect-checks are enabled, type and uniqueness
+// checks are performed, but no further consistency checks (which would require
+// knowledge of a metric descriptor).
+//
+// Sorting concerns: The caller is responsible for sorting the label pairs in
+// each metric. However, the metric families themselves are sorted by the
+// registry anyway, as that is required after merging with the metric families
+// collected conventionally.
+//
+// The function must be callable at any time and concurrently.
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
+ defRegistry.metricFamilyInjectionHook = hook
+}
+
+// PanicOnCollectError sets the behavior whether a panic is caused upon an error
+// while metrics are collected and served to the HTTP endpoint. By default, an
+// internal server error (status code 500) is served with an error message.
+func PanicOnCollectError(b bool) {
+ defRegistry.panicOnCollectError = b
+}
+
+// EnableCollectChecks enables (or disables) additional consistency checks
+// during metrics collection. These additional checks are not enabled by default
+// because they inflict a performance penalty and the errors they check for can
+// only happen if the used Metric and Collector types have internal programming
+// errors. It can be helpful to enable these checks while working with custom
+// Collectors or Metrics whose correctness is not well established yet.
+func EnableCollectChecks(b bool) {
+ defRegistry.collectChecksEnabled = b
+}
+
+// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
+// certain encoding. It returns the number of bytes written and any error
+// encountered. Note that text.WriteProtoDelimited and text.MetricFamilyToText
+// are encoders.
+type encoder func(io.Writer, *dto.MetricFamily) (int, error)
+
+type registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ bufPool chan *bytes.Buffer
+ metricFamilyPool chan *dto.MetricFamily
+ metricPool chan *dto.Metric
+ metricFamilyInjectionHook func() []*dto.MetricFamily
+
+ panicOnCollectError, collectChecksEnabled bool
+}
+
+func (r *registry) Register(c Collector) (Collector, error) {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ newDescIDs := map[uint64]struct{}{}
+ newDimHashesByName := map[string]uint64{}
+ var collectorID uint64 // Just a sum of all desc IDs.
+ var duplicateDescErr error
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+	// Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return nil, errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return existing, errAlreadyReg
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return nil, duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return c, nil
+}
+
+func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
+ existing, err := r.Register(m)
+ if err != nil && err != errAlreadyReg {
+ return nil, err
+ }
+ return existing, nil
+}
+
+func (r *registry) Unregister(c Collector) bool {
+ descChan := make(chan *Desc, capDescChan)
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+
+ descIDs := map[uint64]struct{}{}
+ var collectorID uint64 // Just a sum of the desc IDs.
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+func (r *registry) Push(job, instance, pushURL, method string) error {
+ if !strings.Contains(pushURL, "://") {
+ pushURL = "http://" + pushURL
+ }
+ pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
+ if instance != "" {
+ pushURL += "/instances/" + url.QueryEscape(instance)
+ }
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ if _, err := r.writePB(buf, text.WriteProtoDelimited); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ return err
+ }
+ req, err := http.NewRequest(method, pushURL, buf)
+ if err != nil {
+ return err
+ }
+ req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+	if resp.StatusCode != http.StatusAccepted {
+ return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
+ }
+ return nil
+}
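+
+// For illustration, with job "some_job", instance "some_instance", and a bare
+// "host:9091" as pushURL, the request above is sent (with the given method) to
+//
+//	http://host:9091/metrics/jobs/some_job/instances/some_instance
+//
+// (host and port are hypothetical; only the path shape is fixed by this code).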
+
+func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ enc, contentType := chooseEncoder(req)
+ buf := r.getBuf()
+ defer r.giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ if _, err := r.writePB(writer, enc); err != nil {
+ if r.panicOnCollectError {
+ panic(err)
+ }
+ http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, contentType)
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+}
+
+func (r *registry) writePB(w io.Writer, writeEncoded encoder) (int, error) {
+ var metricHashes map[uint64]struct{}
+ if r.collectChecksEnabled {
+ metricHashes = make(map[uint64]struct{})
+ }
+ metricChan := make(chan Metric, capMetricChan)
+ wg := sync.WaitGroup{}
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+	// (Collectors could be complex and slow, so we collect from all of
+	// them concurrently.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+		for range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+		// This could be done concurrently, too, but it would require
+		// locking of metricFamiliesByName (and of metricHashes if checks
+		// are enabled). Most likely not worth it.
+ desc := metric.Desc()
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if !ok {
+ metricFamily = r.getMetricFamily()
+ defer r.giveMetricFamily(metricFamily)
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ dtoMetric := r.getMetric()
+ defer r.giveMetric(dtoMetric)
+ if err := metric.Write(dtoMetric); err != nil {
+ // TODO: Consider different means of error reporting so
+ // that a single erroneous metric could be skipped
+ // instead of blowing up the whole collection.
+ return 0, fmt.Errorf("error collecting metric %v: %s", desc, err)
+ }
+ switch {
+ case metricFamily.Type != nil:
+ // Type already set. We are good.
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return 0, fmt.Errorf("empty metric collected: %s", dtoMetric)
+ }
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil {
+ return 0, err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+
+ if r.metricFamilyInjectionHook != nil {
+ for _, mf := range r.metricFamilyInjectionHook() {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if !exists {
+ metricFamiliesByName[mf.GetName()] = mf
+ if r.collectChecksEnabled {
+ for _, m := range mf.Metric {
+ if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
+ return 0, err
+ }
+ }
+ }
+ continue
+ }
+ for _, m := range mf.Metric {
+ if r.collectChecksEnabled {
+ if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
+ return 0, err
+ }
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+
+ // Now that MetricFamilies are all set, sort their Metrics
+ // lexicographically by their label values.
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+
+ // Write out MetricFamilies sorted by their name.
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name := range metricFamiliesByName {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ var written int
+ for _, name := range names {
+		n, err := writeEncoded(w, metricFamiliesByName[name])
+		written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error {
+
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := fnv.New64a()
+ var buf bytes.Buffer
+ buf.WriteString(metricFamily.GetName())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+	// Make sure the label pairs are sorted. We depend on it for the
+	// consistency check. Label pairs must be sorted by contract, but the
+	// point of this method is to check for contract violations, so we
+	// sort here ourselves to be safe.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ buf.Reset()
+ buf.WriteString(lp.GetValue())
+ buf.WriteByte(separatorByte)
+ h.Write(buf.Bytes())
+ }
+ metricHash := h.Sum64()
+ if _, exists := metricHashes[metricHash]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ metricHashes[metricHash] = struct{}{}
+
+ if desc == nil {
+ return nil // Nothing left to check if we have no desc.
+ }
+
+ // Desc consistency with metric family.
+ if metricFamily.GetName() != desc.fqName {
+ return fmt.Errorf(
+ "collected metric %s %s has name %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName,
+ )
+ }
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+
+ r.mtx.RLock() // Remaining checks need the read lock.
+ defer r.mtx.RUnlock()
+
+ // Is the desc registered?
+ if _, exist := r.descIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+
+ return nil
+}
+
+func (r *registry) getBuf() *bytes.Buffer {
+ select {
+ case buf := <-r.bufPool:
+ return buf
+ default:
+ return &bytes.Buffer{}
+ }
+}
+
+func (r *registry) giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ select {
+ case r.bufPool <- buf:
+ default:
+ }
+}
+
+func (r *registry) getMetricFamily() *dto.MetricFamily {
+ select {
+ case mf := <-r.metricFamilyPool:
+ return mf
+ default:
+ return &dto.MetricFamily{}
+ }
+}
+
+func (r *registry) giveMetricFamily(mf *dto.MetricFamily) {
+ mf.Reset()
+ select {
+ case r.metricFamilyPool <- mf:
+ default:
+ }
+}
+
+func (r *registry) getMetric() *dto.Metric {
+ select {
+ case m := <-r.metricPool:
+ return m
+ default:
+ return &dto.Metric{}
+ }
+}
+
+func (r *registry) giveMetric(m *dto.Metric) {
+ m.Reset()
+ select {
+ case r.metricPool <- m:
+ default:
+ }
+}
+
+func newRegistry() *registry {
+	return &registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ bufPool: make(chan *bytes.Buffer, numBufs),
+ metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies),
+ metricPool: make(chan *dto.Metric, numMetrics),
+ }
+}
+
+func newDefaultRegistry() *registry {
+ r := newRegistry()
+ r.Register(NewProcessCollector(os.Getpid(), ""))
+ r.Register(NewGoCollector())
+ return r
+}
+
+func chooseEncoder(req *http.Request) (encoder, string) {
+ accepts := goautoneg.ParseAccept(req.Header.Get(acceptHeader))
+ for _, accept := range accepts {
+ switch {
+ case accept.Type == "application" &&
+ accept.SubType == "vnd.google.protobuf" &&
+ accept.Params["proto"] == "io.prometheus.client.MetricFamily":
+ switch accept.Params["encoding"] {
+ case "delimited":
+ return text.WriteProtoDelimited, DelimitedTelemetryContentType
+ case "text":
+ return text.WriteProtoText, ProtoTextTelemetryContentType
+ case "compact-text":
+ return text.WriteProtoCompactText, ProtoCompactTextTelemetryContentType
+ default:
+ continue
+ }
+ case accept.Type == "text" &&
+ accept.SubType == "plain" &&
+ (accept.Params["version"] == "0.0.4" || accept.Params["version"] == ""):
+ return text.MetricFamilyToText, TextTelemetryContentType
+ default:
+ continue
+ }
+ }
+ return text.MetricFamilyToText, TextTelemetryContentType
+}
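+
+// For illustration, these Accept header values select the following encoders
+// under the rules above:
+//
+//	application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited
+//		-> text.WriteProtoDelimited (delimited protobuf)
+//	text/plain;version=0.0.4
+//		-> text.MetricFamilyToText (text format 0.0.4)
+//	*/* (or anything unrecognized)
+//		-> text.MetricFamilyToText (the fallback)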
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
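+
+// For illustration, a scrape request carrying
+//
+//	Accept-Encoding: gzip, deflate
+//
+// receives a *gzip.Writer here, and the handler then sets
+// "Content-Encoding: gzip" on the response.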
+
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are inconsistent. However,
+		// we have to deal with it, as people might use custom collectors
+		// or metric family injection to create inconsistent metrics. So
+		// we simply compare the number of labels in this case, which
+		// still yields a reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+	// All label values are equal. Report false so that equal elements do
+	// not claim to sort before each other (as sort.Interface requires).
+	return false
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000000..fe81e004f6
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,540 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "hash/fnv"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
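+
+// A minimal usage sketch (metric name and observed value are hypothetical):
+//
+//	latency := NewSummary(SummaryOpts{
+//		Name: "request_duration_seconds",
+//		Help: "Duration of requests in seconds.",
+//	})
+//	MustRegister(latency)
+//	latency.Observe(0.042)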
+
+var (
+ // DefObjectives are the default Summary quantile values.
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported
+ // for q will be the φ-quantile value for some φ between q-e and q+e.
+ // The default value is DefObjectives.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+}
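+
+// For example (illustrative values), a Summary reporting the median within 5%
+// and the 99th percentile within 0.1% absolute error would use:
+//
+//	opts := SummaryOpts{
+//		Name:       "rpc_duration_seconds",
+//		Help:       "RPC latency distribution.",
+//		Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
+//	}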
+
+// TODO: There is a serious problem with the sliding-window decay algorithm:
+// the Merge method of perks/quantile does not work as advertised - and it
+// might be unfixable, as the underlying algorithm is apparently not capable of
+// merging summaries in the first place. To avoid using Merge, we currently add
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. At scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: more
+// effort at observation time, less effort at scrape time, which is exactly the
+// opposite of what we set out to accomplish, but at least the results are
+// correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// at scrape time (see the code up to commit
+// 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) can no longer be used.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Objectives) == 0 {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.Init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ SelfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ },
+ },
+ }
+}
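+
+// A usage sketch (label names and values are hypothetical):
+//
+//	latencies := NewSummaryVec(
+//		SummaryOpts{
+//			Name: "http_request_duration_seconds",
+//			Help: "HTTP request latencies.",
+//		},
+//		[]string{"method", "code"},
+//	)
+//	latencies.WithLabelValues("GET", "200").Observe(0.021)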
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+ return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+ return m.MetricVec.With(labels).(Summary)
+}
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly and sent to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
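+
+// In a custom Collector, a const summary is typically created on the fly in
+// Collect; a sketch (the collector type, desc, and numbers are hypothetical):
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstSummary(
+//			c.desc,
+//			4711,  // observation count
+//			403.3, // sum of observations
+//			map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//		)
+//	}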
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000000..c65ab1c531
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that no
+// type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+ MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: MetricVec{
+ children: map[uint64]Metric{},
+ desc: desc,
+ hash: fnv.New64a(),
+ newMetric: func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ },
+ },
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
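+
+// A sketch of an UntypedFunc whose value is read at collect time (the queue
+// variable is hypothetical; its Len must be safe for concurrent calls):
+//
+//	queueLen := NewUntypedFunc(UntypedOpts{
+//		Name: "worker_queue_length",
+//		Help: "Current length of the worker queue.",
+//	}, func() float64 {
+//		return float64(queue.Len())
+//	})
+//	MustRegister(queueLen)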
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000000..b54ac11e88
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+	// valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
+	// Lock-free float64 addition: load the current bits, compute the new
+	// value, and retry via compare-and-swap if a concurrent writer got in
+	// between.
+	for {
+		oldBits := atomic.LoadUint64(&v.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+		if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ SelfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.Init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly and sent to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
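+
+// A typical use in a custom Collector (the collector type, desc, and value are
+// hypothetical; the desc is assumed to have exactly one variable label):
+//
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstMetric(c.desc, GaugeValue, 42, "some_label_value")
+//	}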
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000000..a1f3bdf37d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,247 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "fmt"
+ "hash"
+ "sync"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects not only children, but also hash and buf.
+ children map[uint64]Metric
+ desc *Desc
+
+ // hash is our own hash instance to avoid repeated allocations.
+ hash hash.Hash64
+ // buf is used to copy string contents into it for hashing,
+ // again to avoid allocations.
+ buf bytes.Buffer
+
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It always sends exactly one Desc to the
+// provided channel.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metric := range m.children {
+ ch <- metric
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+ lvs := make([]string, len(labels))
+ for i, label := range m.desc.variableLabels {
+ lvs[i] = labels[label]
+ }
+ return m.getOrCreateMetric(h, lvs...), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+ if _, has := m.children[h]; !has {
+ return false
+ }
+ delete(m.children, h)
+ return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, val := range vals {
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ m.hash.Reset()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ m.buf.Reset()
+ m.buf.WriteString(val)
+ m.hash.Write(m.buf.Bytes())
+ }
+ return m.hash.Sum64(), nil
+}
+
+func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
+ metric, ok := m.children[hash]
+ if !ok {
+		// Copy labelValues here, in the slow path. If labelValues were
+		// passed to newMetric directly, the variadic slice would escape
+		// to the heap and be allocated on every call, even when we don't
+		// go down this code path.
+ copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
+ metric = m.newMetric(copiedLabelValues...)
+ m.children[hash] = metric
+ }
+ return metric
+}
diff --git a/vendor/github.com/prometheus/client_golang/text/create.go b/vendor/github.com/prometheus/client_golang/text/create.go
new file mode 100644
index 0000000000..d1f7bc77df
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/text/create.go
@@ -0,0 +1,314 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package text contains helper functions to parse and create text-based
+// exchange formats. The package currently supports (only) version 0.0.4 of the
+// exchange format. Should other versions be supported in the future, some
+// versioning scheme has to be applied. Possibilities include separate packages
+// or separate functions. The best way depends on the nature of future changes,
+// which is the reason why no versioning scheme has been applied prematurely
+// here.
+package text
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. This function does not perform checks on the
+// content of the metric and label names, i.e. invalid metric or label names
+// will result in invalid text format output.
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+ if in.Type == nil {
+ return written, fmt.Errorf("MetricFamily has no type: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ quantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ bucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ bucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ result := bytes.NewBuffer(make([]byte, 0, len(v)))
+ for _, c := range v {
+ switch {
+ case c == '\\':
+ result.WriteString(`\\`)
+ case includeDoubleQuote && c == '"':
+ result.WriteString(`\"`)
+ case c == '\n':
+ result.WriteString(`\n`)
+ default:
+ result.WriteRune(c)
+ }
+ }
+ return result.String()
+}
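
For orientation, a hedged usage sketch of MetricFamilyToText: build a MetricFamily by hand from the generated dto types (vendored further down in this diff) and render it to stdout. The metric name and values are invented:

package main

import (
    "os"

    "github.com/golang/protobuf/proto"
    "github.com/prometheus/client_golang/text"
    dto "github.com/prometheus/client_model/go"
)

func main() {
    mf := &dto.MetricFamily{
        Name: proto.String("http_requests_total"),
        Help: proto.String("Total number of HTTP requests."),
        Type: dto.MetricType_COUNTER.Enum(),
        Metric: []*dto.Metric{{
            Label: []*dto.LabelPair{{
                Name:  proto.String("method"),
                Value: proto.String("GET"),
            }},
            Counter: &dto.Counter{Value: proto.Float64(42)},
        }},
    }
    // Emits the # HELP and # TYPE comments followed by one sample line.
    if _, err := text.MetricFamilyToText(os.Stdout, mf); err != nil {
        panic(err)
    }
}
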
diff --git a/vendor/github.com/prometheus/client_golang/text/fuzz.go b/vendor/github.com/prometheus/client_golang/text/fuzz.go
new file mode 100644
index 0000000000..a7e4d7f8f7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/text/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package text
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/client_golang/text
+// go-fuzz -bin text-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := Parser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
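
Because the file is guarded by the gofuzz build tag, one way to exercise Fuzz outside of go-fuzz is a test compiled under the same tag (run with "go test -tags gofuzz"). A hedged sketch; the test file is hypothetical and the input line is an arbitrary well-formed sample:

// +build gofuzz

package text

import "testing"

// TestFuzzSmoke checks that a well-formed sample line parses: Fuzz returns 1
// on success and 0 on a parse error.
func TestFuzzSmoke(t *testing.T) {
    if got := Fuzz([]byte("some_metric 42\n")); got != 1 {
        t.Fatalf("Fuzz returned %d, want 1", got)
    }
}
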
diff --git a/vendor/github.com/prometheus/client_golang/text/parse.go b/vendor/github.com/prometheus/client_golang/text/parse.go
new file mode 100644
index 0000000000..56f704633b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/text/parse.go
@@ -0,0 +1,822 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package text
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "hash"
+ "hash/fnv"
+ "io"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+const (
+ // metricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ metricNameLabel = "__name__"
+
+ // bucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ bucketLabel = "le"
+
+ // quantileLabel is used for the label that defines the quantile in a
+ // summary.
+ quantileLabel = "quantile"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// Parser is used to parse the simple and flat text-based exchange format. Its
+// nil value is ready to use.
+type Parser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends on '_count' or
+ // '_sum' respectively and belongs to a summary/histogram, representing the sample
+ // count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *Parser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ return p.metricFamiliesByName, p.err
+}
+
+func (p *Parser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *Parser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *Parser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *Parser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *Parser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(metricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *Parser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(metricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", metricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == quantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == bucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *Parser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == quantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == bucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *Parser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := labelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := labelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *Parser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *Parser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *Parser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *Parser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *Parser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *Parser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *Parser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *Parser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *Parser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *Parser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *Parser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *Parser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Check whether this is the _sum or _count of a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
+
+// separatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const separatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = fnv.New64a().Sum64()
+
+ hashAndBufPool sync.Pool
+)
+
+type hashAndBuf struct {
+ h hash.Hash64
+ b bytes.Buffer
+}
+
+func getHashAndBuf() *hashAndBuf {
+ hb := hashAndBufPool.Get()
+ if hb == nil {
+ return &hashAndBuf{h: fnv.New64a()}
+ }
+ return hb.(*hashAndBuf)
+}
+
+func putHashAndBuf(hb *hashAndBuf) {
+ hb.h.Reset()
+ hb.b.Reset()
+ hashAndBufPool.Put(hb)
+}
+
+// labelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func labelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(labelName)
+ hb.b.WriteByte(separatorByte)
+ hb.b.WriteString(labels[labelName])
+ hb.b.WriteByte(separatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
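
Putting the state machine to work, a hedged end-to-end sketch: feed a small text exposition through TextToMetricFamilies and walk the resulting map. The input is illustrative:

package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/client_golang/text"
)

func main() {
    input := `# HELP http_requests_total Total HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="GET"} 1027 1395066363000
http_requests_total{method="POST"} 3
`
    var parser text.Parser
    // One family is produced per metric name; samples sharing that name
    // become Metric entries within the family.
    families, err := parser.TextToMetricFamilies(strings.NewReader(input))
    if err != nil {
        panic(err)
    }
    for name, mf := range families {
        fmt.Printf("%s: %d metric(s), type %s\n", name, len(mf.Metric), mf.GetType())
    }
}
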
diff --git a/vendor/github.com/prometheus/client_golang/text/proto.go b/vendor/github.com/prometheus/client_golang/text/proto.go
new file mode 100644
index 0000000000..e82bbb3b40
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/text/proto.go
@@ -0,0 +1,43 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package text
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// WriteProtoDelimited writes the MetricFamily to the writer in delimited
+// protobuf format and returns the number of bytes written and any error
+// encountered.
+func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) {
+ return pbutil.WriteDelimited(w, p)
+}
+
+// WriteProtoText writes the MetricFamily to the writer in text format and
+// returns the number of bytes written and any error encountered.
+func WriteProtoText(w io.Writer, p *dto.MetricFamily) (int, error) {
+ return fmt.Fprintf(w, "%s\n", proto.MarshalTextString(p))
+}
+
+// WriteProtoCompactText writes the MetricFamily to the writer in compact text
+// format and returns the number of bytes written and any error encountered.
+func WriteProtoCompactText(w io.Writer, p *dto.MetricFamily) (int, error) {
+ return fmt.Fprintf(w, "%s\n", p)
+}
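
These are thin wrappers; a hedged round-trip sketch pairs WriteProtoDelimited with pbutil.ReadDelimited, the counterpart used by the decoder vendored below:

package main

import (
    "bytes"
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/matttproud/golang_protobuf_extensions/pbutil"
    "github.com/prometheus/client_golang/text"
    dto "github.com/prometheus/client_model/go"
)

func main() {
    in := &dto.MetricFamily{
        Name: proto.String("up"),
        Type: dto.MetricType_GAUGE.Enum(),
        Metric: []*dto.Metric{{
            Gauge: &dto.Gauge{Value: proto.Float64(1)},
        }},
    }

    var buf bytes.Buffer
    if _, err := text.WriteProtoDelimited(&buf, in); err != nil {
        panic(err)
    }

    // Read the length-delimited message back out.
    out := &dto.MetricFamily{}
    if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
        panic(err)
    }
    fmt.Println(out.GetName()) // up
}
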
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 0000000000..b065f8683f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
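
One property of this generated code worth noting: the getters are nil-safe on both the receiver and the optional field, so callers can chain through unset proto2 fields without guarding every step. A small sketch:

package main

import (
    "fmt"

    dto "github.com/prometheus/client_model/go"
)

func main() {
    var m *dto.Metric               // a nil receiver is fine for getters
    fmt.Println(m.GetTimestampMs()) // 0, the zero value

    m = &dto.Metric{}                      // no Counter set
    fmt.Println(m.GetCounter().GetValue()) // 0, no nil-pointer panic
}
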
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000000..68a894445c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,388 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// NewDecoder returns a new decoder based on the HTTP header.
+func NewDecoder(r io.Reader, h http.Header) (Decoder, error) {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return nil, fmt.Errorf("invalid Content-Type header %q: %s", ct, err)
+ }
+
+ const (
+ protoType = ProtoType + "/" + ProtoSubType
+ textType = "text/plain"
+ )
+
+ switch mediatype {
+ case protoType:
+ if p := params["proto"]; p != ProtoProtocol {
+ return nil, fmt.Errorf("unrecognized protocol message %s", p)
+ }
+ if e := params["encoding"]; e != "delimited" {
+ return nil, fmt.Errorf("unsupported encoding %s", e)
+ }
+ return &protoDecoder{r: r}, nil
+
+ case textType:
+ if v, ok := params["version"]; ok && v != "0.0.4" {
+ return nil, fmt.Errorf("unrecognized protocol version %s", v)
+ }
+ return &textDecoder{r: r}, nil
+
+ default:
+ return nil, fmt.Errorf("unsupported media type %q, expected %q or %q", mediatype, protoType, textType)
+ }
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ return err
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+ *v = *d.fams[len(d.fams)-1]
+ d.fams = d.fams[:len(d.fams)-1]
+ return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// it decodes.
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode decodes the next MetricFamily from the wrapped Decoder and extracts
+// its samples into s.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch *f.Type {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ if m.Summary.SampleSum != nil {
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+ }
+
+ if m.Summary.SampleCount != nil {
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ if m.Histogram.SampleSum != nil {
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+ }
+
+ if m.Histogram.SampleCount != nil {
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+ }
+
+ return samples
+}
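
A hedged sketch of the whole decoding path: negotiate a Decoder from the response headers, then let SampleDecoder flatten each MetricFamily into a model.Vector, stamping samples that carry no explicit timestamp with DecodeOptions.Timestamp. The scrape URL is illustrative:

package main

import (
    "fmt"
    "io"
    "net/http"

    "github.com/prometheus/common/expfmt"
    "github.com/prometheus/common/model"
)

func main() {
    resp, err := http.Get("http://localhost:9090/metrics") // illustrative target
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    dec, err := expfmt.NewDecoder(resp.Body, resp.Header)
    if err != nil {
        panic(err)
    }
    sd := expfmt.SampleDecoder{
        Dec:  dec,
        Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
    }

    for {
        var v model.Vector
        if err := sd.Decode(&v); err == io.EOF {
            break // decoder exhausted
        } else if err != nil {
            panic(err)
        }
        for _, s := range v {
            fmt.Println(s)
        }
    }
}
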
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000000..fd3c186e1f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "bitbucket.org/ww/goautoneg"
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
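+// For example, an Accept header of
+// "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited"
+// negotiates to FmtProtoDelim, while "text/plain" (with no version parameter
+// or with version "0.0.4") negotiates to FmtText.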
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type == ProtoType && ac.SubType == ProtoSubType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
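+
+// A minimal usage sketch combining Negotiate and NewEncoder in an HTTP
+// handler (the handler and the mfs slice are illustrative, not part of this
+// package):
+//
+//	func writeMetrics(w http.ResponseWriter, r *http.Request, mfs []*dto.MetricFamily) {
+//		format := Negotiate(r.Header)
+//		w.Header().Set("Content-Type", string(format))
+//		enc := NewEncoder(w, format)
+//		for _, mf := range mfs {
+//			if err := enc.Encode(mf); err != nil {
+//				http.Error(w, err.Error(), http.StatusInternalServerError)
+//				return
+//			}
+//		}
+//	}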
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000000..7a8114ce4b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application`
+ ProtoSubType = `vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "/" + ProtoSubType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+)
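+
+// FmtText, for example, expands to "text/plain; version=0.0.4", and
+// FmtProtoDelim to "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited".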
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000000..cc2a616acc
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,308 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. This function does not perform checks on the
+// content of the metric and label names, i.e. invalid metric or label names
+// will result in invalid text format output.
+// This function fulfills the type 'expfmt.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+ if in.Type == nil {
+ return written, fmt.Errorf("MetricFamily has no type: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
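+// For a counter family named "http_requests_total" (an illustrative name)
+// with one metric labelled method="post", the output of this function looks
+// like:
+//
+//	# HELP http_requests_total The total number of HTTP requests.
+//	# TYPE http_requests_total counter
+//	http_requests_total{method="post"} 1027
+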
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
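+// A sample with labels and a timestamp is rendered on a single line, e.g.
+// (illustrative values):
+//
+//	http_requests_total{method="post",code="200"} 1027 1395066363000
+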
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ result := bytes.NewBuffer(make([]byte, 0, len(v)))
+ for _, c := range v {
+ switch {
+ case c == '\\':
+ result.WriteString(`\\`)
+ case includeDoubleQuote && c == '"':
+ result.WriteString(`\"`)
+ case c == '\n':
+ result.WriteString(`\n`)
+ default:
+ result.WriteRune(c)
+ }
+ }
+ return result.String()
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000000..84433bc4f6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,746 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange
+// format. Its zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends in '_count' or
+ // '_sum' respectively and belongs to a summary/histogram, representing
+ // the sample count or sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ return p.metricFamiliesByName, p.err
+}
+
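+// A minimal usage sketch (the metric name is illustrative):
+//
+//	var parser TextParser
+//	families, err := parser.TextToMetricFamilies(strings.NewReader(
+//		"# TYPE requests counter\nrequests 42\n",
+//	))
+//	if err != nil {
+//		// Handle the ParseError.
+//	}
+//	mf := families["requests"] // *dto.MetricFamily for "requests"
+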
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. If this is a summary or histogram,
+ // the metric may already exist, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
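+// setOrCreateCurrentMF looks up the metric family for the name in
+// p.currentToken, creating it if needed. A name ending in "_sum", "_count",
+// or "_bucket" is first resolved against an existing summary or histogram
+// family: a line for "rpc_duration_seconds_count" (an illustrative name) is
+// attributed to a previously declared summary "rpc_duration_seconds" and
+// flagged as its sample count.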
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Check whether this is a _sum or _count for a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000000..fc4de4106e
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
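+// String returns the fingerprint as a fixed-width, zero-padded hexadecimal
+// string; Fingerprint(0x1234), for example, renders as "0000000000001234",
+// a form that FingerprintFromString parses back.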
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000000..6459c8f791
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,188 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000000..142b9d1e2d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,153 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. A LabelSet
+// may be fully qualified, in which case it resolves to exactly one Metric in
+// the data store, or partial, in which case it matches a vector of Metrics.
+type LabelSet map[LabelName]LabelValue
+
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in ls, then ls
+// is before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
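+//
+// For example, {a="1"} is before {a="1", b="2"} (fewer labels), and
+// {a="1", b="2"} is before {a="2", b="1"} (the first differing label in
+// sorted order, a, has the smaller value in the former).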
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !LabelNameRE.MatchString(string(ln)) {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000000..25fc3c9425
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,81 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+var separator = []byte{0}
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
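+// String renders the metric in the usual exposition notation, e.g.
+// http_requests_total{code="200"} (an illustrative metric). A metric without
+// a __name__ label renders as {code="200"}, and an empty metric as {}.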
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000000..88f013a47a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000000..28f370065a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,190 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "bytes"
+ "hash"
+ "hash/fnv"
+ "sort"
+ "sync"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = fnv.New64a().Sum64()
+
+ hashAndBufPool sync.Pool
+)
+
+type hashAndBuf struct {
+ h hash.Hash64
+ b bytes.Buffer
+}
+
+func getHashAndBuf() *hashAndBuf {
+ hb := hashAndBufPool.Get()
+ if hb == nil {
+ return &hashAndBuf{h: fnv.New64a()}
+ }
+ return hb.(*hashAndBuf)
+}
+
+func putHashAndBuf(hb *hashAndBuf) {
+ hb.h.Reset()
+ hb.b.Reset()
+ hashAndBufPool.Put(hb)
+}
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(labelName)
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(labels[labelName])
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
+
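+// A short usage sketch (label values are illustrative):
+//
+//	sig := LabelsToSignature(map[string]string{"job": "api", "instance": "a:9090"})
+//
+// Label names are sorted before hashing, so the result is independent of map
+// iteration order.
+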
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(string(labelName))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(ls[labelName]))
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return Fingerprint(hb.h.Sum64())
+}
+
+// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more susceptible to
+// create hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for labelName, labelValue := range ls {
+ hb.b.WriteString(string(labelName))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(labelValue))
+ hb.h.Write(hb.b.Bytes())
+ result ^= hb.h.Sum64()
+ hb.h.Reset()
+ hb.b.Reset()
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as a
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames in the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(m) == 0 || len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, label := range labels {
+ hb.b.WriteString(string(label))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(m[label]))
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as a
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ hb := getHashAndBuf()
+ defer putHashAndBuf(hb)
+
+ for _, labelName := range labelNames {
+ hb.b.WriteString(string(labelName))
+ hb.b.WriteByte(SeparatorByte)
+ hb.b.WriteString(string(m[labelName]))
+ hb.b.WriteByte(SeparatorByte)
+ hb.h.Write(hb.b.Bytes())
+ hb.b.Reset()
+ }
+ return hb.h.Sum64()
+}
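
A minimal standalone sketch (not the vendored API) of the fingerprinting scheme above, assuming nothing beyond the standard library: label names are sorted, then each name/value pair is fed into a single FNV-1a hash, delimited by the 0xFF separator byte. `labelsToSignature` here is a hypothetical re-implementation for illustration only.

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// separatorByte mirrors model.SeparatorByte: 0xFF never occurs in valid
// UTF-8, so it can safely delimit label names and values inside the hash.
const separatorByte byte = 255

func labelsToSignature(labels map[string]string) uint64 {
	names := make([]string, 0, len(labels))
	for name := range labels {
		names = append(names, name)
	}
	sort.Strings(names) // sorting makes the signature order-independent

	h := fnv.New64a()
	for _, name := range names {
		h.Write([]byte(name))
		h.Write([]byte{separatorByte})
		h.Write([]byte(labels[name]))
		h.Write([]byte{separatorByte})
	}
	return h.Sum64()
}

func main() {
	// Identical label sets yield identical signatures regardless of
	// map iteration order.
	fmt.Println(labelsToSignature(map[string]string{"job": "api", "instance": "a:9090"}))
}
```
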
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000000..ebc8bf6cc8
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,230 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ // minimumTick is the minimum supported time resolution. This has to be
+ // at most time.Second (and divide it evenly) for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// ParseDuration parses a string into a Duration, assuming that a day always
+// has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ durSeconds, _ := strconv.Atoi(matches[1])
+ dur := time.Duration(durSeconds) * time.Second
+ unit := matches[2]
+ switch unit {
+ case "d":
+ dur *= 60 * 60 * 24
+ case "h":
+ dur *= 60 * 60
+ case "m":
+ dur *= 60
+ case "s":
+ dur *= 1
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+// durationRE matches an integer count followed by a unit string. Units other
+// than d, h, m, and s are rejected by ParseDuration's switch above.
+var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
+
+// String formats the duration using the largest unit among d, h, m, and s
+// that divides the duration evenly.
+func (d Duration) String() string {
+ seconds := int64(time.Duration(d) / time.Second)
+ factors := map[string]int64{
+ "d": 60 * 60 * 24,
+ "h": 60 * 60,
+ "m": 60,
+ "s": 1,
+ }
+ unit := "s"
+ switch int64(0) {
+ case seconds % factors["d"]:
+ unit = "d"
+ case seconds % factors["h"]:
+ unit = "h"
+ case seconds % factors["m"]:
+ unit = "m"
+ }
+ return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
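
A minimal sketch of the unit-selection logic in `Duration.String` above, re-implemented standalone for illustration (`durString` is a hypothetical name): the largest unit among d, h, m, and s that divides the duration evenly wins.

```go
package main

import (
	"fmt"
	"time"
)

// durString picks the largest unit (d, h, m, s) that divides the
// duration evenly, mirroring Duration.String above.
func durString(d time.Duration) string {
	seconds := int64(d / time.Second)
	units := []struct {
		unit   string
		factor int64
	}{{"d", 86400}, {"h", 3600}, {"m", 60}, {"s", 1}}
	for _, u := range units {
		if seconds%u.factor == 0 {
			return fmt.Sprintf("%d%s", seconds/u.factor, u.unit)
		}
	}
	return fmt.Sprintf("%ds", seconds)
}

func main() {
	fmt.Println(durString(90 * time.Second))  // "90s" (not a whole minute)
	fmt.Println(durString(120 * time.Second)) // "2m"
	fmt.Println(durString(48 * time.Hour))    // "2d"
}
```
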
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000000..10ffb0bd61
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,395 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal reports whether v and o hold the same value.
+func (v SampleValue) Equal(o SampleValue) bool {
+ return v == o
+}
+
+// String formats v with the shortest float representation.
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+ if s.Value != o.Value {
+ return false
+ }
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, v})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is essentially an alias for Samples, with the added contract that
+// all Samples in a Vector carry the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
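
The `SamplePair` and `Scalar` marshalers above produce a two-element JSON array: the timestamp as a plain number of seconds and the value as a quoted string, so non-finite floats (NaN, +Inf) survive JSON, which plain numbers cannot represent. A minimal standalone sketch of that wire shape (`pair` is a hypothetical stand-in, not the vendored type):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pair mimics SamplePair's wire format: [timestamp-in-seconds, "value"].
type pair struct {
	ts  float64 // seconds since the epoch, millisecond precision
	val float64
}

// MarshalJSON quotes the value so it round-trips even when non-finite.
func (p pair) MarshalJSON() ([]byte, error) {
	return json.Marshal([2]interface{}{p.ts, fmt.Sprintf("%g", p.val)})
}

func main() {
	b, _ := json.Marshal(pair{ts: 1441900000.123, val: 0.5})
	fmt.Println(string(b)) // [1441900000.123,"0.5"]
}
```
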
diff --git a/vendor/github.com/prometheus/log/AUTHORS.md b/vendor/github.com/prometheus/log/AUTHORS.md
new file mode 100644
index 0000000000..e2b42a716b
--- /dev/null
+++ b/vendor/github.com/prometheus/log/AUTHORS.md
@@ -0,0 +1,11 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Julius Volz
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Julius Volz
diff --git a/vendor/github.com/prometheus/log/CONTRIBUTING.md b/vendor/github.com/prometheus/log/CONTRIBUTING.md
new file mode 100644
index 0000000000..5705f0fbea
--- /dev/null
+++ b/vendor/github.com/prometheus/log/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/log/LICENSE b/vendor/github.com/prometheus/log/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/prometheus/log/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/log/NOTICE b/vendor/github.com/prometheus/log/NOTICE
new file mode 100644
index 0000000000..1f37552eb9
--- /dev/null
+++ b/vendor/github.com/prometheus/log/NOTICE
@@ -0,0 +1,2 @@
+Standard logging library for Go-based Prometheus components.
+Copyright 2015 The Prometheus Authors
diff --git a/vendor/github.com/prometheus/log/README.md b/vendor/github.com/prometheus/log/README.md
new file mode 100644
index 0000000000..d86afac153
--- /dev/null
+++ b/vendor/github.com/prometheus/log/README.md
@@ -0,0 +1,8 @@
+# Prometheus Logging Library
+
+Standard logging library for Go-based Prometheus components.
+
+This library wraps
+[https://github.com/Sirupsen/logrus](https://github.com/Sirupsen/logrus) in
+order to add file:line annotations to log lines, as well as to provide common
+command-line flags for Prometheus components using it.
diff --git a/vendor/github.com/prometheus/log/log.go b/vendor/github.com/prometheus/log/log.go
new file mode 100644
index 0000000000..8c85df3b46
--- /dev/null
+++ b/vendor/github.com/prometheus/log/log.go
@@ -0,0 +1,171 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "flag"
+ "runtime"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+)
+
+var logger = logrus.New()
+
+type levelFlag struct{}
+
+// String implements flag.Value.
+func (f levelFlag) String() string {
+ return logger.Level.String()
+}
+
+// Set implements flag.Value.
+func (f levelFlag) Set(level string) error {
+ l, err := logrus.ParseLevel(level)
+ if err != nil {
+ return err
+ }
+ logger.Level = l
+ return nil
+}
+
+func init() {
+ // In order for this flag to take effect, the user of the package must call
+ // flag.Parse() before logging anything.
+ flag.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal, panic].")
+}
+
+// fileLineEntry returns a logrus.Entry with file and line annotations for the
+// original user log statement (two stack frames up from this function).
+func fileLineEntry() *logrus.Entry {
+ _, file, line, ok := runtime.Caller(2)
+ if !ok {
+ file = "??>"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ return logger.WithFields(logrus.Fields{
+ "file": file,
+ "line": line,
+ })
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ fileLineEntry().Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ fileLineEntry().Debugln(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ fileLineEntry().Debugf(format, args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ fileLineEntry().Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ fileLineEntry().Infoln(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ fileLineEntry().Infof(format, args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ fileLineEntry().Info(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ fileLineEntry().Infoln(args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ fileLineEntry().Infof(format, args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ fileLineEntry().Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ fileLineEntry().Warnln(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ fileLineEntry().Warnf(format, args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ fileLineEntry().Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ fileLineEntry().Errorln(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ fileLineEntry().Errorf(format, args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ fileLineEntry().Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ fileLineEntry().Fatalln(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ fileLineEntry().Fatalf(format, args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ fileLineEntry().Panic(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ fileLineEntry().Panicln(args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ fileLineEntry().Panicf(format, args...)
+}
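
A minimal usage sketch for the package above: the `-log.level` flag is registered in the package's `init`, so it only takes effect once `flag.Parse` has run in the importing program.

```go
package main

import (
	"flag"

	"github.com/prometheus/log"
)

func main() {
	// Parse command-line flags, e.g. ./component -log.level=debug
	flag.Parse()

	log.Infof("listening on %s", ":9090")
	log.Debug("only visible at -log.level=debug")
}
```
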
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 0000000000..8dde8e31f9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,12 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Ji-Hoon, Seol
+* Tobias Schmidt
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000000..5705f0fbea
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000000..53c5e9aa11
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000000..6e7ee6b8b7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,10 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[GoDoc](https://godoc.org/github.com/prometheus/procfs)
+[Build Status](https://travis-ci.org/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000000..e2acd6d40a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000000..6a8d97b11e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,40 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+func (fs FS) stat(p string) (os.FileInfo, error) {
+ return os.Stat(path.Join(string(fs), p))
+}
+
+func (fs FS) open(p string) (*os.File, error) {
+ return os.Open(path.Join(string(fs), p))
+}
+
+func (fs FS) readlink(p string) (string, error) {
+ return os.Readlink(path.Join(string(fs), p))
+}
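
A minimal sketch of using `NewFS` with a non-default mount point, e.g. when the host's proc is bind-mounted into a container; the `/host/proc` path is an assumption for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// /host/proc is an assumed path for a bind-mounted host proc.
	fs, err := procfs.NewFS("/host/proc")
	if err != nil {
		log.Fatalf("could not open procfs: %s", err)
	}
	p, err := fs.NewProc(1) // PID 1 as seen through that mount
	if err != nil {
		log.Fatalf("could not get process: %s", err)
	}
	fmt.Println(p.PID)
}
```
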
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 0000000000..26da5000e3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,223 @@
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+ // Total incoming packets processed.
+ IncomingPackets uint64
+ // Total outgoing packets processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The remote (real) port.
+ RemotePort uint16
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+ file, err := fs.open("net/ip_vs_stats")
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return []IPVSBackendStatus{}, err
+ }
+
+ return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := fs.open("net/ip_vs")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+ case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
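+// parseIPPort parses an address in the kernel's hex "IP:port" notation as used
+// in /proc/net/ip_vs, e.g. "C0A80016:0CEA" for 192.168.0.22:3306. The IP part
+// is 8 hex digits for IPv4 or 32 for IPv6.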
+func parseIPPort(s string) (net.IP, uint16, error) {
+ tmp := strings.SplitN(s, ":", 2)
+
+ if len(tmp) != 2 {
+ return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
+ }
+
+ if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
+ return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ }
+
+ ip, err := hex.DecodeString(tmp[0])
+ if err != nil {
+ return nil, 0, err
+ }
+
+ port, err := strconv.ParseUint(tmp[1], 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000000..ca5f12a595
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,188 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process.
+func Self() (Proc, error) {
+ return NewProc(os.Getpid())
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+
+ return fs.AllProcs()
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
+ return Proc{}, err
+ }
+
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := fs.open("")
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
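+		// Entries that are not purely numeric (e.g. "self", "stat") are not
+		// process directories; skip them.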
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := p.open("cmdline")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
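+	// Arguments in /proc/<pid>/cmdline are NUL-separated with a trailing NUL;
+	// strip the trailing NUL, then split on the rest.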
+	return strings.Split(string(data[:len(data)-1]), "\x00"), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := p.readlink("exe")
+
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := p.readlink("fd/" + name)
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := p.open("fd")
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) open(pa string) (*os.File, error) {
+ return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
+}
+
+func (p Proc) readlink(pa string) (string, error) {
+ return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa))
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 0000000000..7c6dc86970
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,54 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := p.open("io")
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
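+	// The format string mirrors the exact field order of /proc/<pid>/io, so a
+	// single Sscanf call fills every counter.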
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+ if err != nil {
+ return pio, err
+ }
+
+ return pio, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000000..9f080b9f62
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,111 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits.
+type ProcLimits struct {
+ CPUTime int
+ FileSize int
+ DataSize int
+ StackSize int
+ CoreFileSize int
+ ResidentSet int
+ Processes int
+ OpenFiles int
+ LockedMemory int
+ AddressSpace int
+ FileLocks int
+ PendingSignals int
+ MsqqueueSize int
+ NicePriority int
+ RealtimePriority int
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
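+	// Columns in /proc/<pid>/limits are separated by runs of at least two
+	// spaces; splitting on a single space would break multi-word limit names
+	// like "Max cpu time".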
+	limitsDelimiter = regexp.MustCompile("  +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := p.open("limits")
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000000..30a403b6c7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
+// required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic.
+// After much research it was determined that USER_HZ is actually hardcoded to
+// 100 on all Go-supported platforms as of the time of this writing. This is
+// why we decided to hardcode it here as well. It is not impossible that there
+// could be systems with exceptions, but they should be very exotic edge cases,
+// and in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, the value is expressed
+ // in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := p.open("stat")
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
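+	// The comm field may itself contain spaces and parentheses, so extract it
+	// between the first '(' and the last ')'.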
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000000..26fefb0fa0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns kernel/system statistics read from the specified proc filesystem.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := fs.open("stat")
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/conn.go b/vendor/github.com/samuel/go-zookeeper/zk/conn.go
new file mode 100644
index 0000000000..b46467025a
--- /dev/null
+++ b/vendor/github.com/samuel/go-zookeeper/zk/conn.go
@@ -0,0 +1,868 @@
+// Package zk is a native Go client library for the ZooKeeper orchestration service.
+package zk
+
+/*
+TODO:
+* make sure a ping response comes back in a reasonable time
+
+Possible watcher events:
+* Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err}
+*/
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ErrNoServer indicates that an operation cannot be completed
+// because attempts to connect to all servers in the list failed.
+var ErrNoServer = errors.New("zk: could not connect to a server")
+
+// ErrInvalidPath indicates that an operation was being attempted on
+// an invalid path. (e.g. empty path)
+var ErrInvalidPath = errors.New("zk: invalid path")
+
+// DefaultLogger uses the stdlib log package for logging.
+var DefaultLogger Logger = defaultLogger{}
+
+const (
+ bufferSize = 1536 * 1024
+ eventChanSize = 6
+ sendChanSize = 16
+ protectedPrefix = "_c_"
+)
+
+type watchType int
+
+const (
+ watchTypeData = iota
+ watchTypeExist = iota
+ watchTypeChild = iota
+)
+
+type watchPathType struct {
+ path string
+ wType watchType
+}
+
+type Dialer func(network, address string, timeout time.Duration) (net.Conn, error)
+
+// Logger is an interface that can be implemented to provide custom log output.
+type Logger interface {
+ Printf(string, ...interface{})
+}
+
+type Conn struct {
+ lastZxid int64
+ sessionID int64
+ state State // must be 32-bit aligned
+ xid uint32
+ timeout int32 // session timeout in milliseconds
+ passwd []byte
+
+ dialer Dialer
+ servers []string
+	serverIndex    int // index of the last server tried during connect, used to round-robin across servers
+ lastServerIndex int // index of the last server that was successfully connected to and authenticated with
+ conn net.Conn
+ eventChan chan Event
+ shouldQuit chan struct{}
+ pingInterval time.Duration
+ recvTimeout time.Duration
+ connectTimeout time.Duration
+
+ sendChan chan *request
+ requests map[int32]*request // Xid -> pending request
+ requestsLock sync.Mutex
+ watchers map[watchPathType][]chan Event
+ watchersLock sync.Mutex
+
+ // Debug (used by unit tests)
+ reconnectDelay time.Duration
+
+ logger Logger
+}
+
+type request struct {
+ xid int32
+ opcode int32
+ pkt interface{}
+ recvStruct interface{}
+ recvChan chan response
+
+ // Because sending and receiving happen in separate go routines, there's
+ // a possible race condition when creating watches from outside the read
+ // loop. We must ensure that a watcher gets added to the list synchronously
+ // with the response from the server on any request that creates a watch.
+ // In order to not hard code the watch logic for each opcode in the recv
+	// loop, the caller can use recvFunc to insert some synchronous code
+ // after a response.
+ recvFunc func(*request, *responseHeader, error)
+}
+
+type response struct {
+ zxid int64
+ err error
+}
+
+type Event struct {
+ Type EventType
+ State State
+ Path string // For non-session events, the path of the watched node.
+ Err error
+ Server string // For connection events
+}
+
+// Connect establishes a new connection to a pool of zookeeper servers
+// using the default net.Dialer. See ConnectWithDialer for further
+// information about session timeout.
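+//
+// A minimal usage sketch (the server address is a placeholder):
+//
+//	c, _, err := Connect([]string{"127.0.0.1:2181"}, 10*time.Second)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer c.Close()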
+func Connect(servers []string, sessionTimeout time.Duration) (*Conn, <-chan Event, error) {
+ return ConnectWithDialer(servers, sessionTimeout, nil)
+}
+
+// ConnectWithDialer establishes a new connection to a pool of zookeeper
+// servers. The provided session timeout sets the amount of time for which
+// a session is considered valid after losing connection to a server. Within
+// the session timeout it's possible to reestablish a connection to a different
+// server and keep the same session. This means any ephemeral nodes and
+// watches are maintained.
+func ConnectWithDialer(servers []string, sessionTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) {
+ if len(servers) == 0 {
+ return nil, nil, errors.New("zk: server list must not be empty")
+ }
+
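+	// Use two-thirds of the session timeout as the read deadline so a dead
+	// connection is noticed with time left to reconnect before the session expires.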
+ recvTimeout := sessionTimeout * 2 / 3
+
+ srvs := make([]string, len(servers))
+
+ for i, addr := range servers {
+ if strings.Contains(addr, ":") {
+ srvs[i] = addr
+ } else {
+ srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
+ }
+ }
+
+ // Randomize the order of the servers to avoid creating hotspots
+ stringShuffle(srvs)
+
+ ec := make(chan Event, eventChanSize)
+ if dialer == nil {
+ dialer = net.DialTimeout
+ }
+ conn := Conn{
+ dialer: dialer,
+ servers: srvs,
+ serverIndex: 0,
+ lastServerIndex: -1,
+ conn: nil,
+ state: StateDisconnected,
+ eventChan: ec,
+ shouldQuit: make(chan struct{}),
+ recvTimeout: recvTimeout,
+ pingInterval: recvTimeout / 2,
+ connectTimeout: 1 * time.Second,
+ sendChan: make(chan *request, sendChanSize),
+ requests: make(map[int32]*request),
+ watchers: make(map[watchPathType][]chan Event),
+ passwd: emptyPassword,
+ timeout: int32(sessionTimeout.Nanoseconds() / 1e6),
+ logger: DefaultLogger,
+
+ // Debug
+ reconnectDelay: 0,
+ }
+ go func() {
+ conn.loop()
+ conn.flushRequests(ErrClosing)
+ conn.invalidateWatches(ErrClosing)
+ close(conn.eventChan)
+ }()
+ return &conn, ec, nil
+}
+
+func (c *Conn) Close() {
+ close(c.shouldQuit)
+
+ select {
+ case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
+ case <-time.After(time.Second):
+ }
+}
+
+// State returns the current state of the connection.
+func (c *Conn) State() State {
+ return State(atomic.LoadInt32((*int32)(&c.state)))
+}
+
+// SetLogger sets the logger to be used for printing errors.
+// Logger is an interface provided by this package.
+func (c *Conn) SetLogger(l Logger) {
+ c.logger = l
+}
+
+func (c *Conn) setState(state State) {
+ atomic.StoreInt32((*int32)(&c.state), int32(state))
+ select {
+ case c.eventChan <- Event{Type: EventSession, State: state, Server: c.servers[c.serverIndex]}:
+ default:
+ // panic("zk: event channel full - it must be monitored and never allowed to be full")
+ }
+}
+
+func (c *Conn) connect() error {
+ c.setState(StateConnecting)
+ for {
+ c.serverIndex = (c.serverIndex + 1) % len(c.servers)
+ if c.serverIndex == c.lastServerIndex {
+ c.flushUnsentRequests(ErrNoServer)
+ select {
+ case <-time.After(time.Second):
+ // pass
+ case <-c.shouldQuit:
+ c.setState(StateDisconnected)
+ c.flushUnsentRequests(ErrClosing)
+ return ErrClosing
+ }
+ } else if c.lastServerIndex < 0 {
+ // lastServerIndex defaults to -1 to avoid a delay on the initial connect
+ c.lastServerIndex = 0
+ }
+
+ zkConn, err := c.dialer("tcp", c.servers[c.serverIndex], c.connectTimeout)
+ if err == nil {
+ c.conn = zkConn
+ c.setState(StateConnected)
+ return nil
+ }
+
+ c.logger.Printf("Failed to connect to %s: %+v", c.servers[c.serverIndex], err)
+ }
+}
+
+func (c *Conn) loop() {
+ for {
+ if err := c.connect(); err != nil {
+ // c.Close() was called
+ return
+ }
+
+ err := c.authenticate()
+ switch {
+ case err == ErrSessionExpired:
+ c.invalidateWatches(err)
+ case err != nil && c.conn != nil:
+ c.conn.Close()
+ case err == nil:
+ c.lastServerIndex = c.serverIndex
+			closeChan := make(chan struct{}) // channel to tell the send loop to stop
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() {
+ c.sendLoop(c.conn, closeChan)
+ c.conn.Close() // causes recv loop to EOF/exit
+ wg.Done()
+ }()
+
+ wg.Add(1)
+ go func() {
+ err = c.recvLoop(c.conn)
+ if err == nil {
+ panic("zk: recvLoop should never return nil error")
+ }
+ close(closeChan) // tell send loop to exit
+ wg.Done()
+ }()
+
+ wg.Wait()
+ }
+
+ c.setState(StateDisconnected)
+
+ // Yeesh
+ if err != io.EOF && err != ErrSessionExpired && !strings.Contains(err.Error(), "use of closed network connection") {
+ c.logger.Printf(err.Error())
+ }
+
+ select {
+ case <-c.shouldQuit:
+ c.flushRequests(ErrClosing)
+ return
+ default:
+ }
+
+ if err != ErrSessionExpired {
+ err = ErrConnectionClosed
+ }
+ c.flushRequests(err)
+
+ if c.reconnectDelay > 0 {
+ select {
+ case <-c.shouldQuit:
+ return
+ case <-time.After(c.reconnectDelay):
+ }
+ }
+ }
+}
+
+func (c *Conn) flushUnsentRequests(err error) {
+ for {
+ select {
+ default:
+ return
+ case req := <-c.sendChan:
+ req.recvChan <- response{-1, err}
+ }
+ }
+}
+
+// Send error to all pending requests and clear request map
+func (c *Conn) flushRequests(err error) {
+ c.requestsLock.Lock()
+ for _, req := range c.requests {
+ req.recvChan <- response{-1, err}
+ }
+ c.requests = make(map[int32]*request)
+ c.requestsLock.Unlock()
+}
+
+// Send error to all watchers and clear watchers map
+func (c *Conn) invalidateWatches(err error) {
+ c.watchersLock.Lock()
+ defer c.watchersLock.Unlock()
+
+	if len(c.watchers) > 0 {
+ for pathType, watchers := range c.watchers {
+ ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err}
+ for _, ch := range watchers {
+ ch <- ev
+ close(ch)
+ }
+ }
+ c.watchers = make(map[watchPathType][]chan Event)
+ }
+}
+
+func (c *Conn) sendSetWatches() {
+ c.watchersLock.Lock()
+ defer c.watchersLock.Unlock()
+
+ if len(c.watchers) == 0 {
+ return
+ }
+
+ req := &setWatchesRequest{
+ RelativeZxid: c.lastZxid,
+ DataWatches: make([]string, 0),
+ ExistWatches: make([]string, 0),
+ ChildWatches: make([]string, 0),
+ }
+ n := 0
+ for pathType, watchers := range c.watchers {
+ if len(watchers) == 0 {
+ continue
+ }
+ switch pathType.wType {
+ case watchTypeData:
+ req.DataWatches = append(req.DataWatches, pathType.path)
+ case watchTypeExist:
+ req.ExistWatches = append(req.ExistWatches, pathType.path)
+ case watchTypeChild:
+ req.ChildWatches = append(req.ChildWatches, pathType.path)
+ }
+ n++
+ }
+ if n == 0 {
+ return
+ }
+
+ go func() {
+ res := &setWatchesResponse{}
+ _, err := c.request(opSetWatches, req, res, nil)
+ if err != nil {
+ c.logger.Printf("Failed to set previous watches: %s", err.Error())
+ }
+ }()
+}
+
+func (c *Conn) authenticate() error {
+ buf := make([]byte, 256)
+
+ // connect request
+
+ n, err := encodePacket(buf[4:], &connectRequest{
+ ProtocolVersion: protocolVersion,
+ LastZxidSeen: c.lastZxid,
+ TimeOut: c.timeout,
+ SessionID: c.sessionID,
+ Passwd: c.passwd,
+ })
+ if err != nil {
+ return err
+ }
+
+ binary.BigEndian.PutUint32(buf[:4], uint32(n))
+
+ c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10))
+ _, err = c.conn.Write(buf[:n+4])
+ c.conn.SetWriteDeadline(time.Time{})
+ if err != nil {
+ return err
+ }
+
+ c.sendSetWatches()
+
+ // connect response
+
+ // package length
+ c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10))
+ _, err = io.ReadFull(c.conn, buf[:4])
+ c.conn.SetReadDeadline(time.Time{})
+ if err != nil {
+ // Sometimes zookeeper just drops connection on invalid session data,
+		// we prefer to drop the session and start from scratch when that
+		// happens instead of falling into a loop of connect/disconnect attempts.
+ c.sessionID = 0
+ c.passwd = emptyPassword
+ c.lastZxid = 0
+ c.setState(StateExpired)
+ return ErrSessionExpired
+ }
+
+ blen := int(binary.BigEndian.Uint32(buf[:4]))
+ if cap(buf) < blen {
+ buf = make([]byte, blen)
+ }
+
+ _, err = io.ReadFull(c.conn, buf[:blen])
+ if err != nil {
+ return err
+ }
+
+ r := connectResponse{}
+ _, err = decodePacket(buf[:blen], &r)
+ if err != nil {
+ return err
+ }
+ if r.SessionID == 0 {
+ c.sessionID = 0
+ c.passwd = emptyPassword
+ c.lastZxid = 0
+ c.setState(StateExpired)
+ return ErrSessionExpired
+ }
+
+ c.timeout = r.TimeOut
+ c.sessionID = r.SessionID
+ c.passwd = r.Passwd
+ c.setState(StateHasSession)
+
+ return nil
+}
+
+func (c *Conn) sendLoop(conn net.Conn, closeChan <-chan struct{}) error {
+ pingTicker := time.NewTicker(c.pingInterval)
+ defer pingTicker.Stop()
+
+ buf := make([]byte, bufferSize)
+ for {
+ select {
+ case req := <-c.sendChan:
+ header := &requestHeader{req.xid, req.opcode}
+ n, err := encodePacket(buf[4:], header)
+ if err != nil {
+ req.recvChan <- response{-1, err}
+ continue
+ }
+
+ n2, err := encodePacket(buf[4+n:], req.pkt)
+ if err != nil {
+ req.recvChan <- response{-1, err}
+ continue
+ }
+
+ n += n2
+
+ binary.BigEndian.PutUint32(buf[:4], uint32(n))
+
+ c.requestsLock.Lock()
+ select {
+ case <-closeChan:
+ req.recvChan <- response{-1, ErrConnectionClosed}
+ c.requestsLock.Unlock()
+ return ErrConnectionClosed
+ default:
+ }
+ c.requests[req.xid] = req
+ c.requestsLock.Unlock()
+
+ conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
+ _, err = conn.Write(buf[:n+4])
+ conn.SetWriteDeadline(time.Time{})
+ if err != nil {
+ req.recvChan <- response{-1, err}
+ conn.Close()
+ return err
+ }
+ case <-pingTicker.C:
+ n, err := encodePacket(buf[4:], &requestHeader{Xid: -2, Opcode: opPing})
+ if err != nil {
+ panic("zk: opPing should never fail to serialize")
+ }
+
+ binary.BigEndian.PutUint32(buf[:4], uint32(n))
+
+ conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
+ _, err = conn.Write(buf[:n+4])
+ conn.SetWriteDeadline(time.Time{})
+ if err != nil {
+ conn.Close()
+ return err
+ }
+ case <-closeChan:
+ return nil
+ }
+ }
+}
+
+func (c *Conn) recvLoop(conn net.Conn) error {
+ buf := make([]byte, bufferSize)
+ for {
+ // package length
+ conn.SetReadDeadline(time.Now().Add(c.recvTimeout))
+ _, err := io.ReadFull(conn, buf[:4])
+ if err != nil {
+ return err
+ }
+
+ blen := int(binary.BigEndian.Uint32(buf[:4]))
+ if cap(buf) < blen {
+ buf = make([]byte, blen)
+ }
+
+ _, err = io.ReadFull(conn, buf[:blen])
+ conn.SetReadDeadline(time.Time{})
+ if err != nil {
+ return err
+ }
+
+ res := responseHeader{}
+ _, err = decodePacket(buf[:16], &res)
+ if err != nil {
+ return err
+ }
+
+ if res.Xid == -1 {
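+			// Xid -1 marks a watcher event pushed by the server.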
+ res := &watcherEvent{}
+ _, err := decodePacket(buf[16:16+blen], res)
+ if err != nil {
+ return err
+ }
+ ev := Event{
+ Type: res.Type,
+ State: res.State,
+ Path: res.Path,
+ Err: nil,
+ }
+ select {
+ case c.eventChan <- ev:
+ default:
+ }
+ wTypes := make([]watchType, 0, 2)
+ switch res.Type {
+ case EventNodeCreated:
+ wTypes = append(wTypes, watchTypeExist)
+ case EventNodeDeleted, EventNodeDataChanged:
+ wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild)
+ case EventNodeChildrenChanged:
+ wTypes = append(wTypes, watchTypeChild)
+ }
+ c.watchersLock.Lock()
+ for _, t := range wTypes {
+ wpt := watchPathType{res.Path, t}
+				if watchers := c.watchers[wpt]; len(watchers) > 0 {
+ for _, ch := range watchers {
+ ch <- ev
+ close(ch)
+ }
+ delete(c.watchers, wpt)
+ }
+ }
+ c.watchersLock.Unlock()
+ } else if res.Xid == -2 {
+ // Ping response. Ignore.
+ } else if res.Xid < 0 {
+ c.logger.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid)
+ } else {
+ if res.Zxid > 0 {
+ c.lastZxid = res.Zxid
+ }
+
+ c.requestsLock.Lock()
+ req, ok := c.requests[res.Xid]
+ if ok {
+ delete(c.requests, res.Xid)
+ }
+ c.requestsLock.Unlock()
+
+ if !ok {
+ c.logger.Printf("Response for unknown request with xid %d", res.Xid)
+ } else {
+ if res.Err != 0 {
+ err = res.Err.toError()
+ } else {
+ _, err = decodePacket(buf[16:16+blen], req.recvStruct)
+ }
+ if req.recvFunc != nil {
+ req.recvFunc(req, &res, err)
+ }
+ req.recvChan <- response{res.Zxid, err}
+ if req.opcode == opClose {
+ return io.EOF
+ }
+ }
+ }
+ }
+}
+
+func (c *Conn) nextXid() int32 {
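+	// Mask to keep xids in the positive int32 range; negative xids are
+	// reserved for watch events (-1) and pings (-2).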
+ return int32(atomic.AddUint32(&c.xid, 1) & 0x7fffffff)
+}
+
+func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event {
+ c.watchersLock.Lock()
+ defer c.watchersLock.Unlock()
+
+ ch := make(chan Event, 1)
+ wpt := watchPathType{path, watchType}
+ c.watchers[wpt] = append(c.watchers[wpt], ch)
+ return ch
+}
+
+func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response {
+ rq := &request{
+ xid: c.nextXid(),
+ opcode: opcode,
+ pkt: req,
+ recvStruct: res,
+ recvChan: make(chan response, 1),
+ recvFunc: recvFunc,
+ }
+ c.sendChan <- rq
+ return rq.recvChan
+}
+
+func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) {
+ r := <-c.queueRequest(opcode, req, res, recvFunc)
+ return r.zxid, r.err
+}
+
+func (c *Conn) AddAuth(scheme string, auth []byte) error {
+ _, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil)
+ return err
+}
+
+func (c *Conn) Children(path string) ([]string, *Stat, error) {
+ res := &getChildren2Response{}
+ _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil)
+ return res.Children, &res.Stat, err
+}
+
+func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) {
+ var ech <-chan Event
+ res := &getChildren2Response{}
+ _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
+ if err == nil {
+ ech = c.addWatcher(path, watchTypeChild)
+ }
+ })
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ return res.Children, &res.Stat, ech, err
+}
+
+func (c *Conn) Get(path string) ([]byte, *Stat, error) {
+ res := &getDataResponse{}
+ _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil)
+ return res.Data, &res.Stat, err
+}
+
+// GetW returns the contents of a znode and sets a watch
+func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) {
+ var ech <-chan Event
+ res := &getDataResponse{}
+ _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
+ if err == nil {
+ ech = c.addWatcher(path, watchTypeData)
+ }
+ })
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ return res.Data, &res.Stat, ech, err
+}
+
+func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) {
+ if path == "" {
+ return nil, ErrInvalidPath
+ }
+ res := &setDataResponse{}
+ _, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil)
+ return &res.Stat, err
+}
+
+func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) {
+ res := &createResponse{}
+ _, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil)
+ return res.Path, err
+}
+
+// CreateProtectedEphemeralSequential fixes a race condition if the server crashes
+// after it creates the node. On reconnect the session may still be valid so the
+// ephemeral node still exists. Therefore, on reconnect we need to check if a node
+// with a GUID generated on create exists.
+func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) {
+ var guid [16]byte
+ _, err := io.ReadFull(rand.Reader, guid[:16])
+ if err != nil {
+ return "", err
+ }
+ guidStr := fmt.Sprintf("%x", guid)
+
+ parts := strings.Split(path, "/")
+ parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1])
+ rootPath := strings.Join(parts[:len(parts)-1], "/")
+ protectedPath := strings.Join(parts, "/")
+
+ var newPath string
+ for i := 0; i < 3; i++ {
+ newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl)
+ switch err {
+ case ErrSessionExpired:
+ // No need to search for the node since it can't exist. Just try again.
+ case ErrConnectionClosed:
+ children, _, err := c.Children(rootPath)
+ if err != nil {
+ return "", err
+ }
+ for _, p := range children {
+ parts := strings.Split(p, "/")
+ if pth := parts[len(parts)-1]; strings.HasPrefix(pth, protectedPrefix) {
+ if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr {
+ return rootPath + "/" + p, nil
+ }
+ }
+ }
+ case nil:
+ return newPath, nil
+ default:
+ return "", err
+ }
+ }
+ return "", err
+}
+
+func (c *Conn) Delete(path string, version int32) error {
+ _, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil)
+ return err
+}
+
+func (c *Conn) Exists(path string) (bool, *Stat, error) {
+ res := &existsResponse{}
+ _, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil)
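+	// ErrNoNode simply means the node is absent; it is not an error for Exists.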
+ exists := true
+ if err == ErrNoNode {
+ exists = false
+ err = nil
+ }
+ return exists, &res.Stat, err
+}
+
+func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) {
+ var ech <-chan Event
+ res := &existsResponse{}
+ _, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
+ if err == nil {
+ ech = c.addWatcher(path, watchTypeData)
+ } else if err == ErrNoNode {
+ ech = c.addWatcher(path, watchTypeExist)
+ }
+ })
+ exists := true
+ if err == ErrNoNode {
+ exists = false
+ err = nil
+ }
+ if err != nil {
+ return false, nil, nil, err
+ }
+ return exists, &res.Stat, ech, err
+}
+
+func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) {
+ res := &getAclResponse{}
+ _, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil)
+ return res.Acl, &res.Stat, err
+}
+
+func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
+ res := &setAclResponse{}
+ _, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil)
+ return &res.Stat, err
+}
+
+func (c *Conn) Sync(path string) (string, error) {
+ res := &syncResponse{}
+ _, err := c.request(opSync, &syncRequest{Path: path}, res, nil)
+ return res.Path, err
+}
+
+type MultiResponse struct {
+ Stat *Stat
+ String string
+}
+
+// Multi executes multiple ZooKeeper operations or none of them. The provided
+// ops must be one of *CreateRequest, *DeleteRequest, *SetDataRequest, or
+// *CheckVersionRequest.
+func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) {
+ req := &multiRequest{
+ Ops: make([]multiRequestOp, 0, len(ops)),
+ DoneHeader: multiHeader{Type: -1, Done: true, Err: -1},
+ }
+ for _, op := range ops {
+ var opCode int32
+ switch op.(type) {
+ case *CreateRequest:
+ opCode = opCreate
+ case *SetDataRequest:
+ opCode = opSetData
+ case *DeleteRequest:
+ opCode = opDelete
+ case *CheckVersionRequest:
+ opCode = opCheck
+ default:
+ return nil, fmt.Errorf("uknown operation type %T", op)
+ }
+ req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCode, false, -1}, op})
+ }
+ res := &multiResponse{}
+ _, err := c.request(opMulti, req, res, nil)
+ mr := make([]MultiResponse, len(res.Ops))
+ for i, op := range res.Ops {
+ mr[i] = MultiResponse{Stat: op.Stat, String: op.String}
+ }
+ return mr, err
+}
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/constants.go b/vendor/github.com/samuel/go-zookeeper/zk/constants.go
new file mode 100644
index 0000000000..f9b39b904f
--- /dev/null
+++ b/vendor/github.com/samuel/go-zookeeper/zk/constants.go
@@ -0,0 +1,240 @@
+package zk
+
+import (
+ "errors"
+)
+
+const (
+ protocolVersion = 0
+
+ DefaultPort = 2181
+)
+
+const (
+ opNotify = 0
+ opCreate = 1
+ opDelete = 2
+ opExists = 3
+ opGetData = 4
+ opSetData = 5
+ opGetAcl = 6
+ opSetAcl = 7
+ opGetChildren = 8
+ opSync = 9
+ opPing = 11
+ opGetChildren2 = 12
+ opCheck = 13
+ opMulti = 14
+ opClose = -11
+ opSetAuth = 100
+ opSetWatches = 101
+ // Not in protocol, used internally
+ opWatcherEvent = -2
+)
+
+const (
+ EventNodeCreated = EventType(1)
+ EventNodeDeleted = EventType(2)
+ EventNodeDataChanged = EventType(3)
+ EventNodeChildrenChanged = EventType(4)
+
+ EventSession = EventType(-1)
+ EventNotWatching = EventType(-2)
+)
+
+var (
+ eventNames = map[EventType]string{
+ EventNodeCreated: "EventNodeCreated",
+ EventNodeDeleted: "EventNodeDeleted",
+ EventNodeDataChanged: "EventNodeDataChanged",
+ EventNodeChildrenChanged: "EventNodeChildrenChanged",
+ EventSession: "EventSession",
+ EventNotWatching: "EventNotWatching",
+ }
+)
+
+const (
+ StateUnknown = State(-1)
+ StateDisconnected = State(0)
+ StateConnecting = State(1)
+ StateAuthFailed = State(4)
+ StateConnectedReadOnly = State(5)
+ StateSaslAuthenticated = State(6)
+ StateExpired = State(-112)
+ // StateAuthFailed = State(-113)
+
+ StateConnected = State(100)
+ StateHasSession = State(101)
+)
+
+const (
+ FlagEphemeral = 1
+ FlagSequence = 2
+)
+
+var (
+ stateNames = map[State]string{
+ StateUnknown: "StateUnknown",
+ StateDisconnected: "StateDisconnected",
+ StateConnectedReadOnly: "StateConnectedReadOnly",
+ StateSaslAuthenticated: "StateSaslAuthenticated",
+ StateExpired: "StateExpired",
+ StateAuthFailed: "StateAuthFailed",
+ StateConnecting: "StateConnecting",
+ StateConnected: "StateConnected",
+ StateHasSession: "StateHasSession",
+ }
+)
+
+type State int32
+
+func (s State) String() string {
+ if name := stateNames[s]; name != "" {
+ return name
+ }
+ return "Unknown"
+}
+
+type ErrCode int32
+
+var (
+ ErrConnectionClosed = errors.New("zk: connection closed")
+ ErrUnknown = errors.New("zk: unknown error")
+ ErrAPIError = errors.New("zk: api error")
+ ErrNoNode = errors.New("zk: node does not exist")
+ ErrNoAuth = errors.New("zk: not authenticated")
+ ErrBadVersion = errors.New("zk: version conflict")
+ ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children")
+ ErrNodeExists = errors.New("zk: node already exists")
+ ErrNotEmpty = errors.New("zk: node has children")
+ ErrSessionExpired = errors.New("zk: session has been expired by the server")
+ ErrInvalidACL = errors.New("zk: invalid ACL specified")
+ ErrAuthFailed = errors.New("zk: client authentication failed")
+ ErrClosing = errors.New("zk: zookeeper is closing")
+	ErrNothing                 = errors.New("zk: no server responses to process")
+ ErrSessionMoved = errors.New("zk: session moved to another server, so operation is ignored")
+
+ // ErrInvalidCallback = errors.New("zk: invalid callback specified")
+ errCodeToError = map[ErrCode]error{
+ 0: nil,
+ errAPIError: ErrAPIError,
+ errNoNode: ErrNoNode,
+ errNoAuth: ErrNoAuth,
+ errBadVersion: ErrBadVersion,
+ errNoChildrenForEphemerals: ErrNoChildrenForEphemerals,
+ errNodeExists: ErrNodeExists,
+ errNotEmpty: ErrNotEmpty,
+ errSessionExpired: ErrSessionExpired,
+ // errInvalidCallback: ErrInvalidCallback,
+ errInvalidAcl: ErrInvalidACL,
+ errAuthFailed: ErrAuthFailed,
+ errClosing: ErrClosing,
+ errNothing: ErrNothing,
+ errSessionMoved: ErrSessionMoved,
+ }
+)
+
+func (e ErrCode) toError() error {
+ if err, ok := errCodeToError[e]; ok {
+ return err
+ }
+ return ErrUnknown
+}
+
+const (
+ errOk = 0
+ // System and server-side errors
+ errSystemError = -1
+ errRuntimeInconsistency = -2
+ errDataInconsistency = -3
+ errConnectionLoss = -4
+ errMarshallingError = -5
+ errUnimplemented = -6
+ errOperationTimeout = -7
+ errBadArguments = -8
+ errInvalidState = -9
+ // API errors
+ errAPIError = ErrCode(-100)
+ errNoNode = ErrCode(-101) // *
+ errNoAuth = ErrCode(-102)
+ errBadVersion = ErrCode(-103) // *
+ errNoChildrenForEphemerals = ErrCode(-108)
+ errNodeExists = ErrCode(-110) // *
+ errNotEmpty = ErrCode(-111)
+ errSessionExpired = ErrCode(-112)
+ errInvalidCallback = ErrCode(-113)
+ errInvalidAcl = ErrCode(-114)
+ errAuthFailed = ErrCode(-115)
+ errClosing = ErrCode(-116)
+ errNothing = ErrCode(-117)
+ errSessionMoved = ErrCode(-118)
+)
+
+// Constants for ACL permissions
+const (
+ PermRead = 1 << iota
+ PermWrite
+ PermCreate
+ PermDelete
+ PermAdmin
+ PermAll = 0x1f
+)
+
+var (
+ emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ opNames = map[int32]string{
+ opNotify: "notify",
+ opCreate: "create",
+ opDelete: "delete",
+ opExists: "exists",
+ opGetData: "getData",
+ opSetData: "setData",
+ opGetAcl: "getACL",
+ opSetAcl: "setACL",
+ opGetChildren: "getChildren",
+ opSync: "sync",
+ opPing: "ping",
+ opGetChildren2: "getChildren2",
+ opCheck: "check",
+ opMulti: "multi",
+ opClose: "close",
+ opSetAuth: "setAuth",
+ opSetWatches: "setWatches",
+
+ opWatcherEvent: "watcherEvent",
+ }
+)
+
+type EventType int32
+
+func (t EventType) String() string {
+ if name := eventNames[t]; name != "" {
+ return name
+ }
+ return "Unknown"
+}
+
+// Mode is used to build custom server modes (leader|follower|standalone).
+type Mode uint8
+
+func (m Mode) String() string {
+ if name := modeNames[m]; name != "" {
+ return name
+ }
+ return "unknown"
+}
+
+const (
+ ModeUnknown Mode = iota
+ ModeLeader Mode = iota
+ ModeFollower Mode = iota
+ ModeStandalone Mode = iota
+)
+
+var (
+ modeNames = map[Mode]string{
+ ModeLeader: "leader",
+ ModeFollower: "follower",
+ ModeStandalone: "standalone",
+ }
+)
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/flw.go b/vendor/github.com/samuel/go-zookeeper/zk/flw.go
new file mode 100644
index 0000000000..1045c98cfd
--- /dev/null
+++ b/vendor/github.com/samuel/go-zookeeper/zk/flw.go
@@ -0,0 +1,290 @@
+package zk
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "regexp"
+ "strconv"
+ "time"
+)
+
+// FLWSrvr is a FourLetterWord helper function. In particular, this function pulls the srvr output
+// from the zookeeper instances and parses the output. A slice of *ServerStats structs is returned
+// as well as a boolean value to indicate whether this function processed successfully.
+//
+// If the boolean value is false there was a problem. If the *ServerStats slice is empty or nil,
+// then the error happened before we started to obtain 'srvr' values. Otherwise, one of the
+// servers had an issue and the "Error" value in the struct should be inspected to determine
+// which server had the issue.
+func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) {
+ // different parts of the regular expression that are required to parse the srvr output
+ var (
+ zrVer = `^Zookeeper version: ([A-Za-z0-9\.\-]+), built on (\d\d/\d\d/\d\d\d\d \d\d:\d\d [A-Za-z0-9:\+\-]+)`
+ zrLat = `^Latency min/avg/max: (\d+)/(\d+)/(\d+)`
+ zrNet = `^Received: (\d+).*\n^Sent: (\d+).*\n^Connections: (\d+).*\n^Outstanding: (\d+)`
+ zrState = `^Zxid: (0x[A-Za-z0-9]+).*\n^Mode: (\w+).*\n^Node count: (\d+)`
+ )
+
+ // build the regex from the pieces above
+ re, err := regexp.Compile(fmt.Sprintf(`(?m:\A%v.*\n%v.*\n%v.*\n%v)`, zrVer, zrLat, zrNet, zrState))
+
+ if err != nil {
+ return nil, false
+ }
+
+ imOk := true
+ servers = FormatServers(servers)
+ ss := make([]*ServerStats, len(servers))
+
+ for i := range ss {
+ response, err := fourLetterWord(servers[i], "srvr", timeout)
+
+ if err != nil {
+ ss[i] = &ServerStats{Error: err}
+ imOk = false
+ continue
+ }
+
+ matches := re.FindAllStringSubmatch(string(response), -1)
+
+ if matches == nil {
+ err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)")
+ ss[i] = &ServerStats{Error: err}
+ imOk = false
+ continue
+ }
+
+ match := matches[0][1:]
+
+ // determine current server
+ var srvrMode Mode
+ switch match[10] {
+ case "leader":
+ srvrMode = ModeLeader
+ case "follower":
+ srvrMode = ModeFollower
+ case "standalone":
+ srvrMode = ModeStandalone
+ default:
+ srvrMode = ModeUnknown
+ }
+
+ buildTime, err := time.Parse("01/02/2006 15:04 MST", match[1])
+
+ if err != nil {
+ ss[i] = &ServerStats{Error: err}
+ imOk = false
+ continue
+ }
+
+ parsedInt, err := strconv.ParseInt(match[9], 0, 64)
+
+ if err != nil {
+ ss[i] = &ServerStats{Error: err}
+ imOk = false
+ continue
+ }
+
+ // the ZxID value is an int64 with two int32s packed inside
+ // the high int32 is the epoch (i.e., number of leader elections)
+ // the low int32 is the counter
+ epoch := int32(parsedInt >> 32)
+ counter := int32(parsedInt & 0xFFFFFFFF)
+
+ // within the regex above, these values must be numerical
+ // so we can avoid useless checking of the error return value
+ minLatency, _ := strconv.ParseInt(match[2], 0, 64)
+ avgLatency, _ := strconv.ParseInt(match[3], 0, 64)
+ maxLatency, _ := strconv.ParseInt(match[4], 0, 64)
+ recv, _ := strconv.ParseInt(match[5], 0, 64)
+ sent, _ := strconv.ParseInt(match[6], 0, 64)
+ cons, _ := strconv.ParseInt(match[7], 0, 64)
+ outs, _ := strconv.ParseInt(match[8], 0, 64)
+ ncnt, _ := strconv.ParseInt(match[11], 0, 64)
+
+ ss[i] = &ServerStats{
+ Sent: sent,
+ Received: recv,
+ NodeCount: ncnt,
+ MinLatency: minLatency,
+ AvgLatency: avgLatency,
+ MaxLatency: maxLatency,
+ Connections: cons,
+ Outstanding: outs,
+ Epoch: epoch,
+ Counter: counter,
+ BuildTime: buildTime,
+ Mode: srvrMode,
+ Version: match[0],
+ }
+ }
+
+ return ss, imOk
+}
+
+// FLWRuok is a FourLetterWord helper function. In particular, this function
+// pulls the ruok output from each server.
+func FLWRuok(servers []string, timeout time.Duration) []bool {
+ servers = FormatServers(servers)
+ oks := make([]bool, len(servers))
+
+ for i := range oks {
+ response, err := fourLetterWord(servers[i], "ruok", timeout)
+
+ if err != nil {
+ continue
+ }
+
+ if bytes.Equal(response[:4], []byte("imok")) {
+ oks[i] = true
+ }
+ }
+ return oks
+}
+
+// FLWCons is a FourLetterWord helper function. In particular, this function
+// pulls the cons output from each server.
+//
+// As with FLWSrvr, the boolean value indicates whether one of the requests had
+// an issue. The Clients struct has an Error value that can be checked.
+func FLWCons(servers []string, timeout time.Duration) ([]*ServerClients, bool) {
+ var (
+ zrAddr = `^ /((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):(?:\d+))\[\d+\]`
+ zrPac = `\(queued=(\d+),recved=(\d+),sent=(\d+),sid=(0x[A-Za-z0-9]+),lop=(\w+),est=(\d+),to=(\d+),`
+ zrSesh = `lcxid=(0x[A-Za-z0-9]+),lzxid=(0x[A-Za-z0-9]+),lresp=(\d+),llat=(\d+),minlat=(\d+),avglat=(\d+),maxlat=(\d+)\)`
+ )
+
+ re, err := regexp.Compile(fmt.Sprintf("%v%v%v", zrAddr, zrPac, zrSesh))
+
+ if err != nil {
+ return nil, false
+ }
+
+ servers = FormatServers(servers)
+ sc := make([]*ServerClients, len(servers))
+ imOk := true
+
+ for i := range sc {
+ response, err := fourLetterWord(servers[i], "cons", timeout)
+
+ if err != nil {
+ sc[i] = &ServerClients{Error: err}
+ imOk = false
+ continue
+ }
+
+ scan := bufio.NewScanner(bytes.NewReader(response))
+
+ var clients []*ServerClient
+
+ for scan.Scan() {
+ line := scan.Bytes()
+
+ if len(line) == 0 {
+ continue
+ }
+
+ m := re.FindAllStringSubmatch(string(line), -1)
+
+ if m == nil {
+ err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)")
+ sc[i] = &ServerClients{Error: err}
+ imOk = false
+ continue
+ }
+
+ match := m[0][1:]
+
+ queued, _ := strconv.ParseInt(match[1], 0, 64)
+ recvd, _ := strconv.ParseInt(match[2], 0, 64)
+ sent, _ := strconv.ParseInt(match[3], 0, 64)
+ sid, _ := strconv.ParseInt(match[4], 0, 64)
+ est, _ := strconv.ParseInt(match[6], 0, 64)
+ timeout, _ := strconv.ParseInt(match[7], 0, 32)
+ lresp, _ := strconv.ParseInt(match[10], 0, 64)
+ llat, _ := strconv.ParseInt(match[11], 0, 32)
+ minlat, _ := strconv.ParseInt(match[12], 0, 32)
+ avglat, _ := strconv.ParseInt(match[13], 0, 32)
+ maxlat, _ := strconv.ParseInt(match[14], 0, 32)
+
+ // zookeeper returns a value, '0xffffffffffffffff', as the
+ // Lzxid for PING requests in the 'cons' output.
+ // unfortunately, in Go that is an invalid int64 and is not represented
+ // as -1.
+ // However, converting the string value to a big.Int and then back to
+// an int64 properly sets the value to -1.
+ lzxid, ok := new(big.Int).SetString(match[9], 0)
+
+ var errVal error
+
+ if !ok {
+ errVal = fmt.Errorf("failed to convert lzxid value to big.Int")
+ imOk = false
+ }
+
+ lcxid, ok := new(big.Int).SetString(match[8], 0)
+
+ if !ok && errVal == nil {
+ errVal = fmt.Errorf("failed to convert lcxid value to big.Int")
+ imOk = false
+ }
+
+ clients = append(clients, &ServerClient{
+ Queued: queued,
+ Received: recvd,
+ Sent: sent,
+ SessionID: sid,
+ Lcxid: lcxid.Int64(),
+ Lzxid: lzxid.Int64(),
+ Timeout: int32(timeout),
+ LastLatency: int32(llat),
+ MinLatency: int32(minlat),
+ AvgLatency: int32(avglat),
+ MaxLatency: int32(maxlat),
+ Established: time.Unix(est, 0),
+ LastResponse: time.Unix(lresp, 0),
+ Addr: match[0],
+ LastOperation: match[5],
+ Error: errVal,
+ })
+ }
+
+ sc[i] = &ServerClients{Clients: clients}
+ }
+
+ return sc, imOk
+}
+
+func fourLetterWord(server, command string, timeout time.Duration) ([]byte, error) {
+ conn, err := net.DialTimeout("tcp", server, timeout)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // the zookeeper server should automatically close this socket
+ // once the command has been processed, but better safe than sorry
+ defer conn.Close()
+
+ conn.SetWriteDeadline(time.Now().Add(timeout))
+
+ _, err = conn.Write([]byte(command))
+
+ if err != nil {
+ return nil, err
+ }
+
+ conn.SetReadDeadline(time.Now().Add(timeout))
+
+ resp, err := ioutil.ReadAll(conn)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/lock.go b/vendor/github.com/samuel/go-zookeeper/zk/lock.go
new file mode 100644
index 0000000000..f13a8b0ba6
--- /dev/null
+++ b/vendor/github.com/samuel/go-zookeeper/zk/lock.go
@@ -0,0 +1,142 @@
+package zk
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ // ErrDeadlock is returned by Lock when trying to lock twice without unlocking first
+ ErrDeadlock = errors.New("zk: trying to acquire a lock twice")
+	// ErrNotLocked is returned by Unlock when trying to release a lock that has not first been acquired.
+ ErrNotLocked = errors.New("zk: not locked")
+)
+
+// Lock is a mutual exclusion lock.
+type Lock struct {
+ c *Conn
+ path string
+ acl []ACL
+ lockPath string
+ seq int
+}
+
+// NewLock creates a new lock instance using the provided connection, path, and acl.
+// The path must be a node that is only used by this lock. A lock instance starts
+// unlocked until Lock() is called.
+func NewLock(c *Conn, path string, acl []ACL) *Lock {
+ return &Lock{
+ c: c,
+ path: path,
+ acl: acl,
+ }
+}
+
+func parseSeq(path string) (int, error) {
+ parts := strings.Split(path, "-")
+ return strconv.Atoi(parts[len(parts)-1])
+}
+
+// Lock attempts to acquire the lock. It will wait to return until the lock
+// is acquired or an error occurs. If this instance already has the lock
+// then ErrDeadlock is returned.
+func (l *Lock) Lock() error {
+ if l.lockPath != "" {
+ return ErrDeadlock
+ }
+
+ prefix := fmt.Sprintf("%s/lock-", l.path)
+
+ path := ""
+ var err error
+ for i := 0; i < 3; i++ {
+ path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl)
+ if err == ErrNoNode {
+ // Create parent node.
+ parts := strings.Split(l.path, "/")
+ pth := ""
+ for _, p := range parts[1:] {
+ pth += "/" + p
+ _, err := l.c.Create(pth, []byte{}, 0, l.acl)
+ if err != nil && err != ErrNodeExists {
+ return err
+ }
+ }
+ } else if err == nil {
+ break
+ } else {
+ return err
+ }
+ }
+ if err != nil {
+ return err
+ }
+
+ seq, err := parseSeq(path)
+ if err != nil {
+ return err
+ }
+
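+	// Standard ZooKeeper lock recipe: the lock is held once our sequence
+	// number is the lowest among the children; otherwise wait on the node
+	// with the next-lowest sequence number.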
+ for {
+ children, _, err := l.c.Children(l.path)
+ if err != nil {
+ return err
+ }
+
+ lowestSeq := seq
+ prevSeq := 0
+ prevSeqPath := ""
+ for _, p := range children {
+ s, err := parseSeq(p)
+ if err != nil {
+ return err
+ }
+ if s < lowestSeq {
+ lowestSeq = s
+ }
+ if s < seq && s > prevSeq {
+ prevSeq = s
+ prevSeqPath = p
+ }
+ }
+
+ if seq == lowestSeq {
+ // Acquired the lock
+ break
+ }
+
+ // Wait on the node next in line for the lock
+ _, _, ch, err := l.c.GetW(l.path + "/" + prevSeqPath)
+ if err != nil && err != ErrNoNode {
+ return err
+		} else if err == ErrNoNode {
+ // try again
+ continue
+ }
+
+ ev := <-ch
+ if ev.Err != nil {
+ return ev.Err
+ }
+ }
+
+ l.seq = seq
+ l.lockPath = path
+ return nil
+}
+
+// Unlock releases an acquired lock. If the lock is not currently acquired by
+// this Lock instance, then ErrNotLocked is returned.
+func (l *Lock) Unlock() error {
+ if l.lockPath == "" {
+ return ErrNotLocked
+ }
+ if err := l.c.Delete(l.lockPath, -1); err != nil {
+ return err
+ }
+ l.lockPath = ""
+ l.seq = 0
+ return nil
+}
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/server_help.go b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go
new file mode 100644
index 0000000000..4a53772bde
--- /dev/null
+++ b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go
@@ -0,0 +1,119 @@
+package zk
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "time"
+)
+
+type TestServer struct {
+ Port int
+ Path string
+ Srv *Server
+}
+
+type TestCluster struct {
+ Path string
+ Servers []TestServer
+}
+
+func StartTestCluster(size int, stdout, stderr io.Writer) (*TestCluster, error) {
+ tmpPath, err := ioutil.TempDir("", "gozk")
+ if err != nil {
+ return nil, err
+ }
+ success := false
+ startPort := int(rand.Int31n(6000) + 10000)
+ cluster := &TestCluster{Path: tmpPath}
+ defer func() {
+ if !success {
+ cluster.Stop()
+ }
+ }()
+ for serverN := 0; serverN < size; serverN++ {
+ srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN))
+ if err := os.Mkdir(srvPath, 0700); err != nil {
+ return nil, err
+ }
+ port := startPort + serverN*3
+ cfg := ServerConfig{
+ ClientPort: port,
+ DataDir: srvPath,
+ }
+ for i := 0; i < size; i++ {
+ cfg.Servers = append(cfg.Servers, ServerConfigServer{
+ ID: i + 1,
+ Host: "127.0.0.1",
+ PeerPort: startPort + i*3 + 1,
+ LeaderElectionPort: startPort + i*3 + 2,
+ })
+ }
+ cfgPath := filepath.Join(srvPath, "zoo.cfg")
+ fi, err := os.Create(cfgPath)
+ if err != nil {
+ return nil, err
+ }
+ err = cfg.Marshall(fi)
+ fi.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ fi, err = os.Create(filepath.Join(srvPath, "myid"))
+ if err != nil {
+ return nil, err
+ }
+ _, err = fmt.Fprintf(fi, "%d\n", serverN+1)
+ fi.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ srv := &Server{
+ ConfigPath: cfgPath,
+ Stdout: stdout,
+ Stderr: stderr,
+ }
+ if err := srv.Start(); err != nil {
+ return nil, err
+ }
+ cluster.Servers = append(cluster.Servers, TestServer{
+ Path: srvPath,
+ Port: cfg.ClientPort,
+ Srv: srv,
+ })
+ }
+ success = true
+ time.Sleep(time.Second) // Give the server time to become active. Should probably actually attempt to connect to verify.
+ return cluster, nil
+}
+
+func (ts *TestCluster) Connect(idx int) (*Conn, error) {
+ zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", ts.Servers[idx].Port)}, time.Second*15)
+ return zk, err
+}
+
+func (ts *TestCluster) ConnectAll() (*Conn, <-chan Event, error) {
+ return ts.ConnectAllTimeout(time.Second * 15)
+}
+
+func (ts *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) {
+ hosts := make([]string, len(ts.Servers))
+ for i, srv := range ts.Servers {
+ hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port)
+ }
+ zk, ch, err := Connect(hosts, sessionTimeout)
+ return zk, ch, err
+}
+
+func (ts *TestCluster) Stop() error {
+ for _, srv := range ts.Servers {
+ srv.Srv.Stop()
+ }
+ defer os.RemoveAll(ts.Path)
+ return nil
+}
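+
+// Example usage in a test (a sketch; t is a *testing.T, and StartTestCluster
+// requires a ZooKeeper fat-jar that findZookeeperFatJar can locate, e.g. via
+// the ZOOKEEPER_PATH environment variable):
+//
+//   ts, err := zk.StartTestCluster(3, os.Stdout, os.Stderr)
+//   if err != nil {
+//       t.Fatal(err)
+//   }
+//   defer ts.Stop()
+//   conn, _, err := ts.ConnectAll()
+//   if err != nil {
+//       t.Fatal(err)
+//   }
+//   defer conn.Close()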
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/server_java.go b/vendor/github.com/samuel/go-zookeeper/zk/server_java.go
new file mode 100644
index 0000000000..e553ec1d9f
--- /dev/null
+++ b/vendor/github.com/samuel/go-zookeeper/zk/server_java.go
@@ -0,0 +1,136 @@
+package zk
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+)
+
+type ErrMissingServerConfigField string
+
+func (e ErrMissingServerConfigField) Error() string {
+ return fmt.Sprintf("zk: missing server config field '%s'", string(e))
+}
+
+const (
+ DefaultServerTickTime = 2000
+ DefaultServerInitLimit = 10
+ DefaultServerSyncLimit = 5
+ DefaultServerAutoPurgeSnapRetainCount = 3
+ DefaultPeerPort = 2888
+ DefaultLeaderElectionPort = 3888
+)
+
+type ServerConfigServer struct {
+ ID int
+ Host string
+ PeerPort int
+ LeaderElectionPort int
+}
+
+type ServerConfig struct {
+ TickTime int // Number of milliseconds of each tick
+ InitLimit int // Number of ticks that the initial synchronization phase can take
+ SyncLimit int // Number of ticks that can pass between sending a request and getting an acknowledgement
+ DataDir string // Directory where the snapshot is stored
+ ClientPort int // Port at which clients will connect
+ AutoPurgeSnapRetainCount int // Number of snapshots to retain in dataDir
+ AutoPurgePurgeInterval int // Purge task interval in hours (0 to disable auto purge)
+ Servers []ServerConfigServer
+}
+
+func (sc ServerConfig) Marshall(w io.Writer) error {
+ if sc.DataDir == "" {
+ return ErrMissingServerConfigField("dataDir")
+ }
+ fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir)
+ if sc.TickTime <= 0 {
+ sc.TickTime = DefaultServerTickTime
+ }
+ fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime)
+ if sc.InitLimit <= 0 {
+ sc.InitLimit = DefaultServerInitLimit
+ }
+ fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit)
+ if sc.SyncLimit <= 0 {
+ sc.SyncLimit = DefaultServerSyncLimit
+ }
+ fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit)
+ if sc.ClientPort <= 0 {
+ sc.ClientPort = DefaultPort
+ }
+ fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort)
+ if sc.AutoPurgePurgeInterval > 0 {
+ if sc.AutoPurgeSnapRetainCount <= 0 {
+ sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount
+ }
+ fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount)
+ fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval)
+ }
+ if len(sc.Servers) > 0 {
+ for _, srv := range sc.Servers {
+ if srv.PeerPort <= 0 {
+ srv.PeerPort = DefaultPeerPort
+ }
+ if srv.LeaderElectionPort <= 0 {
+ srv.LeaderElectionPort = DefaultLeaderElectionPort
+ }
+ fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort)
+ }
+ }
+ return nil
+}
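+
+// For a single-node config with only DataDir and ClientPort set, Marshall
+// writes output along these lines (defaults filled in for the unset fields):
+//
+//   dataDir=/tmp/zk
+//   tickTime=2000
+//   initLimit=10
+//   syncLimit=5
+//   clientPort=2181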
+
+var jarSearchPaths = []string{
+ "zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
+ "../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
+ "/usr/share/java/zookeeper-*.jar",
+ "/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
+ "/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar",
+}
+
+func findZookeeperFatJar() string {
+ var paths []string
+ zkPath := os.Getenv("ZOOKEEPER_PATH")
+ if zkPath == "" {
+ paths = jarSearchPaths
+ } else {
+ paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")}
+ }
+ for _, path := range paths {
+ matches, _ := filepath.Glob(path)
+ // TODO: could sort by version and pick latest
+ if len(matches) > 0 {
+ return matches[0]
+ }
+ }
+ return ""
+}
+
+type Server struct {
+ JarPath string
+ ConfigPath string
+ Stdout, Stderr io.Writer
+
+ cmd *exec.Cmd
+}
+
+func (srv *Server) Start() error {
+ if srv.JarPath == "" {
+ srv.JarPath = findZookeeperFatJar()
+ if srv.JarPath == "" {
+ return fmt.Errorf("zk: unable to find server jar")
+ }
+ }
+ srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath)
+ srv.cmd.Stdout = srv.Stdout
+ srv.cmd.Stderr = srv.Stderr
+ return srv.cmd.Start()
+}
+
+func (srv *Server) Stop() error {
+ srv.cmd.Process.Signal(os.Kill)
+ return srv.cmd.Wait()
+}
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/structs.go b/vendor/github.com/samuel/go-zookeeper/zk/structs.go
new file mode 100644
index 0000000000..8fbc069ee1
--- /dev/null
+++ b/vendor/github.com/samuel/go-zookeeper/zk/structs.go
@@ -0,0 +1,640 @@
+package zk
+
+import (
+ "encoding/binary"
+ "errors"
+ "log"
+ "reflect"
+ "runtime"
+ "time"
+)
+
+var (
+ ErrUnhandledFieldType = errors.New("zk: unhandled field type")
+ ErrPtrExpected = errors.New("zk: encode/decode expect a non-nil pointer to struct")
+ ErrShortBuffer = errors.New("zk: buffer too small")
+)
+
+type defaultLogger struct{}
+
+func (defaultLogger) Printf(format string, a ...interface{}) {
+ log.Printf(format, a...)
+}
+
+type ACL struct {
+ Perms int32
+ Scheme string
+ ID string
+}
+
+type Stat struct {
+ Czxid int64 // The zxid of the change that caused this znode to be created.
+ Mzxid int64 // The zxid of the change that last modified this znode.
+ Ctime int64 // The time in milliseconds from epoch when this znode was created.
+ Mtime int64 // The time in milliseconds from epoch when this znode was last modified.
+ Version int32 // The number of changes to the data of this znode.
+ Cversion int32 // The number of changes to the children of this znode.
+ Aversion int32 // The number of changes to the ACL of this znode.
+ EphemeralOwner int64 // The session id of the owner of this znode if the znode is an ephemeral node. If it is not an ephemeral node, it will be zero.
+ DataLength int32 // The length of the data field of this znode.
+ NumChildren int32 // The number of children of this znode.
+ Pzxid int64 // The zxid of the change that last modified the children of this znode.
+}
+
+// ServerClient is the information for a single Zookeeper client and its session.
+// This is used to parse/extract the output of the `cons` command.
+type ServerClient struct {
+ Queued int64
+ Received int64
+ Sent int64
+ SessionID int64
+ Lcxid int64
+ Lzxid int64
+ Timeout int32
+ LastLatency int32
+ MinLatency int32
+ AvgLatency int32
+ MaxLatency int32
+ Established time.Time
+ LastResponse time.Time
+ Addr string
+ LastOperation string // maybe?
+ Error error
+}
+
+// ServerClients is a struct for the FLWCons() function. It's used to provide
+// the list of Clients.
+//
+// This is needed because FLWCons() takes multiple servers.
+type ServerClients struct {
+ Clients []*ServerClient
+ Error error
+}
+
+// ServerStats is the information pulled from the Zookeeper `stat` command.
+type ServerStats struct {
+ Sent int64
+ Received int64
+ NodeCount int64
+ MinLatency int64
+ AvgLatency int64
+ MaxLatency int64
+ Connections int64
+ Outstanding int64
+ Epoch int32
+ Counter int32
+ BuildTime time.Time
+ Mode Mode
+ Version string
+ Error error
+}
+
+type requestHeader struct {
+ Xid int32
+ Opcode int32
+}
+
+type responseHeader struct {
+ Xid int32
+ Zxid int64
+ Err ErrCode
+}
+
+type multiHeader struct {
+ Type int32
+ Done bool
+ Err ErrCode
+}
+
+type auth struct {
+ Type int32
+ Scheme string
+ Auth []byte
+}
+
+// Generic request structs
+
+type pathRequest struct {
+ Path string
+}
+
+type PathVersionRequest struct {
+ Path string
+ Version int32
+}
+
+type pathWatchRequest struct {
+ Path string
+ Watch bool
+}
+
+type pathResponse struct {
+ Path string
+}
+
+type statResponse struct {
+ Stat Stat
+}
+
+//
+
+type CheckVersionRequest PathVersionRequest
+type closeRequest struct{}
+type closeResponse struct{}
+
+type connectRequest struct {
+ ProtocolVersion int32
+ LastZxidSeen int64
+ TimeOut int32
+ SessionID int64
+ Passwd []byte
+}
+
+type connectResponse struct {
+ ProtocolVersion int32
+ TimeOut int32
+ SessionID int64
+ Passwd []byte
+}
+
+type CreateRequest struct {
+ Path string
+ Data []byte
+ Acl []ACL
+ Flags int32
+}
+
+type createResponse pathResponse
+type DeleteRequest PathVersionRequest
+type deleteResponse struct{}
+
+type errorResponse struct {
+ Err int32
+}
+
+type existsRequest pathWatchRequest
+type existsResponse statResponse
+type getAclRequest pathRequest
+
+type getAclResponse struct {
+ Acl []ACL
+ Stat Stat
+}
+
+type getChildrenRequest pathRequest
+
+type getChildrenResponse struct {
+ Children []string
+}
+
+type getChildren2Request pathWatchRequest
+
+type getChildren2Response struct {
+ Children []string
+ Stat Stat
+}
+
+type getDataRequest pathWatchRequest
+
+type getDataResponse struct {
+ Data []byte
+ Stat Stat
+}
+
+type getMaxChildrenRequest pathRequest
+
+type getMaxChildrenResponse struct {
+ Max int32
+}
+
+type getSaslRequest struct {
+ Token []byte
+}
+
+type pingRequest struct{}
+type pingResponse struct{}
+
+type setAclRequest struct {
+ Path string
+ Acl []ACL
+ Version int32
+}
+
+type setAclResponse statResponse
+
+type SetDataRequest struct {
+ Path string
+ Data []byte
+ Version int32
+}
+
+type setDataResponse statResponse
+
+type setMaxChildren struct {
+ Path string
+ Max int32
+}
+
+type setSaslRequest struct {
+ Token string
+}
+
+type setSaslResponse struct {
+ Token string
+}
+
+type setWatchesRequest struct {
+ RelativeZxid int64
+ DataWatches []string
+ ExistWatches []string
+ ChildWatches []string
+}
+
+type setWatchesResponse struct{}
+
+type syncRequest pathRequest
+type syncResponse pathResponse
+
+type setAuthRequest auth
+type setAuthResponse struct{}
+
+type multiRequestOp struct {
+ Header multiHeader
+ Op interface{}
+}
+type multiRequest struct {
+ Ops []multiRequestOp
+ DoneHeader multiHeader
+}
+type multiResponseOp struct {
+ Header multiHeader
+ String string
+ Stat *Stat
+}
+type multiResponse struct {
+ Ops []multiResponseOp
+ DoneHeader multiHeader
+}
+
+func (r *multiRequest) Encode(buf []byte) (int, error) {
+ total := 0
+ for _, op := range r.Ops {
+ op.Header.Done = false
+ n, err := encodePacketValue(buf[total:], reflect.ValueOf(op))
+ if err != nil {
+ return total, err
+ }
+ total += n
+ }
+ r.DoneHeader.Done = true
+ n, err := encodePacketValue(buf[total:], reflect.ValueOf(r.DoneHeader))
+ if err != nil {
+ return total, err
+ }
+ total += n
+
+ return total, nil
+}
+
+func (r *multiRequest) Decode(buf []byte) (int, error) {
+ r.Ops = make([]multiRequestOp, 0)
+ r.DoneHeader = multiHeader{-1, true, -1}
+ total := 0
+ for {
+ header := &multiHeader{}
+ n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
+ if err != nil {
+ return total, err
+ }
+ total += n
+ if header.Done {
+ r.DoneHeader = *header
+ break
+ }
+
+ req := requestStructForOp(header.Type)
+ if req == nil {
+ return total, ErrAPIError
+ }
+ n, err = decodePacketValue(buf[total:], reflect.ValueOf(req))
+ if err != nil {
+ return total, err
+ }
+ total += n
+ r.Ops = append(r.Ops, multiRequestOp{*header, req})
+ }
+ return total, nil
+}
+
+func (r *multiResponse) Decode(buf []byte) (int, error) {
+ r.Ops = make([]multiResponseOp, 0)
+ r.DoneHeader = multiHeader{-1, true, -1}
+ total := 0
+ for {
+ header := &multiHeader{}
+ n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
+ if err != nil {
+ return total, err
+ }
+ total += n
+ if header.Done {
+ r.DoneHeader = *header
+ break
+ }
+
+ res := multiResponseOp{Header: *header}
+ var w reflect.Value
+ switch header.Type {
+ default:
+ return total, ErrAPIError
+ case opCreate:
+ w = reflect.ValueOf(&res.String)
+ case opSetData:
+ res.Stat = new(Stat)
+ w = reflect.ValueOf(res.Stat)
+ case opCheck, opDelete:
+ }
+ if w.IsValid() {
+ n, err := decodePacketValue(buf[total:], w)
+ if err != nil {
+ return total, err
+ }
+ total += n
+ }
+ r.Ops = append(r.Ops, res)
+ }
+ return total, nil
+}
+
+type watcherEvent struct {
+ Type EventType
+ State State
+ Path string
+}
+
+type decoder interface {
+ Decode(buf []byte) (int, error)
+}
+
+type encoder interface {
+ Encode(buf []byte) (int, error)
+}
+
+func decodePacket(buf []byte, st interface{}) (n int, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
+ err = ErrShortBuffer
+ } else {
+ panic(r)
+ }
+ }
+ }()
+
+ v := reflect.ValueOf(st)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return 0, ErrPtrExpected
+ }
+ return decodePacketValue(buf, v)
+}
+
+func decodePacketValue(buf []byte, v reflect.Value) (int, error) {
+ rv := v
+ kind := v.Kind()
+ if kind == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ kind = v.Kind()
+ }
+
+ n := 0
+ switch kind {
+ default:
+ return n, ErrUnhandledFieldType
+ case reflect.Struct:
+ if de, ok := rv.Interface().(decoder); ok {
+ return de.Decode(buf)
+ } else if de, ok := v.Interface().(decoder); ok {
+ return de.Decode(buf)
+ } else {
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ n2, err := decodePacketValue(buf[n:], field)
+ n += n2
+ if err != nil {
+ return n, err
+ }
+ }
+ }
+ case reflect.Bool:
+ v.SetBool(buf[n] != 0)
+ n++
+ case reflect.Int32:
+ v.SetInt(int64(binary.BigEndian.Uint32(buf[n : n+4])))
+ n += 4
+ case reflect.Int64:
+ v.SetInt(int64(binary.BigEndian.Uint64(buf[n : n+8])))
+ n += 8
+ case reflect.String:
+ ln := int(binary.BigEndian.Uint32(buf[n : n+4]))
+ v.SetString(string(buf[n+4 : n+4+ln]))
+ n += 4 + ln
+ case reflect.Slice:
+ switch v.Type().Elem().Kind() {
+ default:
+ count := int(binary.BigEndian.Uint32(buf[n : n+4]))
+ n += 4
+ values := reflect.MakeSlice(v.Type(), count, count)
+ v.Set(values)
+ for i := 0; i < count; i++ {
+ n2, err := decodePacketValue(buf[n:], values.Index(i))
+ n += n2
+ if err != nil {
+ return n, err
+ }
+ }
+ case reflect.Uint8:
+ ln := int(int32(binary.BigEndian.Uint32(buf[n : n+4])))
+ if ln < 0 {
+ n += 4
+ v.SetBytes(nil)
+ } else {
+ bytes := make([]byte, ln)
+ copy(bytes, buf[n+4:n+4+ln])
+ v.SetBytes(bytes)
+ n += 4 + ln
+ }
+ }
+ }
+ return n, nil
+}
+
+func encodePacket(buf []byte, st interface{}) (n int, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
+ err = ErrShortBuffer
+ } else {
+ panic(r)
+ }
+ }
+ }()
+
+ v := reflect.ValueOf(st)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return 0, ErrPtrExpected
+ }
+ return encodePacketValue(buf, v)
+}
+
+func encodePacketValue(buf []byte, v reflect.Value) (int, error) {
+ rv := v
+ for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
+ v = v.Elem()
+ }
+
+ n := 0
+ switch v.Kind() {
+ default:
+ return n, ErrUnhandledFieldType
+ case reflect.Struct:
+ if en, ok := rv.Interface().(encoder); ok {
+ return en.Encode(buf)
+ } else if en, ok := v.Interface().(encoder); ok {
+ return en.Encode(buf)
+ } else {
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ n2, err := encodePacketValue(buf[n:], field)
+ n += n2
+ if err != nil {
+ return n, err
+ }
+ }
+ }
+ case reflect.Bool:
+ if v.Bool() {
+ buf[n] = 1
+ } else {
+ buf[n] = 0
+ }
+ n++
+ case reflect.Int32:
+ binary.BigEndian.PutUint32(buf[n:n+4], uint32(v.Int()))
+ n += 4
+ case reflect.Int64:
+ binary.BigEndian.PutUint64(buf[n:n+8], uint64(v.Int()))
+ n += 8
+ case reflect.String:
+ str := v.String()
+ binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(str)))
+ copy(buf[n+4:n+4+len(str)], []byte(str))
+ n += 4 + len(str)
+ case reflect.Slice:
+ switch v.Type().Elem().Kind() {
+ default:
+ count := v.Len()
+ startN := n
+ n += 4
+ for i := 0; i < count; i++ {
+ n2, err := encodePacketValue(buf[n:], v.Index(i))
+ n += n2
+ if err != nil {
+ return n, err
+ }
+ }
+ binary.BigEndian.PutUint32(buf[startN:startN+4], uint32(count))
+ case reflect.Uint8:
+ if v.IsNil() {
+ binary.BigEndian.PutUint32(buf[n:n+4], uint32(0xffffffff))
+ n += 4
+ } else {
+ bytes := v.Bytes()
+ binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(bytes)))
+ copy(buf[n+4:n+4+len(bytes)], bytes)
+ n += 4 + len(bytes)
+ }
+ }
+ }
+ return n, nil
+}
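+
+// A round-trip sketch of the reflection-based codec above: any struct built
+// from the supported kinds (bool, int32, int64, string, byte slices, nested
+// structs) can be encoded into a buffer and decoded back.
+//
+//   buf := make([]byte, 256)
+//   n, err := encodePacket(buf, &pathWatchRequest{Path: "/foo", Watch: true})
+//   if err != nil {
+//       panic(err)
+//   }
+//   out := &pathWatchRequest{}
+//   if _, err := decodePacket(buf[:n], out); err != nil {
+//       panic(err)
+//   }
+//   // out.Path == "/foo" && out.Watch == true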
+
+func requestStructForOp(op int32) interface{} {
+ switch op {
+ case opClose:
+ return &closeRequest{}
+ case opCreate:
+ return &CreateRequest{}
+ case opDelete:
+ return &DeleteRequest{}
+ case opExists:
+ return &existsRequest{}
+ case opGetAcl:
+ return &getAclRequest{}
+ case opGetChildren:
+ return &getChildrenRequest{}
+ case opGetChildren2:
+ return &getChildren2Request{}
+ case opGetData:
+ return &getDataRequest{}
+ case opPing:
+ return &pingRequest{}
+ case opSetAcl:
+ return &setAclRequest{}
+ case opSetData:
+ return &SetDataRequest{}
+ case opSetWatches:
+ return &setWatchesRequest{}
+ case opSync:
+ return &syncRequest{}
+ case opSetAuth:
+ return &setAuthRequest{}
+ case opCheck:
+ return &CheckVersionRequest{}
+ case opMulti:
+ return &multiRequest{}
+ }
+ return nil
+}
+
+func responseStructForOp(op int32) interface{} {
+ switch op {
+ case opClose:
+ return &closeResponse{}
+ case opCreate:
+ return &createResponse{}
+ case opDelete:
+ return &deleteResponse{}
+ case opExists:
+ return &existsResponse{}
+ case opGetAcl:
+ return &getAclResponse{}
+ case opGetChildren:
+ return &getChildrenResponse{}
+ case opGetChildren2:
+ return &getChildren2Response{}
+ case opGetData:
+ return &getDataResponse{}
+ case opPing:
+ return &pingResponse{}
+ case opSetAcl:
+ return &setAclResponse{}
+ case opSetData:
+ return &setDataResponse{}
+ case opSetWatches:
+ return &setWatchesResponse{}
+ case opSync:
+ return &syncResponse{}
+ case opWatcherEvent:
+ return &watcherEvent{}
+ case opSetAuth:
+ return &setAuthResponse{}
+ // case opCheck:
+ // return &checkVersionResponse{}
+ case opMulti:
+ return &multiResponse{}
+ }
+ return nil
+}
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/tracer.go b/vendor/github.com/samuel/go-zookeeper/zk/tracer.go
new file mode 100644
index 0000000000..7af2e96bbc
--- /dev/null
+++ b/vendor/github.com/samuel/go-zookeeper/zk/tracer.go
@@ -0,0 +1,148 @@
+package zk
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+)
+
+var (
+ requests = make(map[int32]int32) // Map of Xid -> Opcode
+ requestsLock = &sync.Mutex{}
+)
+
+func trace(conn1, conn2 net.Conn, client bool) {
+ defer conn1.Close()
+ defer conn2.Close()
+ buf := make([]byte, 10*1024)
+ init := true
+ for {
+ _, err := io.ReadFull(conn1, buf[:4])
+ if err != nil {
+ fmt.Println("1>", client, err)
+ return
+ }
+
+ blen := int(binary.BigEndian.Uint32(buf[:4]))
+
+ _, err = io.ReadFull(conn1, buf[4:4+blen])
+ if err != nil {
+ fmt.Println("2>", client, err)
+ return
+ }
+
+ var cr interface{}
+ opcode := int32(-1)
+ readHeader := true
+ if client {
+ if init {
+ cr = &connectRequest{}
+ readHeader = false
+ } else {
+ xid := int32(binary.BigEndian.Uint32(buf[4:8]))
+ opcode = int32(binary.BigEndian.Uint32(buf[8:12]))
+ requestsLock.Lock()
+ requests[xid] = opcode
+ requestsLock.Unlock()
+ cr = requestStructForOp(opcode)
+ if cr == nil {
+ fmt.Printf("Unknown opcode %d\n", opcode)
+ }
+ }
+ } else {
+ if init {
+ cr = &connectResponse{}
+ readHeader = false
+ } else {
+ xid := int32(binary.BigEndian.Uint32(buf[4:8]))
+ zxid := int64(binary.BigEndian.Uint64(buf[8:16]))
+ errnum := int32(binary.BigEndian.Uint32(buf[16:20]))
+ if xid != -1 || zxid != -1 {
+ requestsLock.Lock()
+ found := false
+ opcode, found = requests[xid]
+ if !found {
+ opcode = 0
+ }
+ delete(requests, xid)
+ requestsLock.Unlock()
+ } else {
+ opcode = opWatcherEvent
+ }
+ cr = responseStructForOp(opcode)
+ if cr == nil {
+ fmt.Printf("Unknown opcode %d\n", opcode)
+ }
+ if errnum != 0 {
+ cr = &struct{}{}
+ }
+ }
+ }
+ opname := "."
+ if opcode != -1 {
+ opname = opNames[opcode]
+ }
+ if cr == nil {
+ fmt.Printf("%+v %s %+v\n", client, opname, buf[4:4+blen])
+ } else {
+ n := 4
+ hdrStr := ""
+ if readHeader {
+ var hdr interface{}
+ if client {
+ hdr = &requestHeader{}
+ } else {
+ hdr = &responseHeader{}
+ }
+ if n2, err := decodePacket(buf[n:n+blen], hdr); err != nil {
+ fmt.Println(err)
+ } else {
+ n += n2
+ }
+ hdrStr = fmt.Sprintf(" %+v", hdr)
+ }
+ if _, err := decodePacket(buf[n:n+blen], cr); err != nil {
+ fmt.Println(err)
+ }
+ fmt.Printf("%+v %s%s %+v\n", client, opname, hdrStr, cr)
+ }
+
+ init = false
+
+ written, err := conn2.Write(buf[:4+blen])
+ if err != nil {
+ fmt.Println("3>", client, err)
+ return
+ } else if written != 4+blen {
+ fmt.Printf("Written != read: %d != %d\n", written, blen)
+ return
+ }
+ }
+}
+
+func handleConnection(addr string, conn net.Conn) {
+ zkConn, err := net.Dial("tcp", addr)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ go trace(conn, zkConn, true)
+ trace(zkConn, conn, false)
+}
+
+func StartTracer(listenAddr, serverAddr string) {
+ ln, err := net.Listen("tcp", listenAddr)
+ if err != nil {
+ panic(err)
+ }
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ fmt.Println(err)
+ continue
+ }
+ go handleConnection(serverAddr, conn)
+ }
+}
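+
+// Example usage (a sketch; assumes a ZooKeeper server on 127.0.0.1:2181):
+//
+//   // Proxy and dump all traffic between clients on :2182 and the server.
+//   go zk.StartTracer("127.0.0.1:2182", "127.0.0.1:2181")
+//   c, _, _ := zk.Connect([]string{"127.0.0.1:2182"}, 10*time.Second)
+//   defer c.Close()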
diff --git a/vendor/github.com/samuel/go-zookeeper/zk/util.go b/vendor/github.com/samuel/go-zookeeper/zk/util.go
new file mode 100644
index 0000000000..769bbe878b
--- /dev/null
+++ b/vendor/github.com/samuel/go-zookeeper/zk/util.go
@@ -0,0 +1,54 @@
+package zk
+
+import (
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "math/rand"
+ "strconv"
+ "strings"
+)
+
+// AuthACL produces an ACL list containing a single ACL which uses the
+// provided permissions, with the scheme "auth", and ID "", which is used
+// by ZooKeeper to represent any authenticated user.
+func AuthACL(perms int32) []ACL {
+ return []ACL{{perms, "auth", ""}}
+}
+
+// WorldACL produces an ACL list containing a single ACL which uses the
+// provided permissions, with the scheme "world", and ID "anyone", which
+// is used by ZooKeeper to represent any user at all.
+func WorldACL(perms int32) []ACL {
+ return []ACL{{perms, "world", "anyone"}}
+}
+
+func DigestACL(perms int32, user, password string) []ACL {
+ userPass := []byte(fmt.Sprintf("%s:%s", user, password))
+ h := sha1.New()
+ if n, err := h.Write(userPass); err != nil || n != len(userPass) {
+ panic("SHA1 failed")
+ }
+ digest := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}}
+}
+
+// FormatServers takes a slice of addresses, and makes sure they are in a format
+// that resembles addr:port. If the server has no port provided, the
+// DefaultPort constant is added to the end.
+func FormatServers(servers []string) []string {
+ for i := range servers {
+ if !strings.Contains(servers[i], ":") {
+ servers[i] = servers[i] + ":" + strconv.Itoa(DefaultPort)
+ }
+ }
+ return servers
+}
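+
+// For example, with DefaultPort being 2181:
+//
+//   FormatServers([]string{"10.0.0.1", "10.0.0.2:2182"})
+//   // => []string{"10.0.0.1:2181", "10.0.0.2:2182"}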
+
+// stringShuffle performs a Fisher-Yates shuffle on a slice of strings
+func stringShuffle(s []string) {
+ for i := len(s) - 1; i > 0; i-- {
+ j := rand.Intn(i + 1)
+ s[i], s[j] = s[j], s[i]
+ }
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go
new file mode 100644
index 0000000000..ccf390c9cf
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go
@@ -0,0 +1,252 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+)
+
+type ErrBatchCorrupted struct {
+ Reason string
+}
+
+func (e *ErrBatchCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
+}
+
+func newErrBatchCorrupted(reason string) error {
+ return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
+}
+
+const (
+ batchHdrLen = 8 + 4
+ batchGrowRec = 3000
+)
+
+type BatchReplay interface {
+ Put(key, value []byte)
+ Delete(key []byte)
+}
+
+// Batch is a write batch.
+type Batch struct {
+ data []byte
+ rLen, bLen int
+ seq uint64
+ sync bool
+}
+
+func (b *Batch) grow(n int) {
+ off := len(b.data)
+ if off == 0 {
+ off = batchHdrLen
+ if b.data != nil {
+ b.data = b.data[:off]
+ }
+ }
+ if cap(b.data)-off < n {
+ if b.data == nil {
+ b.data = make([]byte, off, off+n)
+ } else {
+ odata := b.data
+ div := 1
+ if b.rLen > batchGrowRec {
+ div = b.rLen / batchGrowRec
+ }
+ b.data = make([]byte, off, off+n+(off-batchHdrLen)/div)
+ copy(b.data, odata)
+ }
+ }
+}
+
+func (b *Batch) appendRec(kt kType, key, value []byte) {
+ n := 1 + binary.MaxVarintLen32 + len(key)
+ if kt == ktVal {
+ n += binary.MaxVarintLen32 + len(value)
+ }
+ b.grow(n)
+ off := len(b.data)
+ data := b.data[:off+n]
+ data[off] = byte(kt)
+ off += 1
+ off += binary.PutUvarint(data[off:], uint64(len(key)))
+ copy(data[off:], key)
+ off += len(key)
+ if kt == ktVal {
+ off += binary.PutUvarint(data[off:], uint64(len(value)))
+ copy(data[off:], value)
+ off += len(value)
+ }
+ b.data = data[:off]
+ b.rLen++
+ // Include 8-byte ikey header
+ b.bLen += len(key) + len(value) + 8
+}
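+
+// The layout produced by appendRec is, per record:
+//
+//   [1-byte kType] [uvarint key length] [key]
+//   [uvarint value length] [value]        (ktVal records only)
+//
+// decodeRec below walks the same layout back.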
+
+// Put appends 'put operation' of the given key/value pair to the batch.
+// It is safe to modify the contents of the argument after Put returns.
+func (b *Batch) Put(key, value []byte) {
+ b.appendRec(ktVal, key, value)
+}
+
+// Delete appends 'delete operation' of the given key to the batch.
+// It is safe to modify the contents of the argument after Delete returns.
+func (b *Batch) Delete(key []byte) {
+ b.appendRec(ktDel, key, nil)
+}
+
+// Dump dumps batch contents. The returned slice can be loaded into the
+// batch using Load method.
+// The returned slice is not its own copy, so the contents should not be
+// modified.
+func (b *Batch) Dump() []byte {
+ return b.encode()
+}
+
+// Load loads given slice into the batch. Previous contents of the batch
+// will be discarded.
+// The given slice will not be copied and will be used as batch buffer, so
+// it is not safe to modify the contents of the slice.
+func (b *Batch) Load(data []byte) error {
+ return b.decode(0, data)
+}
+
+// Replay replays batch contents.
+func (b *Batch) Replay(r BatchReplay) error {
+ return b.decodeRec(func(i int, kt kType, key, value []byte) {
+ switch kt {
+ case ktVal:
+ r.Put(key, value)
+ case ktDel:
+ r.Delete(key)
+ }
+ })
+}
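+
+// Example usage (a sketch; myReplayer is a hypothetical BatchReplay
+// implementation that receives each buffered operation in order):
+//
+//   b := new(leveldb.Batch)
+//   b.Put([]byte("k1"), []byte("v1"))
+//   b.Delete([]byte("k2"))
+//   err := b.Replay(myReplayer)
+//
+// In normal use a batch is applied atomically with db.Write(b, nil).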
+
+// Len returns number of records in the batch.
+func (b *Batch) Len() int {
+ return b.rLen
+}
+
+// Reset resets the batch.
+func (b *Batch) Reset() {
+ b.data = b.data[:0]
+ b.seq = 0
+ b.rLen = 0
+ b.bLen = 0
+ b.sync = false
+}
+
+func (b *Batch) init(sync bool) {
+ b.sync = sync
+}
+
+func (b *Batch) append(p *Batch) {
+ if p.rLen > 0 {
+ b.grow(len(p.data) - batchHdrLen)
+ b.data = append(b.data, p.data[batchHdrLen:]...)
+ b.rLen += p.rLen
+ }
+ if p.sync {
+ b.sync = true
+ }
+}
+
+// size returns sums of key/value pair length plus 8-bytes ikey.
+func (b *Batch) size() int {
+ return b.bLen
+}
+
+func (b *Batch) encode() []byte {
+ b.grow(0)
+ binary.LittleEndian.PutUint64(b.data, b.seq)
+ binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))
+
+ return b.data
+}
+
+func (b *Batch) decode(prevSeq uint64, data []byte) error {
+ if len(data) < batchHdrLen {
+ return newErrBatchCorrupted("too short")
+ }
+
+ b.seq = binary.LittleEndian.Uint64(data)
+ if b.seq < prevSeq {
+ return newErrBatchCorrupted("invalid sequence number")
+ }
+ b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
+ if b.rLen < 0 {
+ return newErrBatchCorrupted("invalid records length")
+ }
+ // No need to be precise at this point, it won't be used anyway
+ b.bLen = len(data) - batchHdrLen
+ b.data = data
+
+ return nil
+}
+
+func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
+ off := batchHdrLen
+ for i := 0; i < b.rLen; i++ {
+ if off >= len(b.data) {
+ return newErrBatchCorrupted("invalid records length")
+ }
+
+ kt := kType(b.data[off])
+ if kt > ktVal {
+ return newErrBatchCorrupted("bad record: invalid type")
+ }
+ off += 1
+
+ x, n := binary.Uvarint(b.data[off:])
+ off += n
+ if n <= 0 || off+int(x) > len(b.data) {
+ return newErrBatchCorrupted("bad record: invalid key length")
+ }
+ key := b.data[off : off+int(x)]
+ off += int(x)
+ var value []byte
+ if kt == ktVal {
+ x, n := binary.Uvarint(b.data[off:])
+ off += n
+ if n <= 0 || off+int(x) > len(b.data) {
+ return newErrBatchCorrupted("bad record: invalid value length")
+ }
+ value = b.data[off : off+int(x)]
+ off += int(x)
+ }
+
+ f(i, kt, key, value)
+ }
+
+ return nil
+}
+
+func (b *Batch) memReplay(to *memdb.DB) error {
+ return b.decodeRec(func(i int, kt kType, key, value []byte) {
+ ikey := newIkey(key, b.seq+uint64(i), kt)
+ to.Put(ikey, value)
+ })
+}
+
+func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
+ if err := b.decode(prevSeq, data); err != nil {
+ return err
+ }
+ return b.memReplay(to)
+}
+
+func (b *Batch) revertMemReplay(to *memdb.DB) error {
+ return b.decodeRec(func(i int, kt kType, key, value []byte) {
+ ikey := newIkey(key, b.seq+uint64(i), kt)
+ to.Delete(ikey)
+ })
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
new file mode 100644
index 0000000000..c9670de5de
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -0,0 +1,676 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package cache provides an interface and implementations of cache algorithms.
+package cache
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Cacher provides an interface to implement caching functionality.
+// An implementation must be goroutine-safe.
+type Cacher interface {
+ // Capacity returns cache capacity.
+ Capacity() int
+
+ // SetCapacity sets cache capacity.
+ SetCapacity(capacity int)
+
+ // Promote promotes the 'cache node'.
+ Promote(n *Node)
+
+ // Ban evicts the 'cache node' and prevent subsequent 'promote'.
+ Ban(n *Node)
+
+ // Evict evicts the 'cache node'.
+ Evict(n *Node)
+
+ // EvictNS evicts 'cache node' with the given namespace.
+ EvictNS(ns uint64)
+
+ // EvictAll evicts all 'cache node'.
+ EvictAll()
+
+ // Close closes the 'cache tree'
+ Close() error
+}
+
+// Value is a 'cacheable object'. It may implement util.Releaser; if
+// so, the Release method will be called once the object is released.
+type Value interface{}
+
+type CacheGetter struct {
+ Cache *Cache
+ NS uint64
+}
+
+func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+ return g.Cache.Get(g.NS, key, setFunc)
+}
+
+// The hash tables implementation is based on:
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
+
+const (
+ mInitialSize = 1 << 4
+ mOverflowThreshold = 1 << 5
+ mOverflowGrowThreshold = 1 << 7
+)
+
+type mBucket struct {
+ mu sync.Mutex
+ node []*Node
+ frozen bool
+}
+
+func (b *mBucket) freeze() []*Node {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if !b.frozen {
+ b.frozen = true
+ }
+ return b.node
+}
+
+func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ for _, n := range b.node {
+ if n.hash == hash && n.ns == ns && n.key == key {
+ atomic.AddInt32(&n.ref, 1)
+ b.mu.Unlock()
+ return true, false, n
+ }
+ }
+
+ // Get only.
+ if noset {
+ b.mu.Unlock()
+ return true, false, nil
+ }
+
+ // Create node.
+ n = &Node{
+ r: r,
+ hash: hash,
+ ns: ns,
+ key: key,
+ ref: 1,
+ }
+ // Add node to bucket.
+ b.node = append(b.node, n)
+ bLen := len(b.node)
+ b.mu.Unlock()
+
+ // Update counter.
+ grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
+ if bLen > mOverflowThreshold {
+ grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
+ }
+
+ // Grow.
+ if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) << 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+
+ return true, true, n
+}
+
+func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ var (
+ n *Node
+ bLen int
+ )
+ for i := range b.node {
+ n = b.node[i]
+ if n.ns == ns && n.key == key {
+ if atomic.LoadInt32(&n.ref) == 0 {
+ deleted = true
+
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Remove node from bucket.
+ b.node = append(b.node[:i], b.node[i+1:]...)
+ bLen = len(b.node)
+ }
+ break
+ }
+ }
+ b.mu.Unlock()
+
+ if deleted {
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+
+ // Update counter.
+ atomic.AddInt32(&r.size, int32(n.size)*-1)
+ shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
+ if bLen >= mOverflowThreshold {
+ atomic.AddInt32(&h.overflow, -1)
+ }
+
+ // Shrink.
+ if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) >> 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+ }
+
+ return true, deleted
+}
+
+type mNode struct {
+ buckets []unsafe.Pointer // []*mBucket
+ mask uint32
+ pred unsafe.Pointer // *mNode
+ resizeInProgess int32
+
+ overflow int32
+ growThreshold int32
+ shrinkThreshold int32
+}
+
+func (n *mNode) initBucket(i uint32) *mBucket {
+ if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
+ return b
+ }
+
+ p := (*mNode)(atomic.LoadPointer(&n.pred))
+ if p != nil {
+ var node []*Node
+ if n.mask > p.mask {
+ // Grow.
+ pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
+ if pb == nil {
+ pb = p.initBucket(i & p.mask)
+ }
+ m := pb.freeze()
+ // Split nodes.
+ for _, x := range m {
+ if x.hash&n.mask == i {
+ node = append(node, x)
+ }
+ }
+ } else {
+ // Shrink.
+ pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
+ if pb0 == nil {
+ pb0 = p.initBucket(i)
+ }
+ pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
+ if pb1 == nil {
+ pb1 = p.initBucket(i + uint32(len(n.buckets)))
+ }
+ m0 := pb0.freeze()
+ m1 := pb1.freeze()
+ // Merge nodes.
+ node = make([]*Node, 0, len(m0)+len(m1))
+ node = append(node, m0...)
+ node = append(node, m1...)
+ }
+ b := &mBucket{node: node}
+ if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
+ if len(node) > mOverflowThreshold {
+ atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
+ }
+ return b
+ }
+ }
+
+ return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
+}
+
+func (n *mNode) initBuckets() {
+ for i := range n.buckets {
+ n.initBucket(uint32(i))
+ }
+ atomic.StorePointer(&n.pred, nil)
+}
+
+// Cache is a 'cache map'.
+type Cache struct {
+ mu sync.RWMutex
+ mHead unsafe.Pointer // *mNode
+ nodes int32
+ size int32
+ cacher Cacher
+ closed bool
+}
+
+// NewCache creates a new 'cache map'. The cacher is optional and
+// may be nil.
+func NewCache(cacher Cacher) *Cache {
+ h := &mNode{
+ buckets: make([]unsafe.Pointer, mInitialSize),
+ mask: mInitialSize - 1,
+ growThreshold: int32(mInitialSize * mOverflowThreshold),
+ shrinkThreshold: 0,
+ }
+ for i := range h.buckets {
+ h.buckets[i] = unsafe.Pointer(&mBucket{})
+ }
+ r := &Cache{
+ mHead: unsafe.Pointer(h),
+ cacher: cacher,
+ }
+ return r
+}
+
+func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
+ h := (*mNode)(atomic.LoadPointer(&r.mHead))
+ i := hash & h.mask
+ b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
+ if b == nil {
+ b = h.initBucket(i)
+ }
+ return h, b
+}
+
+func (r *Cache) delete(n *Node) bool {
+ for {
+ h, b := r.getBucket(n.hash)
+ done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
+ if done {
+ return deleted
+ }
+ }
+ return false
+}
+
+// Nodes returns number of 'cache node' in the map.
+func (r *Cache) Nodes() int {
+ return int(atomic.LoadInt32(&r.nodes))
+}
+
+// Size returns sums of 'cache node' size in the map.
+func (r *Cache) Size() int {
+ return int(atomic.LoadInt32(&r.size))
+}
+
+// Capacity returns cache capacity.
+func (r *Cache) Capacity() int {
+ if r.cacher == nil {
+ return 0
+ }
+ return r.cacher.Capacity()
+}
+
+// SetCapacity sets cache capacity.
+func (r *Cache) SetCapacity(capacity int) {
+ if r.cacher != nil {
+ r.cacher.SetCapacity(capacity)
+ }
+}
+
+// Get gets 'cache node' with the given namespace and key.
+// If the cache node is not found and setFunc is not nil, Get will atomically create
+// the 'cache node' by calling setFunc. Otherwise Get returns nil.
+//
+// The returned 'cache handle' should be released after use by calling Release
+// method.
+func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return nil
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
+ if done {
+ if n != nil {
+ n.mu.Lock()
+ if n.value == nil {
+ if setFunc == nil {
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+
+ n.size, n.value = setFunc()
+ if n.value == nil {
+ n.size = 0
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+ atomic.AddInt32(&r.size, int32(n.size))
+ }
+ n.mu.Unlock()
+ if r.cacher != nil {
+ r.cacher.Promote(n)
+ }
+ return &Handle{unsafe.Pointer(n)}
+ }
+
+ break
+ }
+ }
+ return nil
+}
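+
+// A usage sketch of Get with a setFunc (namespace and key are caller-chosen
+// uint64s; loadExpensiveValue and use are hypothetical, and the handle must
+// be released when done):
+//
+//   c := cache.NewCache(cache.NewLRU(16 * 1024 * 1024))
+//   h := c.Get(0, 42, func() (int, cache.Value) {
+//       v := loadExpensiveValue()
+//       return len(v), v
+//   })
+//   if h != nil {
+//       use(h.Value())
+//       h.Release()
+//   }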
+
+// Delete removes and bans the 'cache node' with the given namespace and key.
+// A banned 'cache node' will never be inserted into the 'cache tree'. A ban
+// applies only to the particular 'cache node', so when a 'cache node' is
+// recreated it will not be banned.
+//
+// If onDel is not nil, then it will be executed if such 'cache node'
+// doesn't exist or once the 'cache node' is released.
+//
+// Delete returns true if such a 'cache node' exists.
+func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if onDel != nil {
+ n.mu.Lock()
+ n.onDel = append(n.onDel, onDel)
+ n.mu.Unlock()
+ }
+ if r.cacher != nil {
+ r.cacher.Ban(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ if onDel != nil {
+ onDel()
+ }
+
+ return false
+}
+
+// Evict evicts 'cache node' with the given namespace and key. This will
+// simply call Cacher.Evict.
+//
+// Evict returns true if such a 'cache node' exists.
+func (r *Cache) Evict(ns, key uint64) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if r.cacher != nil {
+ r.cacher.Evict(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ return false
+}
+
+// EvictNS evicts 'cache node' with the given namespace. This will
+// simply call Cacher.EvictNS.
+func (r *Cache) EvictNS(ns uint64) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictNS(ns)
+ }
+}
+
+// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
+func (r *Cache) EvictAll() {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ }
+}
+
+// Close closes the 'cache map' and releases all 'cache node'.
+func (r *Cache) Close() error {
+ r.mu.Lock()
+ if !r.closed {
+ r.closed = true
+
+ if r.cacher != nil {
+ if err := r.cacher.Close(); err != nil {
+ return err
+ }
+ }
+
+ h := (*mNode)(r.mHead)
+ h.initBuckets()
+
+ for i := range h.buckets {
+ b := (*mBucket)(h.buckets[i])
+ for _, n := range b.node {
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+ }
+ }
+ }
+ r.mu.Unlock()
+ return nil
+}
+
+// Node is a 'cache node'.
+type Node struct {
+ r *Cache
+
+ hash uint32
+ ns, key uint64
+
+ mu sync.Mutex
+ size int
+ value Value
+
+ ref int32
+ onDel []func()
+
+ CacheData unsafe.Pointer
+}
+
+// NS returns this 'cache node' namespace.
+func (n *Node) NS() uint64 {
+ return n.ns
+}
+
+// Key returns this 'cache node' key.
+func (n *Node) Key() uint64 {
+ return n.key
+}
+
+// Size returns this 'cache node' size.
+func (n *Node) Size() int {
+ return n.size
+}
+
+// Value returns this 'cache node' value.
+func (n *Node) Value() Value {
+ return n.value
+}
+
+// Ref returns this 'cache node' ref counter.
+func (n *Node) Ref() int32 {
+ return atomic.LoadInt32(&n.ref)
+}
+
+// GetHandle returns a handle for this 'cache node'.
+func (n *Node) GetHandle() *Handle {
+ if atomic.AddInt32(&n.ref, 1) <= 1 {
+ panic("BUG: Node.GetHandle on zero ref")
+ }
+ return &Handle{unsafe.Pointer(n)}
+}
+
+func (n *Node) unref() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.delete(n)
+ }
+}
+
+func (n *Node) unrefLocked() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.mu.RLock()
+ if !n.r.closed {
+ n.r.delete(n)
+ }
+ n.r.mu.RUnlock()
+ }
+}
+
+type Handle struct {
+ n unsafe.Pointer // *Node
+}
+
+func (h *Handle) Value() Value {
+ n := (*Node)(atomic.LoadPointer(&h.n))
+ if n != nil {
+ return n.value
+ }
+ return nil
+}
+
+func (h *Handle) Release() {
+ nPtr := atomic.LoadPointer(&h.n)
+ if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
+ n := (*Node)(nPtr)
+ n.unrefLocked()
+ }
+}
+
+func murmur32(ns, key uint64, seed uint32) uint32 {
+ const (
+ m = uint32(0x5bd1e995)
+ r = 24
+ )
+
+ k1 := uint32(ns >> 32)
+ k2 := uint32(ns)
+ k3 := uint32(key >> 32)
+ k4 := uint32(key)
+
+ k1 *= m
+ k1 ^= k1 >> r
+ k1 *= m
+
+ k2 *= m
+ k2 ^= k2 >> r
+ k2 *= m
+
+ k3 *= m
+ k3 ^= k3 >> r
+ k3 *= m
+
+ k4 *= m
+ k4 ^= k4 >> r
+ k4 *= m
+
+ h := seed
+
+ h *= m
+ h ^= k1
+ h *= m
+ h ^= k2
+ h *= m
+ h ^= k3
+ h *= m
+ h ^= k4
+
+ h ^= h >> 13
+ h *= m
+ h ^= h >> 15
+
+ return h
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go
new file mode 100644
index 0000000000..d9a84cde15
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go
@@ -0,0 +1,195 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+ "sync"
+ "unsafe"
+)
+
+type lruNode struct {
+ n *Node
+ h *Handle
+ ban bool
+
+ next, prev *lruNode
+}
+
+func (n *lruNode) insert(at *lruNode) {
+ x := at.next
+ at.next = n
+ n.prev = at
+ n.next = x
+ x.prev = n
+}
+
+func (n *lruNode) remove() {
+ if n.prev != nil {
+ n.prev.next = n.next
+ n.next.prev = n.prev
+ n.prev = nil
+ n.next = nil
+ } else {
+ panic("BUG: removing removed node")
+ }
+}
+
+type lru struct {
+ mu sync.Mutex
+ capacity int
+ used int
+ recent lruNode
+}
+
+func (r *lru) reset() {
+ r.recent.next = &r.recent
+ r.recent.prev = &r.recent
+ r.used = 0
+}
+
+func (r *lru) Capacity() int {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return r.capacity
+}
+
+func (r *lru) SetCapacity(capacity int) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ r.capacity = capacity
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Promote(n *Node) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ if n.CacheData == nil {
+ if n.Size() <= r.capacity {
+ rn := &lruNode{n: n, h: n.GetHandle()}
+ rn.insert(&r.recent)
+ n.CacheData = unsafe.Pointer(rn)
+ r.used += n.Size()
+
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.insert(&r.recent)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Ban(n *Node) {
+ r.mu.Lock()
+ if n.CacheData == nil {
+ n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.ban = true
+ r.used -= rn.n.Size()
+ r.mu.Unlock()
+
+ rn.h.Release()
+ rn.h = nil
+ return
+ }
+ }
+ r.mu.Unlock()
+}
+
+func (r *lru) Evict(n *Node) {
+ r.mu.Lock()
+ rn := (*lruNode)(n.CacheData)
+ if rn == nil || rn.ban {
+ r.mu.Unlock()
+ return
+ }
+ n.CacheData = nil
+ r.mu.Unlock()
+
+ rn.h.Release()
+}
+
+func (r *lru) EvictNS(ns uint64) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ for e := r.recent.prev; e != &r.recent; {
+ rn := e
+ e = e.prev
+ if rn.n.NS() == ns {
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) EvictAll() {
+ r.mu.Lock()
+ back := r.recent.prev
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.n.CacheData = nil
+ }
+ r.reset()
+ r.mu.Unlock()
+
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Close() error {
+ return nil
+}
+
+// NewLRU creates a new LRU cache.
+func NewLRU(capacity int) Cacher {
+ r := &lru{capacity: capacity}
+ r.reset()
+ return r
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
new file mode 100644
index 0000000000..d33d5e9c78
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import "github.com/syndtr/goleveldb/leveldb/comparer"
+
+type iComparer struct {
+ ucmp comparer.Comparer
+}
+
+func (icmp *iComparer) uName() string {
+ return icmp.ucmp.Name()
+}
+
+func (icmp *iComparer) uCompare(a, b []byte) int {
+ return icmp.ucmp.Compare(a, b)
+}
+
+func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
+ return icmp.ucmp.Separator(dst, a, b)
+}
+
+func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
+ return icmp.ucmp.Successor(dst, b)
+}
+
+func (icmp *iComparer) Name() string {
+ return icmp.uName()
+}
+
+func (icmp *iComparer) Compare(a, b []byte) int {
+ x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
+ if x == 0 {
+ if m, n := iKey(a).num(), iKey(b).num(); m > n {
+ x = -1
+ } else if m < n {
+ x = 1
+ }
+ }
+ return x
+}
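+
+// Note: the effect of the tie-break above is that internal keys sort by user
+// key ascending and then by sequence number descending, so for equal user
+// keys the most recent entry comes first.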
+
+func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
+ ua, ub := iKey(a).ukey(), iKey(b).ukey()
+ dst = icmp.ucmp.Separator(dst, ua, ub)
+ if dst == nil {
+ return nil
+ }
+ if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
+ dst = append(dst, kMaxNumBytes...)
+ } else {
+ // This does not rule out the possibility that dst ends up longer than len(ub).
+ dst = append(dst, a[len(a)-8:]...)
+ }
+ return dst
+}
+
+func (icmp *iComparer) Successor(dst, b []byte) []byte {
+ ub := iKey(b).ukey()
+ dst = icmp.ucmp.Successor(dst, ub)
+ if dst == nil {
+ return nil
+ }
+ if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
+ dst = append(dst, kMaxNumBytes...)
+ } else {
+ // This does not rule out the possibility that dst ends up longer than len(ub).
+ dst = append(dst, b[len(b)-8:]...)
+ }
+ return dst
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
new file mode 100644
index 0000000000..14dddf88dd
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package comparer
+
+import "bytes"
+
+type bytesComparer struct{}
+
+func (bytesComparer) Compare(a, b []byte) int {
+ return bytes.Compare(a, b)
+}
+
+func (bytesComparer) Name() string {
+ return "leveldb.BytewiseComparator"
+}
+
+func (bytesComparer) Separator(dst, a, b []byte) []byte {
+ i, n := 0, len(a)
+ if n > len(b) {
+ n = len(b)
+ }
+ for ; i < n && a[i] == b[i]; i++ {
+ }
+ if i >= n {
+ // Do not shorten if one string is a prefix of the other
+ } else if c := a[i]; c < 0xff && c+1 < b[i] {
+ dst = append(dst, a[:i+1]...)
+ dst[i]++
+ return dst
+ }
+ return nil
+}
+
+func (bytesComparer) Successor(dst, b []byte) []byte {
+ for i, c := range b {
+ if c != 0xff {
+ dst = append(dst, b[:i+1]...)
+ dst[i]++
+ return dst
+ }
+ }
+ return nil
+}
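+
+// For example:
+//
+//   Separator(nil, []byte("abc"), []byte("abf")) // => "abd"
+//   Successor(nil, []byte("abc"))                // => "b"
+//   Successor(nil, []byte{0xff, 0xff})           // => nil (no successor)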
+
+// DefaultComparer is the default implementation of the Comparer interface.
+// It uses the natural byte ordering, consistent with bytes.Compare.
+var DefaultComparer = bytesComparer{}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
new file mode 100644
index 0000000000..14a28f16fc
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package comparer provides interface and implementation for ordering
+// sets of data.
+package comparer
+
+// BasicComparer is the interface that wraps the basic Compare method.
+type BasicComparer interface {
+ // Compare returns -1, 0, or +1 depending on whether a is 'less than',
+ // 'equal to' or 'greater than' b. The two arguments can only be 'equal'
+ // if their contents are exactly equal. Furthermore, the empty slice
+ // must be 'less than' any non-empty slice.
+ Compare(a, b []byte) int
+}
+
+// Comparer defines a total ordering over the space of []byte keys: a 'less
+// than' relationship.
+type Comparer interface {
+ BasicComparer
+
+ // Name returns the name of the comparer.
+ //
+ // The Level-DB on-disk format stores the comparer name, and opening a
+ // database with a different comparer from the one it was created with
+ // will result in an error.
+ //
+ // An implementation should switch to a new name whenever the comparer implementation
+ // changes in a way that will cause the relative ordering of any two keys
+ // to change.
+ //
+ // Names starting with "leveldb." are reserved and should not be used
+ // by any users of this package.
+ Name() string
+
+ // Below are advanced functions used to reduce the space requirements
+ // for internal data structures such as index blocks.
+
+ // Separator appends a sequence of bytes x to dst such that a <= x && x < b,
+ // where 'less than' is consistent with Compare. An implementation should
+ // return nil if x is equal to a.
+ //
+ // The contents of a and b must not be modified in any way; doing so
+ // may corrupt the internal state.
+ Separator(dst, a, b []byte) []byte
+
+ // Successor appends a sequence of bytes x to dst such that x >= b, where
+ // 'less than' is consistent with Compare. An implementation should return
+ // nil if x is equal to b.
+ //
+ // The contents of b must not be modified in any way; doing so may
+ // corrupt the internal state.
+ Successor(dst, b []byte) []byte
+}
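+
+// A minimal custom Comparer sketch that reverses the default byte order.
+// Separator and Successor may simply return nil, which disables the
+// space-saving shortcuts but remains correct (assumes a bytes import; the
+// type and name are illustrative):
+//
+//   type reverseComparer struct{}
+//
+//   func (reverseComparer) Compare(a, b []byte) int           { return -bytes.Compare(a, b) }
+//   func (reverseComparer) Name() string                      { return "example.Reverse" }
+//   func (reverseComparer) Separator(dst, a, b []byte) []byte { return nil }
+//   func (reverseComparer) Successor(dst, b []byte) []byte    { return nil }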
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
new file mode 100644
index 0000000000..88a3e0db3b
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
@@ -0,0 +1,1071 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "container/list"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/table"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// DB is a LevelDB database.
+type DB struct {
+ // Need 64-bit alignment.
+ seq uint64
+
+ // Session.
+ s *session
+
+ // MemDB.
+ memMu sync.RWMutex
+ memPool chan *memdb.DB
+ mem, frozenMem *memDB
+ journal *journal.Writer
+ journalWriter storage.Writer
+ journalFile storage.File
+ frozenJournalFile storage.File
+ frozenSeq uint64
+
+ // Snapshot.
+ snapsMu sync.Mutex
+ snapsList *list.List
+
+ // Stats.
+ aliveSnaps, aliveIters int32
+
+ // Write.
+ writeC chan *Batch
+ writeMergedC chan bool
+ writeLockC chan struct{}
+ writeAckC chan error
+ writeDelay time.Duration
+ writeDelayN int
+ journalC chan *Batch
+ journalAckC chan error
+
+ // Compaction.
+ tcompCmdC chan cCmd
+ tcompPauseC chan chan<- struct{}
+ mcompCmdC chan cCmd
+ compErrC chan error
+ compPerErrC chan error
+ compErrSetC chan error
+ compWriteLocking bool
+ compStats []cStats
+
+ // Close.
+ closeW sync.WaitGroup
+ closeC chan struct{}
+ closed uint32
+ closer io.Closer
+}
+
+func openDB(s *session) (*DB, error) {
+ s.log("db@open opening")
+ start := time.Now()
+ db := &DB{
+ s: s,
+ // Initial sequence
+ seq: s.stSeqNum,
+ // MemDB
+ memPool: make(chan *memdb.DB, 1),
+ // Snapshot
+ snapsList: list.New(),
+ // Write
+ writeC: make(chan *Batch),
+ writeMergedC: make(chan bool),
+ writeLockC: make(chan struct{}, 1),
+ writeAckC: make(chan error),
+ journalC: make(chan *Batch),
+ journalAckC: make(chan error),
+ // Compaction
+ tcompCmdC: make(chan cCmd),
+ tcompPauseC: make(chan chan<- struct{}),
+ mcompCmdC: make(chan cCmd),
+ compErrC: make(chan error),
+ compPerErrC: make(chan error),
+ compErrSetC: make(chan error),
+ compStats: make([]cStats, s.o.GetNumLevel()),
+ // Close
+ closeC: make(chan struct{}),
+ }
+
+ // Read-only mode.
+ readOnly := s.o.GetReadOnly()
+
+ if readOnly {
+ // Recover journals (read-only mode).
+ if err := db.recoverJournalRO(); err != nil {
+ return nil, err
+ }
+ } else {
+ // Recover journals.
+ if err := db.recoverJournal(); err != nil {
+ return nil, err
+ }
+
+ // Remove any obsolete files.
+ if err := db.checkAndCleanFiles(); err != nil {
+ // Close journal.
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ }
+ return nil, err
+ }
+
+ }
+
+ // Doesn't need to be included in the wait group.
+ go db.compactionError()
+ go db.mpoolDrain()
+
+ if readOnly {
+ db.SetReadOnly()
+ } else {
+ db.closeW.Add(3)
+ go db.tCompaction()
+ go db.mCompaction()
+ go db.jWriter()
+ }
+
+ s.logf("db@open done T·%v", time.Since(start))
+
+ runtime.SetFinalizer(db, (*DB).Close)
+ return db, nil
+}
+
+// Open opens or creates a DB for the given storage.
+// The DB will be created if it does not exist, unless ErrorIfMissing is
+// true. Also, if ErrorIfExist is true and the DB exists, Open will return
+// an os.ErrExist error.
+//
+// Open will return an error of type ErrCorrupted if corruption is
+// detected in the DB. A corrupted DB can be recovered with the Recover
+// function.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling the Close method.
+func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ s.close()
+ s.release()
+ }
+ }()
+
+ err = s.recover()
+ if err != nil {
+ if !os.IsNotExist(err) || s.o.GetErrorIfMissing() {
+ return
+ }
+ err = s.create()
+ if err != nil {
+ return
+ }
+ } else if s.o.GetErrorIfExist() {
+ err = os.ErrExist
+ return
+ }
+
+ return openDB(s)
+}
+
+// OpenFile opens or creates a DB for the given path.
+// The DB will be created if it does not exist, unless ErrorIfMissing is
+// true. Also, if ErrorIfExist is true and the DB exists, OpenFile will
+// return an os.ErrExist error.
+//
+// OpenFile uses the standard file-system-backed storage implementation
+// as described in the leveldb/storage package.
+//
+// OpenFile will return an error of type ErrCorrupted if corruption is
+// detected in the DB. A corrupted DB can be recovered with the Recover
+// function.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling the Close method.
+func OpenFile(path string, o *opt.Options) (db *DB, err error) {
+ stor, err := storage.OpenFile(path)
+ if err != nil {
+ return
+ }
+ db, err = Open(stor, o)
+ if err != nil {
+ stor.Close()
+ } else {
+ db.closer = stor
+ }
+ return
+}
+
+// Recover recovers and opens a DB with missing or corrupted manifest files
+// for the given storage. It will ignore any manifest files, valid or not.
+// The DB must already exist or it will return an error.
+// Also, Recover will ignore the ErrorIfMissing and ErrorIfExist options.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling the Close method.
+func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ s.close()
+ s.release()
+ }
+ }()
+
+ err = recoverTable(s, o)
+ if err != nil {
+ return
+ }
+ return openDB(s)
+}
+
+// RecoverFile recovers and opens a DB with missing or corrupted manifest files
+// for the given path. It will ignore any manifest files, valid or not.
+// The DB must already exist or it will return an error.
+// Also, RecoverFile will ignore the ErrorIfMissing and ErrorIfExist options.
+//
+// RecoverFile uses the standard file-system-backed storage implementation
+// as described in the leveldb/storage package.
+//
+// The returned DB instance is goroutine-safe.
+// The DB must be closed after use, by calling the Close method.
+func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
+ stor, err := storage.OpenFile(path)
+ if err != nil {
+ return
+ }
+ db, err = Recover(stor, o)
+ if err != nil {
+ stor.Close()
+ } else {
+ db.closer = stor
+ }
+ return
+}
+
+func recoverTable(s *session, o *opt.Options) error {
+ o = dupOptions(o)
+ // Mask StrictReader; let StrictRecovery do its job.
+ o.Strict &= ^opt.StrictReader
+
+ // Get all tables and sort them by file number.
+ tableFiles_, err := s.getFiles(storage.TypeTable)
+ if err != nil {
+ return err
+ }
+ tableFiles := files(tableFiles_)
+ tableFiles.sort()
+
+ var (
+ maxSeq uint64
+ recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int
+
+ // We will drop corrupted tables.
+ strict = o.GetStrict(opt.StrictRecovery)
+ noSync = o.GetNoSync()
+
+ rec = &sessionRecord{}
+ bpool = util.NewBufferPool(o.GetBlockSize() + 5)
+ )
+ buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
+ tmp = s.newTemp()
+ writer, err := tmp.Create()
+ if err != nil {
+ return
+ }
+ defer func() {
+ writer.Close()
+ if err != nil {
+ tmp.Remove()
+ tmp = nil
+ }
+ }()
+
+ // Copy entries.
+ tw := table.NewWriter(writer, o)
+ for iter.Next() {
+ key := iter.Key()
+ if validIkey(key) {
+ err = tw.Append(key, iter.Value())
+ if err != nil {
+ return
+ }
+ }
+ }
+ err = iter.Error()
+ if err != nil {
+ return
+ }
+ err = tw.Close()
+ if err != nil {
+ return
+ }
+ if !noSync {
+ err = writer.Sync()
+ if err != nil {
+ return
+ }
+ }
+ size = int64(tw.BytesLen())
+ return
+ }
+ recoverTable := func(file storage.File) error {
+ s.logf("table@recovery recovering @%d", file.Num())
+ reader, err := file.Open()
+ if err != nil {
+ return err
+ }
+ var closed bool
+ defer func() {
+ if !closed {
+ reader.Close()
+ }
+ }()
+
+ // Get file size.
+ size, err := reader.Seek(0, 2)
+ if err != nil {
+ return err
+ }
+
+ var (
+ tSeq uint64
+ tgoodKey, tcorruptedKey, tcorruptedBlock int
+ imin, imax []byte
+ )
+ tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
+ if err != nil {
+ return err
+ }
+ iter := tr.NewIterator(nil, nil)
+ if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
+ itererr.SetErrorCallback(func(err error) {
+ if errors.IsCorrupted(err) {
+ s.logf("table@recovery block corruption @%d %q", file.Num(), err)
+ tcorruptedBlock++
+ }
+ })
+ }
+
+ // Scan the table.
+ for iter.Next() {
+ key := iter.Key()
+ _, seq, _, kerr := parseIkey(key)
+ if kerr != nil {
+ tcorruptedKey++
+ continue
+ }
+ tgoodKey++
+ if seq > tSeq {
+ tSeq = seq
+ }
+ if imin == nil {
+ imin = append([]byte{}, key...)
+ }
+ imax = append(imax[:0], key...)
+ }
+ if err := iter.Error(); err != nil {
+ iter.Release()
+ return err
+ }
+ iter.Release()
+
+ goodKey += tgoodKey
+ corruptedKey += tcorruptedKey
+ corruptedBlock += tcorruptedBlock
+
+ if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
+ droppedTable++
+ s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+ return nil
+ }
+
+ if tgoodKey > 0 {
+ if tcorruptedKey > 0 || tcorruptedBlock > 0 {
+ // Rebuild the table.
+ s.logf("table@recovery rebuilding @%d", file.Num())
+ iter := tr.NewIterator(nil, nil)
+ tmp, newSize, err := buildTable(iter)
+ iter.Release()
+ if err != nil {
+ return err
+ }
+ closed = true
+ reader.Close()
+ if err := file.Replace(tmp); err != nil {
+ return err
+ }
+ size = newSize
+ }
+ if tSeq > maxSeq {
+ maxSeq = tSeq
+ }
+ recoveredKey += tgoodKey
+ // Add table to level 0.
+ rec.addTable(0, file.Num(), uint64(size), imin, imax)
+ s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+ } else {
+ droppedTable++
+ s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
+ }
+
+ return nil
+ }
+
+ // Recover all tables.
+ if len(tableFiles) > 0 {
+ s.logf("table@recovery F·%d", len(tableFiles))
+
+ // Mark file number as used.
+ s.markFileNum(tableFiles[len(tableFiles)-1].Num())
+
+ for _, file := range tableFiles {
+ if err := recoverTable(file); err != nil {
+ return err
+ }
+ }
+
+ s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
+ }
+
+ // Set sequence number.
+ rec.setSeqNum(maxSeq)
+
+ // Create new manifest.
+ if err := s.create(); err != nil {
+ return err
+ }
+
+ // Commit.
+ return s.commit(rec)
+}
+
+func (db *DB) recoverJournal() error {
+ // Get all journals and sort them by file number.
+ allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
+ if err != nil {
+ return err
+ }
+ files(allJournalFiles).sort()
+
+ // Journals that will be recovered.
+ var recJournalFiles []storage.File
+ for _, jf := range allJournalFiles {
+ if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
+ recJournalFiles = append(recJournalFiles, jf)
+ }
+ }
+
+ var (
+ of storage.File // Obsolete file.
+ rec = &sessionRecord{}
+ )
+
+ // Recover journals.
+ if len(recJournalFiles) > 0 {
+ db.logf("journal@recovery F·%d", len(recJournalFiles))
+
+ // Mark file number as used.
+ db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())
+
+ var (
+ // Options.
+ strict = db.s.o.GetStrict(opt.StrictJournal)
+ checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
+ writeBuffer = db.s.o.GetWriteBuffer()
+
+ jr *journal.Reader
+ mdb = memdb.New(db.s.icmp, writeBuffer)
+ buf = &util.Buffer{}
+ batch = &Batch{}
+ )
+
+ for _, jf := range recJournalFiles {
+ db.logf("journal@recovery recovering @%d", jf.Num())
+
+ fr, err := jf.Open()
+ if err != nil {
+ return err
+ }
+
+ // Create or reset journal reader instance.
+ if jr == nil {
+ jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
+ } else {
+ jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
+ }
+
+ // Flush memdb and remove obsolete journal file.
+ if of != nil {
+ if mdb.Len() > 0 {
+ if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
+ fr.Close()
+ return err
+ }
+ }
+
+ rec.setJournalNum(jf.Num())
+ rec.setSeqNum(db.seq)
+ if err := db.s.commit(rec); err != nil {
+ fr.Close()
+ return err
+ }
+ rec.resetAddedTables()
+
+ of.Remove()
+ of = nil
+ }
+
+ // Replay journal to memdb.
+ mdb.Reset()
+ for {
+ r, err := jr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+
+ fr.Close()
+ return errors.SetFile(err, jf)
+ }
+
+ buf.Reset()
+ if _, err := buf.ReadFrom(r); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ // This is the error returned due to corruption when strict == false.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFile(err, jf)
+ }
+ if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
+ if !strict && errors.IsCorrupted(err) {
+ db.s.logf("journal error: %v (skipped)", err)
+ // We won't apply sequence number as it might be corrupted.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFile(err, jf)
+ }
+
+ // Save sequence number.
+ db.seq = batch.seq + uint64(batch.Len())
+
+ // Flush it if large enough.
+ if mdb.Size() >= writeBuffer {
+ if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+ fr.Close()
+ return err
+ }
+
+ mdb.Reset()
+ }
+ }
+
+ fr.Close()
+ of = jf
+ }
+
+ // Flush the last memdb.
+ if mdb.Len() > 0 {
+ if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Create a new journal.
+ if _, err := db.newMem(0); err != nil {
+ return err
+ }
+
+ // Commit.
+ rec.setJournalNum(db.journalFile.Num())
+ rec.setSeqNum(db.seq)
+ if err := db.s.commit(rec); err != nil {
+ // Close journal on error.
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ }
+ return err
+ }
+
+ // Remove the last obsolete journal file.
+ if of != nil {
+ of.Remove()
+ }
+
+ return nil
+}
+
+func (db *DB) recoverJournalRO() error {
+ // Get all journals and sort them by file number.
+ allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
+ if err != nil {
+ return err
+ }
+ files(allJournalFiles).sort()
+
+ // Journals that will be recovered.
+ var recJournalFiles []storage.File
+ for _, jf := range allJournalFiles {
+ if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
+ recJournalFiles = append(recJournalFiles, jf)
+ }
+ }
+
+ var (
+ // Options.
+ strict = db.s.o.GetStrict(opt.StrictJournal)
+ checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
+ writeBuffer = db.s.o.GetWriteBuffer()
+
+ mdb = memdb.New(db.s.icmp, writeBuffer)
+ )
+
+ // Recover journals.
+ if len(recJournalFiles) > 0 {
+ db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))
+
+ var (
+ jr *journal.Reader
+ buf = &util.Buffer{}
+ batch = &Batch{}
+ )
+
+ for _, jf := range recJournalFiles {
+ db.logf("journal@recovery recovering @%d", jf.Num())
+
+ fr, err := jf.Open()
+ if err != nil {
+ return err
+ }
+
+ // Create or reset journal reader instance.
+ if jr == nil {
+ jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
+ } else {
+ jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
+ }
+
+ // Replay journal to memdb.
+ for {
+ r, err := jr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+
+ fr.Close()
+ return errors.SetFile(err, jf)
+ }
+
+ buf.Reset()
+ if _, err := buf.ReadFrom(r); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ // This is the error returned due to corruption when strict == false.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFile(err, jf)
+ }
+ if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
+ if !strict && errors.IsCorrupted(err) {
+ db.s.logf("journal error: %v (skipped)", err)
+ // We won't apply sequence number as it might be corrupted.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFile(err, jf)
+ }
+
+ // Save sequence number.
+ db.seq = batch.seq + uint64(batch.Len())
+ }
+
+ fr.Close()
+ }
+ }
+
+ // Set memDB.
+ db.mem = &memDB{db: db, DB: mdb, ref: 1}
+
+ return nil
+}
+
+func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
+ ikey := newIkey(key, seq, ktSeek)
+
+ em, fm := db.getMems()
+ for _, m := range [...]*memDB{em, fm} {
+ if m == nil {
+ continue
+ }
+ defer m.decref()
+
+ mk, mv, me := m.Find(ikey)
+ if me == nil {
+ ukey, _, kt, kerr := parseIkey(mk)
+ if kerr != nil {
+ // Shouldn't have happened.
+ panic(kerr)
+ }
+ if db.s.icmp.uCompare(ukey, key) == 0 {
+ if kt == ktDel {
+ return nil, ErrNotFound
+ }
+ return append([]byte{}, mv...), nil
+ }
+ } else if me != ErrNotFound {
+ return nil, me
+ }
+ }
+
+ v := db.s.version()
+ value, cSched, err := v.get(ikey, ro, false)
+ v.release()
+ if cSched {
+ // Trigger table compaction.
+ db.compSendTrigger(db.tcompCmdC)
+ }
+ return
+}
+
+func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
+ ikey := newIkey(key, seq, ktSeek)
+
+ em, fm := db.getMems()
+ for _, m := range [...]*memDB{em, fm} {
+ if m == nil {
+ continue
+ }
+ defer m.decref()
+
+ mk, _, me := m.Find(ikey)
+ if me == nil {
+ ukey, _, kt, kerr := parseIkey(mk)
+ if kerr != nil {
+ // Shouldn't have happened.
+ panic(kerr)
+ }
+ if db.s.icmp.uCompare(ukey, key) == 0 {
+ if kt == ktDel {
+ return false, nil
+ }
+ return true, nil
+ }
+ } else if me != ErrNotFound {
+ return false, me
+ }
+ }
+
+ v := db.s.version()
+ _, cSched, err := v.get(ikey, ro, true)
+ v.release()
+ if cSched {
+ // Trigger table compaction.
+ db.compSendTrigger(db.tcompCmdC)
+ }
+ if err == nil {
+ ret = true
+ } else if err == ErrNotFound {
+ err = nil
+ }
+ return
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The returned slice is its own copy; it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.get(key, se.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.has(key, se.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the
+// underlying DB.
+// The returned iterator is not goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to contain only keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling the Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := db.ok(); err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ // The iterator holds the 'version' lock; 'version' is immutable, so the
+ // snapshot can be released after the iterator is created.
+ return db.newIterator(se.seq, slice, ro)
+}
+
+// GetSnapshot returns the latest snapshot of the underlying DB. A snapshot
+// is a frozen view of the DB state at a particular point in time. The
+// contents of the snapshot are guaranteed to be consistent.
+//
+// The snapshot must be released after use, by calling the Release method.
+func (db *DB) GetSnapshot() (*Snapshot, error) {
+ if err := db.ok(); err != nil {
+ return nil, err
+ }
+
+ return db.newSnapshot(), nil
+}
+
+// GetProperty returns value of the given property name.
+//
+// Property names:
+// leveldb.num-files-at-level{n}
+// Returns the number of files at level 'n'.
+// leveldb.stats
+// Returns statistics of the underlying DB.
+// leveldb.sstables
+// Returns sstables list for each level.
+// leveldb.blockpool
+// Returns block pool stats.
+// leveldb.cachedblock
+// Returns size of cached block.
+// leveldb.openedtables
+// Returns number of opened tables.
+// leveldb.alivesnaps
+// Returns number of alive snapshots.
+// leveldb.aliveiters
+// Returns number of alive iterators.
+func (db *DB) GetProperty(name string) (value string, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ const prefix = "leveldb."
+ if !strings.HasPrefix(name, prefix) {
+ return "", ErrNotFound
+ }
+ p := name[len(prefix):]
+
+ v := db.s.version()
+ defer v.release()
+
+ numFilesPrefix := "num-files-at-level"
+ switch {
+ case strings.HasPrefix(p, numFilesPrefix):
+ var level uint
+ var rest string
+ n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
+ if n != 1 || int(level) >= db.s.o.GetNumLevel() {
+ err = ErrNotFound
+ } else {
+ value = fmt.Sprint(v.tLen(int(level)))
+ }
+ case p == "stats":
+ value = "Compactions\n" +
+ " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
+ "-------+------------+---------------+---------------+---------------+---------------\n"
+ for level, tables := range v.tables {
+ duration, read, write := db.compStats[level].get()
+ if len(tables) == 0 && duration == 0 {
+ continue
+ }
+ value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
+ level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
+ float64(read)/1048576.0, float64(write)/1048576.0)
+ }
+ case p == "sstables":
+ for level, tables := range v.tables {
+ value += fmt.Sprintf("--- level %d ---\n", level)
+ for _, t := range tables {
+ value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
+ }
+ }
+ case p == "blockpool":
+ value = fmt.Sprintf("%v", db.s.tops.bpool)
+ case p == "cachedblock":
+ if db.s.tops.bcache != nil {
+ value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
+ } else {
+ value = ""
+ }
+ case p == "openedtables":
+ value = fmt.Sprintf("%d", db.s.tops.cache.Size())
+ case p == "alivesnaps":
+ value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
+ case p == "aliveiters":
+ value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
+ default:
+ err = ErrNotFound
+ }
+
+ return
+}
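For example, the documented properties can be inspected like this. This is a minimal sketch, not part of the patch; the DB path is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example.db", nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// "leveldb.stats" returns the compaction table built above.
	stats, err := db.GetProperty("leveldb.stats")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats)

	// Per-level file counts use the "num-files-at-level{n}" form.
	n, err := db.GetProperty("leveldb.num-files-at-level0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("L0 files:", n)
}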
+
+// SizeOf calculates approximate sizes of the given key ranges.
+// The length of the returned sizes equals the length of the given
+// ranges. The returned sizes measure storage space usage, so if the
+// user data compresses by a factor of ten, the returned sizes will be
+// one-tenth the size of the corresponding user data.
+// The results may not include the sizes of recently written data.
+func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
+ if err := db.ok(); err != nil {
+ return nil, err
+ }
+
+ v := db.s.version()
+ defer v.release()
+
+ sizes := make(Sizes, 0, len(ranges))
+ for _, r := range ranges {
+ imin := newIkey(r.Start, kMaxSeq, ktSeek)
+ imax := newIkey(r.Limit, kMaxSeq, ktSeek)
+ start, err := v.offsetOf(imin)
+ if err != nil {
+ return nil, err
+ }
+ limit, err := v.offsetOf(imax)
+ if err != nil {
+ return nil, err
+ }
+ var size uint64
+ if limit >= start {
+ size = limit - start
+ }
+ sizes = append(sizes, size)
+ }
+
+ return sizes, nil
+}
+
+// Close closes the DB. This will also release any outstanding snapshots and
+// abort any in-flight compactions.
+//
+// It is not safe to close a DB until all outstanding iterators are released.
+// It is valid to call Close multiple times. Other methods should not be
+// called after the DB has been closed.
+func (db *DB) Close() error {
+ if !db.setClosed() {
+ return ErrClosed
+ }
+
+ start := time.Now()
+ db.log("db@close closing")
+
+ // Clear the finalizer.
+ runtime.SetFinalizer(db, nil)
+
+ // Get compaction error.
+ var err error
+ select {
+ case err = <-db.compErrC:
+ if err == ErrReadOnly {
+ err = nil
+ }
+ default:
+ }
+
+ // Signal all goroutines.
+ close(db.closeC)
+
+ // Wait for all goroutines to exit.
+ db.closeW.Wait()
+
+ // Lock the writer and close the journal.
+ db.writeLockC <- struct{}{}
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ }
+
+ if db.writeDelayN > 0 {
+ db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+ }
+
+ // Close session.
+ db.s.close()
+ db.logf("db@close done T·%v", time.Since(start))
+ db.s.release()
+
+ if db.closer != nil {
+ if err1 := db.closer.Close(); err == nil {
+ err = err1
+ }
+ }
+
+ // Nil out the pointers.
+ db.s = nil
+ db.mem = nil
+ db.frozenMem = nil
+ db.journal = nil
+ db.journalWriter = nil
+ db.journalFile = nil
+ db.frozenJournalFile = nil
+ db.closer = nil
+
+ return err
+}
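Taken together, the API added in db.go supports the usual open/read/write/close flow. The following is a minimal usage sketch, not part of the patch; the path is hypothetical and Put comes from goleveldb's public write API (db_write.go below):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// OpenFile creates the DB if it does not exist, unless
	// opt.ErrorIfMissing is set; nil selects the default options.
	db, err := leveldb.OpenFile("/tmp/example.db", nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	// The DB must be closed after use.
	defer db.Close()

	if err := db.Put([]byte("greeting"), []byte("hello"), nil); err != nil {
		log.Fatal(err)
	}
	value, err := db.Get([]byte("greeting"), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("greeting = %s\n", value) // the returned slice is a private copy
}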
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
new file mode 100644
index 0000000000..26003106ea
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -0,0 +1,791 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+var (
+ errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
+)
+
+type cStats struct {
+ sync.Mutex
+ duration time.Duration
+ read uint64
+ write uint64
+}
+
+func (p *cStats) add(n *cStatsStaging) {
+ p.Lock()
+ p.duration += n.duration
+ p.read += n.read
+ p.write += n.write
+ p.Unlock()
+}
+
+func (p *cStats) get() (duration time.Duration, read, write uint64) {
+ p.Lock()
+ defer p.Unlock()
+ return p.duration, p.read, p.write
+}
+
+type cStatsStaging struct {
+ start time.Time
+ duration time.Duration
+ on bool
+ read uint64
+ write uint64
+}
+
+func (p *cStatsStaging) startTimer() {
+ if !p.on {
+ p.start = time.Now()
+ p.on = true
+ }
+}
+
+func (p *cStatsStaging) stopTimer() {
+ if p.on {
+ p.duration += time.Since(p.start)
+ p.on = false
+ }
+}
+
+func (db *DB) compactionError() {
+ var err error
+noerr:
+ // No error.
+ for {
+ select {
+ case err = <-db.compErrSetC:
+ switch {
+ case err == nil:
+ case err == ErrReadOnly, errors.IsCorrupted(err):
+ goto hasperr
+ default:
+ goto haserr
+ }
+ case _, _ = <-db.closeC:
+ return
+ }
+ }
+haserr:
+ // Transient error.
+ for {
+ select {
+ case db.compErrC <- err:
+ case err = <-db.compErrSetC:
+ switch {
+ case err == nil:
+ goto noerr
+ case err == ErrReadOnly, errors.IsCorrupted(err):
+ goto hasperr
+ default:
+ }
+ case _, _ = <-db.closeC:
+ return
+ }
+ }
+hasperr:
+ // Persistent error.
+ for {
+ select {
+ case db.compErrC <- err:
+ case db.compPerErrC <- err:
+ case db.writeLockC <- struct{}{}:
+ // Hold write lock, so that write won't pass-through.
+ db.compWriteLocking = true
+ case _, _ = <-db.closeC:
+ if db.compWriteLocking {
+ // We should release the lock or Close will hang.
+ <-db.writeLockC
+ }
+ return
+ }
+ }
+}
+
+type compactionTransactCounter int
+
+func (cnt *compactionTransactCounter) incr() {
+ *cnt++
+}
+
+type compactionTransactInterface interface {
+ run(cnt *compactionTransactCounter) error
+ revert() error
+}
+
+func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
+ defer func() {
+ if x := recover(); x != nil {
+ if x == errCompactionTransactExiting {
+ if err := t.revert(); err != nil {
+ db.logf("%s revert error %q", name, err)
+ }
+ }
+ panic(x)
+ }
+ }()
+
+ const (
+ backoffMin = 1 * time.Second
+ backoffMax = 8 * time.Second
+ backoffMul = 2 * time.Second
+ )
+ var (
+ backoff = backoffMin
+ backoffT = time.NewTimer(backoff)
+ lastCnt = compactionTransactCounter(0)
+
+ disableBackoff = db.s.o.GetDisableCompactionBackoff()
+ )
+ for n := 0; ; n++ {
+ // Check whether the DB is closed.
+ if db.isClosed() {
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
+ } else if n > 0 {
+ db.logf("%s retrying N·%d", name, n)
+ }
+
+ // Execute.
+ cnt := compactionTransactCounter(0)
+ err := t.run(&cnt)
+ if err != nil {
+ db.logf("%s error I·%d %q", name, cnt, err)
+ }
+
+ // Set compaction error status.
+ select {
+ case db.compErrSetC <- err:
+ case perr := <-db.compPerErrC:
+ if err != nil {
+ db.logf("%s exiting (persistent error %q)", name, perr)
+ db.compactionExitTransact()
+ }
+ case _, _ = <-db.closeC:
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
+ }
+ if err == nil {
+ return
+ }
+ if errors.IsCorrupted(err) {
+ db.logf("%s exiting (corruption detected)", name)
+ db.compactionExitTransact()
+ }
+
+ if !disableBackoff {
+ // Reset backoff duration if counter is advancing.
+ if cnt > lastCnt {
+ backoff = backoffMin
+ lastCnt = cnt
+ }
+
+ // Backoff.
+ backoffT.Reset(backoff)
+ if backoff < backoffMax {
+ backoff *= backoffMul
+ if backoff > backoffMax {
+ backoff = backoffMax
+ }
+ }
+ select {
+ case <-backoffT.C:
+ case _, _ = <-db.closeC:
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
+ }
+ }
+ }
+}
+
+type compactionTransactFunc struct {
+ runFunc func(cnt *compactionTransactCounter) error
+ revertFunc func() error
+}
+
+func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
+ return t.runFunc(cnt)
+}
+
+func (t *compactionTransactFunc) revert() error {
+ if t.revertFunc != nil {
+ return t.revertFunc()
+ }
+ return nil
+}
+
+func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
+ db.compactionTransact(name, &compactionTransactFunc{run, revert})
+}
+
+func (db *DB) compactionExitTransact() {
+ panic(errCompactionTransactExiting)
+}
+
+func (db *DB) memCompaction() {
+ mdb := db.getFrozenMem()
+ if mdb == nil {
+ return
+ }
+ defer mdb.decref()
+
+ db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
+
+ // Don't compact empty memdb.
+ if mdb.Len() == 0 {
+ db.logf("memdb@flush skipping")
+ // drop frozen memdb
+ db.dropFrozenMem()
+ return
+ }
+
+ // Pause table compaction.
+ resumeC := make(chan struct{})
+ select {
+ case db.tcompPauseC <- (chan<- struct{})(resumeC):
+ case <-db.compPerErrC:
+ close(resumeC)
+ resumeC = nil
+ case _, _ = <-db.closeC:
+ return
+ }
+
+ var (
+ rec = &sessionRecord{}
+ stats = &cStatsStaging{}
+ flushLevel int
+ )
+
+ db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
+ stats.startTimer()
+ flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1)
+ stats.stopTimer()
+ return
+ }, func() error {
+ for _, r := range rec.addedTables {
+ db.logf("memdb@flush revert @%d", r.num)
+ f := db.s.getTableFile(r.num)
+ if err := f.Remove(); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+
+ db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) {
+ stats.startTimer()
+ rec.setJournalNum(db.journalFile.Num())
+ rec.setSeqNum(db.frozenSeq)
+ err = db.s.commit(rec)
+ stats.stopTimer()
+ return
+ }, nil)
+
+ db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
+
+ for _, r := range rec.addedTables {
+ stats.write += r.size
+ }
+ db.compStats[flushLevel].add(stats)
+
+ // Drop frozen memdb.
+ db.dropFrozenMem()
+
+ // Resume table compaction.
+ if resumeC != nil {
+ select {
+ case <-resumeC:
+ close(resumeC)
+ case _, _ = <-db.closeC:
+ return
+ }
+ }
+
+ // Trigger table compaction.
+ db.compSendTrigger(db.tcompCmdC)
+}
+
+type tableCompactionBuilder struct {
+ db *DB
+ s *session
+ c *compaction
+ rec *sessionRecord
+ stat0, stat1 *cStatsStaging
+
+ snapHasLastUkey bool
+ snapLastUkey []byte
+ snapLastSeq uint64
+ snapIter int
+ snapKerrCnt int
+ snapDropCnt int
+
+ kerrCnt int
+ dropCnt int
+
+ minSeq uint64
+ strict bool
+ tableSize int
+
+ tw *tWriter
+}
+
+func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
+ // Create new table if not already.
+ if b.tw == nil {
+ // Check for pause event.
+ if b.db != nil {
+ select {
+ case ch := <-b.db.tcompPauseC:
+ b.db.pauseCompaction(ch)
+ case _, _ = <-b.db.closeC:
+ b.db.compactionExitTransact()
+ default:
+ }
+ }
+
+ // Create new table.
+ var err error
+ b.tw, err = b.s.tops.create()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Write key/value into table.
+ return b.tw.append(key, value)
+}
+
+func (b *tableCompactionBuilder) needFlush() bool {
+ return b.tw.tw.BytesLen() >= b.tableSize
+}
+
+func (b *tableCompactionBuilder) flush() error {
+ t, err := b.tw.finish()
+ if err != nil {
+ return err
+ }
+ b.rec.addTableFile(b.c.level+1, t)
+ b.stat1.write += t.size
+ b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
+ b.tw = nil
+ return nil
+}
+
+func (b *tableCompactionBuilder) cleanup() {
+ if b.tw != nil {
+ b.tw.drop()
+ b.tw = nil
+ }
+}
+
+func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
+ snapResumed := b.snapIter > 0
+ hasLastUkey := b.snapHasLastUkey // The key might have zero length, so this is necessary.
+ lastUkey := append([]byte{}, b.snapLastUkey...)
+ lastSeq := b.snapLastSeq
+ b.kerrCnt = b.snapKerrCnt
+ b.dropCnt = b.snapDropCnt
+ // Restore compaction state.
+ b.c.restore()
+
+ defer b.cleanup()
+
+ b.stat1.startTimer()
+ defer b.stat1.stopTimer()
+
+ iter := b.c.newIterator()
+ defer iter.Release()
+ for i := 0; iter.Next(); i++ {
+ // Incr transact counter.
+ cnt.incr()
+
+ // Skip until last state.
+ if i < b.snapIter {
+ continue
+ }
+
+ resumed := false
+ if snapResumed {
+ resumed = true
+ snapResumed = false
+ }
+
+ ikey := iter.Key()
+ ukey, seq, kt, kerr := parseIkey(ikey)
+
+ if kerr == nil {
+ shouldStop := !resumed && b.c.shouldStopBefore(ikey)
+
+ if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
+ // First occurrence of this user key.
+
+ // Only rotate tables if ukey doesn't hop across.
+ if b.tw != nil && (shouldStop || b.needFlush()) {
+ if err := b.flush(); err != nil {
+ return err
+ }
+
+ // Create a snapshot of the state.
+ b.c.save()
+ b.snapHasLastUkey = hasLastUkey
+ b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
+ b.snapLastSeq = lastSeq
+ b.snapIter = i
+ b.snapKerrCnt = b.kerrCnt
+ b.snapDropCnt = b.dropCnt
+ }
+
+ hasLastUkey = true
+ lastUkey = append(lastUkey[:0], ukey...)
+ lastSeq = kMaxSeq
+ }
+
+ switch {
+ case lastSeq <= b.minSeq:
+ // Dropped because a newer entry for the same user key exists
+ fallthrough // (A)
+ case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
+ // For this user key:
+ // (1) there is no data in higher levels
+ // (2) data in lower levels will have larger seq numbers
+ // (3) data in layers that are being compacted here and have
+ // smaller seq numbers will be dropped in the next
+ // few iterations of this loop (by rule (A) above).
+ // Therefore this deletion marker is obsolete and can be dropped.
+ lastSeq = seq
+ b.dropCnt++
+ continue
+ default:
+ lastSeq = seq
+ }
+ } else {
+ if b.strict {
+ return kerr
+ }
+
+ // Don't drop corrupted keys.
+ hasLastUkey = false
+ lastUkey = lastUkey[:0]
+ lastSeq = kMaxSeq
+ b.kerrCnt++
+ }
+
+ if err := b.appendKV(ikey, iter.Value()); err != nil {
+ return err
+ }
+ }
+
+ if err := iter.Error(); err != nil {
+ return err
+ }
+
+ // Finish last table.
+ if b.tw != nil && !b.tw.empty() {
+ return b.flush()
+ }
+ return nil
+}
+
+func (b *tableCompactionBuilder) revert() error {
+ for _, at := range b.rec.addedTables {
+ b.s.logf("table@build revert @%d", at.num)
+ f := b.s.getTableFile(at.num)
+ if err := f.Remove(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
+ defer c.release()
+
+ rec := &sessionRecord{}
+ rec.addCompPtr(c.level, c.imax)
+
+ if !noTrivial && c.trivial() {
+ t := c.tables[0][0]
+ db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
+ rec.delTable(c.level, t.file.Num())
+ rec.addTableFile(c.level+1, t)
+ db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
+ return db.s.commit(rec)
+ }, nil)
+ return
+ }
+
+ var stats [2]cStatsStaging
+ for i, tables := range c.tables {
+ for _, t := range tables {
+ stats[i].read += t.size
+ // Insert deleted tables into record
+ rec.delTable(c.level+i, t.file.Num())
+ }
+ }
+ sourceSize := int(stats[0].read + stats[1].read)
+ minSeq := db.minSeq()
+ db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)
+
+ b := &tableCompactionBuilder{
+ db: db,
+ s: db.s,
+ c: c,
+ rec: rec,
+ stat1: &stats[1],
+ minSeq: minSeq,
+ strict: db.s.o.GetStrict(opt.StrictCompaction),
+ tableSize: db.s.o.GetCompactionTableSize(c.level + 1),
+ }
+ db.compactionTransact("table@build", b)
+
+ // Commit changes
+ db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) {
+ stats[1].startTimer()
+ defer stats[1].stopTimer()
+ return db.s.commit(rec)
+ }, nil)
+
+ resultSize := int(stats[1].write)
+ db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
+
+ // Save compaction stats
+ for i := range stats {
+ db.compStats[c.level+1].add(&stats[i])
+ }
+}
+
+func (db *DB) tableRangeCompaction(level int, umin, umax []byte) {
+ db.logf("table@compaction range L%d %q:%q", level, umin, umax)
+
+ if level >= 0 {
+ if c := db.s.getCompactionRange(level, umin, umax); c != nil {
+ db.tableCompaction(c, true)
+ }
+ } else {
+ v := db.s.version()
+ m := 1
+ for i, t := range v.tables[1:] {
+ if t.overlaps(db.s.icmp, umin, umax, false) {
+ m = i + 1
+ }
+ }
+ v.release()
+
+ for level := 0; level < m; level++ {
+ if c := db.s.getCompactionRange(level, umin, umax); c != nil {
+ db.tableCompaction(c, true)
+ }
+ }
+ }
+}
+
+func (db *DB) tableAutoCompaction() {
+ if c := db.s.pickCompaction(); c != nil {
+ db.tableCompaction(c, false)
+ }
+}
+
+func (db *DB) tableNeedCompaction() bool {
+ v := db.s.version()
+ defer v.release()
+ return v.needCompaction()
+}
+
+func (db *DB) pauseCompaction(ch chan<- struct{}) {
+ select {
+ case ch <- struct{}{}:
+ case _, _ = <-db.closeC:
+ db.compactionExitTransact()
+ }
+}
+
+type cCmd interface {
+ ack(err error)
+}
+
+type cIdle struct {
+ ackC chan<- error
+}
+
+func (r cIdle) ack(err error) {
+ if r.ackC != nil {
+ defer func() {
+ recover()
+ }()
+ r.ackC <- err
+ }
+}
+
+type cRange struct {
+ level int
+ min, max []byte
+ ackC chan<- error
+}
+
+func (r cRange) ack(err error) {
+ if r.ackC != nil {
+ defer func() {
+ recover()
+ }()
+ r.ackC <- err
+ }
+}
+
+// This will trigger auto compaction and/or wait for all compactions to be done.
+func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
+ ch := make(chan error)
+ defer close(ch)
+ // Send cmd.
+ select {
+ case compC <- cIdle{ch}:
+ case err = <-db.compErrC:
+ return
+ case _, _ = <-db.closeC:
+ return ErrClosed
+ }
+ // Wait cmd.
+ select {
+ case err = <-ch:
+ case err = <-db.compErrC:
+ case _, _ = <-db.closeC:
+ return ErrClosed
+ }
+ return err
+}
+
+// This will trigger auto compaction but will not wait for it.
+func (db *DB) compSendTrigger(compC chan<- cCmd) {
+ select {
+ case compC <- cIdle{}:
+ default:
+ }
+}
+
+// Send range compaction request.
+func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
+ ch := make(chan error)
+ defer close(ch)
+ // Send cmd.
+ select {
+ case compC <- cRange{level, min, max, ch}:
+ case err := <-db.compErrC:
+ return err
+ case _, _ = <-db.closeC:
+ return ErrClosed
+ }
+ // Wait cmd.
+ select {
+ case err = <-ch:
+ case err = <-db.compErrC:
+ case _, _ = <-db.closeC:
+ return ErrClosed
+ }
+ return err
+}
+
+func (db *DB) mCompaction() {
+ var x cCmd
+
+ defer func() {
+ if x := recover(); x != nil {
+ if x != errCompactionTransactExiting {
+ panic(x)
+ }
+ }
+ if x != nil {
+ x.ack(ErrClosed)
+ }
+ db.closeW.Done()
+ }()
+
+ for {
+ select {
+ case x = <-db.mcompCmdC:
+ switch x.(type) {
+ case cIdle:
+ db.memCompaction()
+ x.ack(nil)
+ x = nil
+ default:
+ panic("leveldb: unknown command")
+ }
+ case _, _ = <-db.closeC:
+ return
+ }
+ }
+}
+
+func (db *DB) tCompaction() {
+ var x cCmd
+ var ackQ []cCmd
+
+ defer func() {
+ if x := recover(); x != nil {
+ if x != errCompactionTransactExiting {
+ panic(x)
+ }
+ }
+ for i := range ackQ {
+ ackQ[i].ack(ErrClosed)
+ ackQ[i] = nil
+ }
+ if x != nil {
+ x.ack(ErrClosed)
+ }
+ db.closeW.Done()
+ }()
+
+ for {
+ if db.tableNeedCompaction() {
+ select {
+ case x = <-db.tcompCmdC:
+ case ch := <-db.tcompPauseC:
+ db.pauseCompaction(ch)
+ continue
+ case _, _ = <-db.closeC:
+ return
+ default:
+ }
+ } else {
+ for i := range ackQ {
+ ackQ[i].ack(nil)
+ ackQ[i] = nil
+ }
+ ackQ = ackQ[:0]
+ select {
+ case x = <-db.tcompCmdC:
+ case ch := <-db.tcompPauseC:
+ db.pauseCompaction(ch)
+ continue
+ case _, _ = <-db.closeC:
+ return
+ }
+ }
+ if x != nil {
+ switch cmd := x.(type) {
+ case cIdle:
+ ackQ = append(ackQ, x)
+ case cRange:
+ db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
+ x.ack(nil)
+ default:
+ panic("leveldb: unknown command")
+ }
+ x = nil
+ }
+ db.tableAutoCompaction()
+ }
+}
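The compactionTransact loop above retries failed compaction steps with a backoff bounded between backoffMin (1s) and backoffMax (8s), resetting when the transaction makes progress. The following is a standalone sketch of that retry-with-capped-backoff shape, not the vendored code itself; the upstream loop additionally resets the backoff on progress and aborts on close, pause, or corruption:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs op until it succeeds, sleeping between attempts with a
// capped exponential backoff (1s, 2s, 4s, 8s, 8s, ...).
func retry(op func() error) {
	const (
		backoffMin = 1 * time.Second
		backoffMax = 8 * time.Second
	)
	backoff := backoffMin
	for n := 0; ; n++ {
		err := op()
		if err == nil {
			return
		}
		fmt.Printf("attempt %d failed: %v; retrying in %v\n", n, err, backoff)
		time.Sleep(backoff)
		if backoff < backoffMax {
			backoff *= 2
			if backoff > backoffMax {
				backoff = backoffMax
			}
		}
	}
}

func main() {
	attempts := 0
	retry(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println("succeeded after", attempts, "attempts")
}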
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go
new file mode 100644
index 0000000000..656ae98567
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go
@@ -0,0 +1,350 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "errors"
+ "math/rand"
+ "runtime"
+ "sync"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
+)
+
+type memdbReleaser struct {
+ once sync.Once
+ m *memDB
+}
+
+func (mr *memdbReleaser) Release() {
+ mr.once.Do(func() {
+ mr.m.decref()
+ })
+}
+
+func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ em, fm := db.getMems()
+ v := db.s.version()
+
+ ti := v.getIterators(slice, ro)
+ n := len(ti) + 2
+ i := make([]iterator.Iterator, 0, n)
+ emi := em.NewIterator(slice)
+ emi.SetReleaser(&memdbReleaser{m: em})
+ i = append(i, emi)
+ if fm != nil {
+ fmi := fm.NewIterator(slice)
+ fmi.SetReleaser(&memdbReleaser{m: fm})
+ i = append(i, fmi)
+ }
+ i = append(i, ti...)
+ strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+ mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
+ mi.SetReleaser(&versionReleaser{v: v})
+ return mi
+}
+
+func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
+ var islice *util.Range
+ if slice != nil {
+ islice = &util.Range{}
+ if slice.Start != nil {
+ islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
+ }
+ if slice.Limit != nil {
+ islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
+ }
+ }
+ rawIter := db.newRawIterator(islice, ro)
+ iter := &dbIter{
+ db: db,
+ icmp: db.s.icmp,
+ iter: rawIter,
+ seq: seq,
+ strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
+ key: make([]byte, 0),
+ value: make([]byte, 0),
+ }
+ atomic.AddInt32(&db.aliveIters, 1)
+ runtime.SetFinalizer(iter, (*dbIter).Release)
+ return iter
+}
+
+func (db *DB) iterSamplingRate() int {
+ return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
+}
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+// dbIter represents the iterator state over a database session.
+type dbIter struct {
+ db *DB
+ icmp *iComparer
+ iter iterator.Iterator
+ seq uint64
+ strict bool
+
+ samplingGap int
+ dir dir
+ key []byte
+ value []byte
+ err error
+ releaser util.Releaser
+}
+
+func (i *dbIter) sampleSeek() {
+ ikey := i.iter.Key()
+ i.samplingGap -= len(ikey) + len(i.iter.Value())
+ for i.samplingGap < 0 {
+ i.samplingGap += i.db.iterSamplingRate()
+ i.db.sampleSeek(ikey)
+ }
+}
+
+func (i *dbIter) setErr(err error) {
+ i.err = err
+ i.key = nil
+ i.value = nil
+}
+
+func (i *dbIter) iterErr() {
+ if err := i.iter.Error(); err != nil {
+ i.setErr(err)
+ }
+}
+
+func (i *dbIter) Valid() bool {
+ return i.err == nil && i.dir > dirEOI
+}
+
+func (i *dbIter) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.iter.First() {
+ i.dir = dirSOI
+ return i.next()
+ }
+ i.dir = dirEOI
+ i.iterErr()
+ return false
+}
+
+func (i *dbIter) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.iter.Last() {
+ return i.prev()
+ }
+ i.dir = dirSOI
+ i.iterErr()
+ return false
+}
+
+func (i *dbIter) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ ikey := newIkey(key, i.seq, ktSeek)
+ if i.iter.Seek(ikey) {
+ i.dir = dirSOI
+ return i.next()
+ }
+ i.dir = dirEOI
+ i.iterErr()
+ return false
+}
+
+func (i *dbIter) next() bool {
+ for {
+ if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
+ if seq <= i.seq {
+ switch kt {
+ case ktDel:
+ // Skip deleted key.
+ i.key = append(i.key[:0], ukey...)
+ i.dir = dirForward
+ case ktVal:
+ if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
+ i.key = append(i.key[:0], ukey...)
+ i.value = append(i.value[:0], i.iter.Value()...)
+ i.dir = dirForward
+ return true
+ }
+ }
+ }
+ } else if i.strict {
+ i.setErr(kerr)
+ break
+ }
+ if !i.iter.Next() {
+ i.dir = dirEOI
+ i.iterErr()
+ break
+ }
+ }
+ return false
+}
+
+func (i *dbIter) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) {
+ i.dir = dirEOI
+ i.iterErr()
+ return false
+ }
+ return i.next()
+}
+
+func (i *dbIter) prev() bool {
+ i.dir = dirBackward
+ del := true
+ if i.iter.Valid() {
+ for {
+ if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
+ if seq <= i.seq {
+ if !del && i.icmp.uCompare(ukey, i.key) < 0 {
+ return true
+ }
+ del = (kt == ktDel)
+ if !del {
+ i.key = append(i.key[:0], ukey...)
+ i.value = append(i.value[:0], i.iter.Value()...)
+ }
+ }
+ } else if i.strict {
+ i.setErr(kerr)
+ return false
+ }
+ if !i.iter.Prev() {
+ break
+ }
+ }
+ }
+ if del {
+ i.dir = dirSOI
+ i.iterErr()
+ return false
+ }
+ return true
+}
+
+func (i *dbIter) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirEOI:
+ return i.Last()
+ case dirForward:
+ for i.iter.Prev() {
+ if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
+ if i.icmp.uCompare(ukey, i.key) < 0 {
+ goto cont
+ }
+ } else if i.strict {
+ i.setErr(kerr)
+ return false
+ }
+ }
+ i.dir = dirSOI
+ i.iterErr()
+ return false
+ }
+
+cont:
+ return i.prev()
+}
+
+func (i *dbIter) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.key
+}
+
+func (i *dbIter) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.value
+}
+
+func (i *dbIter) Release() {
+ if i.dir != dirReleased {
+ // Clear the finalizer.
+ runtime.SetFinalizer(i, nil)
+
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+
+ i.dir = dirReleased
+ i.key = nil
+ i.value = nil
+ i.iter.Release()
+ i.iter = nil
+ atomic.AddInt32(&i.db.aliveIters, -1)
+ i.db = nil
+ }
+}
+
+func (i *dbIter) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *dbIter) Error() error {
+ return i.err
+}
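The dbIter above is what DB.NewIterator returns. Typical client-side usage looks like the following minimal sketch, not part of the patch; the path is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example.db", nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Iterate over keys in ["a", "m"); a nil slice would scan the whole DB.
	iter := db.NewIterator(&util.Range{Start: []byte("a"), Limit: []byte("m")}, nil)
	for iter.Next() {
		// Key and Value are only valid until the next call to Next.
		fmt.Printf("%s = %s\n", iter.Key(), iter.Value())
	}
	// The iterator must be released after use, then its Error checked.
	iter.Release()
	if err := iter.Error(); err != nil {
		log.Fatal(err)
	}
}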
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
new file mode 100644
index 0000000000..0372848ff1
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
@@ -0,0 +1,183 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "container/list"
+ "fmt"
+ "runtime"
+ "sync"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type snapshotElement struct {
+ seq uint64
+ ref int
+ e *list.Element
+}
+
+// Acquires a snapshot, based on latest sequence.
+func (db *DB) acquireSnapshot() *snapshotElement {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ seq := db.getSeq()
+
+ if e := db.snapsList.Back(); e != nil {
+ se := e.Value.(*snapshotElement)
+ if se.seq == seq {
+ se.ref++
+ return se
+ } else if seq < se.seq {
+ panic("leveldb: sequence number is not increasing")
+ }
+ }
+ se := &snapshotElement{seq: seq, ref: 1}
+ se.e = db.snapsList.PushBack(se)
+ return se
+}
+
+// Releases given snapshot element.
+func (db *DB) releaseSnapshot(se *snapshotElement) {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ se.ref--
+ if se.ref == 0 {
+ db.snapsList.Remove(se.e)
+ se.e = nil
+ } else if se.ref < 0 {
+ panic("leveldb: Snapshot: negative element reference")
+ }
+}
+
+// Gets the minimum sequence number that is not being snapshotted.
+func (db *DB) minSeq() uint64 {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ if e := db.snapsList.Front(); e != nil {
+ return e.Value.(*snapshotElement).seq
+ }
+
+ return db.getSeq()
+}
+
+// Snapshot is a DB snapshot.
+type Snapshot struct {
+ db *DB
+ elem *snapshotElement
+ mu sync.RWMutex
+ released bool
+}
+
+// Creates new snapshot object.
+func (db *DB) newSnapshot() *Snapshot {
+ snap := &Snapshot{
+ db: db,
+ elem: db.acquireSnapshot(),
+ }
+ atomic.AddInt32(&db.aliveSnaps, 1)
+ runtime.SetFinalizer(snap, (*Snapshot).Release)
+ return snap
+}
+
+func (snap *Snapshot) String() string {
+ return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = snap.db.ok()
+ if err != nil {
+ return
+ }
+ snap.mu.RLock()
+ defer snap.mu.RUnlock()
+ if snap.released {
+ err = ErrSnapshotReleased
+ return
+ }
+ return snap.db.get(key, snap.elem.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = snap.db.ok()
+ if err != nil {
+ return
+ }
+ snap.mu.RLock()
+ defer snap.mu.RUnlock()
+ if snap.released {
+ err = ErrSnapshotReleased
+ return
+ }
+ return snap.db.has(key, snap.elem.seq, ro)
+}
+
+// NewIterator returns an iterator for the snapshot of the underlying DB.
+// The returned iterator is not goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to contain only keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling the Release method.
+// Releasing the snapshot doesn't release the iterator; the iterator
+// remains valid until it is released.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := snap.db.ok(); err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ snap.mu.Lock()
+ defer snap.mu.Unlock()
+ if snap.released {
+ return iterator.NewEmptyIterator(ErrSnapshotReleased)
+ }
+ // Since the iterator already holds a version ref, it doesn't need to
+ // hold a snapshot ref.
+ return snap.db.newIterator(snap.elem.seq, slice, ro)
+}
+
+// Release releases the snapshot. This will not release any returned
+// iterators; the iterators remain valid until they are released or the
+// underlying DB is closed.
+//
+// Other methods should not be called after the snapshot has been released.
+func (snap *Snapshot) Release() {
+ snap.mu.Lock()
+ defer snap.mu.Unlock()
+
+ if !snap.released {
+ // Clear the finalizer.
+ runtime.SetFinalizer(snap, nil)
+
+ snap.released = true
+ snap.db.releaseSnapshot(snap.elem)
+ atomic.AddInt32(&snap.db.aliveSnaps, -1)
+ snap.db = nil
+ snap.elem = nil
+ }
+}
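Snapshots give a consistent read view while the DB keeps accepting writes. A minimal sketch, not part of the patch; the path and key are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example.db", nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	snap, err := db.GetSnapshot()
	if err != nil {
		log.Fatal(err)
	}
	// The snapshot must be released after use.
	defer snap.Release()

	// Writes after this point are invisible to the snapshot.
	if err := db.Put([]byte("k"), []byte("new"), nil); err != nil {
		log.Fatal(err)
	}

	if v, err := snap.Get([]byte("k"), nil); err == nil {
		fmt.Printf("snapshot still sees: %s\n", v)
	} else if err == leveldb.ErrNotFound {
		fmt.Println("key was not present when the snapshot was taken")
	} else {
		log.Fatal(err)
	}
}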
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go
new file mode 100644
index 0000000000..24671dd39e
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -0,0 +1,211 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+)
+
+type memDB struct {
+ db *DB
+ *memdb.DB
+ ref int32
+}
+
+func (m *memDB) incref() {
+ atomic.AddInt32(&m.ref, 1)
+}
+
+func (m *memDB) decref() {
+ if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
+ // Only put back memdb with std capacity.
+ if m.Capacity() == m.db.s.o.GetWriteBuffer() {
+ m.Reset()
+ m.db.mpoolPut(m.DB)
+ }
+ m.db = nil
+ m.DB = nil
+ } else if ref < 0 {
+ panic("negative memdb ref")
+ }
+}
+
+// Get latest sequence number.
+func (db *DB) getSeq() uint64 {
+ return atomic.LoadUint64(&db.seq)
+}
+
+// Atomically adds delta to seq.
+func (db *DB) addSeq(delta uint64) {
+ atomic.AddUint64(&db.seq, delta)
+}
+
+func (db *DB) sampleSeek(ikey iKey) {
+ v := db.s.version()
+ if v.sampleSeek(ikey) {
+ // Trigger table compaction.
+ db.compSendTrigger(db.tcompCmdC)
+ }
+ v.release()
+}
+
+func (db *DB) mpoolPut(mem *memdb.DB) {
+ defer func() {
+ recover()
+ }()
+ select {
+ case db.memPool <- mem:
+ default:
+ }
+}
+
+func (db *DB) mpoolGet() *memdb.DB {
+ select {
+ case mem := <-db.memPool:
+ return mem
+ default:
+ return nil
+ }
+}
+
+func (db *DB) mpoolDrain() {
+ ticker := time.NewTicker(30 * time.Second)
+ for {
+ select {
+ case <-ticker.C:
+ select {
+ case <-db.memPool:
+ default:
+ }
+ case _, _ = <-db.closeC:
+ close(db.memPool)
+ return
+ }
+ }
+}
+
+// Create a new memdb and freeze the old one; needs external synchronization.
+// newMem is only called synchronously by the writer.
+func (db *DB) newMem(n int) (mem *memDB, err error) {
+ num := db.s.allocFileNum()
+ file := db.s.getJournalFile(num)
+ w, err := file.Create()
+ if err != nil {
+ db.s.reuseFileNum(num)
+ return
+ }
+
+ db.memMu.Lock()
+ defer db.memMu.Unlock()
+
+ if db.frozenMem != nil {
+ panic("still has frozen mem")
+ }
+
+ if db.journal == nil {
+ db.journal = journal.NewWriter(w)
+ } else {
+ db.journal.Reset(w)
+ db.journalWriter.Close()
+ db.frozenJournalFile = db.journalFile
+ }
+ db.journalWriter = w
+ db.journalFile = file
+ db.frozenMem = db.mem
+ mdb := db.mpoolGet()
+ if mdb == nil || mdb.Capacity() < n {
+ mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
+ }
+ mem = &memDB{
+ db: db,
+ DB: mdb,
+ ref: 2,
+ }
+ db.mem = mem
+ // The seq is only incremented by the writer, and whoever called newMem
+ // should hold the write lock, so no additional synchronization is needed here.
+ db.frozenSeq = db.seq
+ return
+}
+
+// Get all memdbs.
+func (db *DB) getMems() (e, f *memDB) {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.mem == nil {
+ panic("nil effective mem")
+ }
+ db.mem.incref()
+ if db.frozenMem != nil {
+ db.frozenMem.incref()
+ }
+ return db.mem, db.frozenMem
+}
+
+// Get the effective memdb.
+func (db *DB) getEffectiveMem() *memDB {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.mem == nil {
+ panic("nil effective mem")
+ }
+ db.mem.incref()
+ return db.mem
+}
+
+// Check whether we have a frozen memdb.
+func (db *DB) hasFrozenMem() bool {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ return db.frozenMem != nil
+}
+
+// Get frozen memdb.
+func (db *DB) getFrozenMem() *memDB {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.frozenMem != nil {
+ db.frozenMem.incref()
+ }
+ return db.frozenMem
+}
+
+// Drop frozen memdb; assume that frozen memdb isn't nil.
+func (db *DB) dropFrozenMem() {
+ db.memMu.Lock()
+ if err := db.frozenJournalFile.Remove(); err != nil {
+ db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err)
+ } else {
+ db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
+ }
+ db.frozenJournalFile = nil
+ db.frozenMem.decref()
+ db.frozenMem = nil
+ db.memMu.Unlock()
+}
+
+// Set closed flag; return true if not already closed.
+func (db *DB) setClosed() bool {
+ return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
+}
+
+// Check whether DB was closed.
+func (db *DB) isClosed() bool {
+ return atomic.LoadUint32(&db.closed) != 0
+}
+
+// Check read ok status.
+func (db *DB) ok() error {
+ if db.isClosed() {
+ return ErrClosed
+ }
+ return nil
+}
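The incref/decref pair on memDB above is a plain atomic reference count that releases resources at zero and panics on underflow. The following is a self-contained sketch of the same pattern, illustrative only and not part of the patch:

package main

import (
	"fmt"
	"sync/atomic"
)

// refCounted mirrors the memDB reference-counting shape: incref bumps the
// count atomically, decref runs the release hook when the count hits zero
// and panics if it ever goes negative.
type refCounted struct {
	ref     int32
	release func()
}

func (r *refCounted) incref() { atomic.AddInt32(&r.ref, 1) }

func (r *refCounted) decref() {
	if ref := atomic.AddInt32(&r.ref, -1); ref == 0 {
		r.release()
	} else if ref < 0 {
		panic("negative ref")
	}
}

func main() {
	rc := &refCounted{ref: 1, release: func() { fmt.Println("released") }}
	rc.incref() // second holder
	rc.decref() // first holder done
	rc.decref() // last holder done; prints "released"
}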
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
new file mode 100644
index 0000000000..a8a2bdf72e
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Reader is the interface that wraps basic Get and NewIterator methods.
+// This interface is implemented by both DB and Snapshot.
+type Reader interface {
+ Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
+ NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
+}
+
+type Sizes []uint64
+
+// Sum returns sum of the sizes.
+func (p Sizes) Sum() (n uint64) {
+ for _, s := range p {
+ n += s
+ }
+ return n
+}
+
+// Logging.
+func (db *DB) log(v ...interface{}) { db.s.log(v...) }
+func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
+
+// Check and clean files.
+func (db *DB) checkAndCleanFiles() error {
+ v := db.s.version()
+ defer v.release()
+
+ tablesMap := make(map[uint64]bool)
+ for _, tables := range v.tables {
+ for _, t := range tables {
+ tablesMap[t.file.Num()] = false
+ }
+ }
+
+ files, err := db.s.getFiles(storage.TypeAll)
+ if err != nil {
+ return err
+ }
+
+ var nTables int
+ var rem []storage.File
+ for _, f := range files {
+ keep := true
+ switch f.Type() {
+ case storage.TypeManifest:
+ keep = f.Num() >= db.s.manifestFile.Num()
+ case storage.TypeJournal:
+ if db.frozenJournalFile != nil {
+ keep = f.Num() >= db.frozenJournalFile.Num()
+ } else {
+ keep = f.Num() >= db.journalFile.Num()
+ }
+ case storage.TypeTable:
+ _, keep = tablesMap[f.Num()]
+ if keep {
+ tablesMap[f.Num()] = true
+ nTables++
+ }
+ }
+
+ if !keep {
+ rem = append(rem, f)
+ }
+ }
+
+ if nTables != len(tablesMap) {
+ var missing []*storage.FileInfo
+ for num, present := range tablesMap {
+ if !present {
+ missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num})
+ db.logf("db@janitor table missing @%d", num)
+ }
+ }
+ return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing})
+ }
+
+ db.logf("db@janitor F·%d G·%d", len(files), len(rem))
+ for _, f := range rem {
+ db.logf("db@janitor removing %s-%d", f.Type(), f.Num())
+ if err := f.Remove(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go
new file mode 100644
index 0000000000..0c3956539a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -0,0 +1,338 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func (db *DB) writeJournal(b *Batch) error {
+ w, err := db.journal.Next()
+ if err != nil {
+ return err
+ }
+ if _, err := w.Write(b.encode()); err != nil {
+ return err
+ }
+ if err := db.journal.Flush(); err != nil {
+ return err
+ }
+ if b.sync {
+ return db.journalWriter.Sync()
+ }
+ return nil
+}
+
+func (db *DB) jWriter() {
+ defer db.closeW.Done()
+ for {
+ select {
+ case b := <-db.journalC:
+ if b != nil {
+ db.journalAckC <- db.writeJournal(b)
+ }
+ case _, _ = <-db.closeC:
+ return
+ }
+ }
+}
+
+func (db *DB) rotateMem(n int) (mem *memDB, err error) {
+ // Wait for pending memdb compaction.
+ err = db.compSendIdle(db.mcompCmdC)
+ if err != nil {
+ return
+ }
+
+ // Create new memdb and journal.
+ mem, err = db.newMem(n)
+ if err != nil {
+ return
+ }
+
+ // Schedule memdb compaction.
+ db.compSendTrigger(db.mcompCmdC)
+ return
+}
+
+func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
+ delayed := false
+ flush := func() (retry bool) {
+ v := db.s.version()
+ defer v.release()
+ mdb = db.getEffectiveMem()
+ defer func() {
+ if retry {
+ mdb.decref()
+ mdb = nil
+ }
+ }()
+ mdbFree = mdb.Free()
+ switch {
+ case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
+ delayed = true
+ time.Sleep(time.Millisecond)
+ case mdbFree >= n:
+ return false
+ case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
+ delayed = true
+ err = db.compSendIdle(db.tcompCmdC)
+ if err != nil {
+ return false
+ }
+ default:
+ // Allow memdb to grow if it has no entry.
+ if mdb.Len() == 0 {
+ mdbFree = n
+ } else {
+ mdb.decref()
+ mdb, err = db.rotateMem(n)
+ if err == nil {
+ mdbFree = mdb.Free()
+ } else {
+ mdbFree = 0
+ }
+ }
+ return false
+ }
+ return true
+ }
+ start := time.Now()
+ for flush() {
+ }
+ if delayed {
+ db.writeDelay += time.Since(start)
+ db.writeDelayN++
+ } else if db.writeDelayN > 0 {
+ db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+ db.writeDelay = 0
+ db.writeDelayN = 0
+ }
+ return
+}
+
+// Write applies the given batch to the DB. The batch records will be
+// applied sequentially.
+//
+// It is safe to modify the contents of the arguments after Write returns.
+func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
+ err = db.ok()
+ if err != nil || b == nil || b.Len() == 0 {
+ return
+ }
+
+ b.init(wo.GetSync() && !db.s.o.GetNoSync())
+
+ // The write happens synchronously.
+ select {
+ case db.writeC <- b:
+ if <-db.writeMergedC {
+ return <-db.writeAckC
+ }
+ case db.writeLockC <- struct{}{}:
+ case err = <-db.compPerErrC:
+ return
+ case _, _ = <-db.closeC:
+ return ErrClosed
+ }
+
+ merged := 0
+ danglingMerge := false
+ defer func() {
+ if danglingMerge {
+ db.writeMergedC <- false
+ } else {
+ <-db.writeLockC
+ }
+ for i := 0; i < merged; i++ {
+ db.writeAckC <- err
+ }
+ }()
+
+ mdb, mdbFree, err := db.flush(b.size())
+ if err != nil {
+ return
+ }
+ defer mdb.decref()
+
+ // Calculate maximum size of the batch.
+ m := 1 << 20
+ if x := b.size(); x <= 128<<10 {
+ m = x + (128 << 10)
+ }
+ m = minInt(m, mdbFree)
+
+ // Merge with other batch.
+drain:
+ for b.size() < m && !b.sync {
+ select {
+ case nb := <-db.writeC:
+ if b.size()+nb.size() <= m {
+ b.append(nb)
+ db.writeMergedC <- true
+ merged++
+ } else {
+ danglingMerge = true
+ break drain
+ }
+ default:
+ break drain
+ }
+ }
+
+ // Set the batch's first seq number relative to the last seq.
+ b.seq = db.seq + 1
+
+ // Write the journal concurrently if the batch is large enough.
+ if b.size() >= (128 << 10) {
+ // Push the write batch to the journal writer
+ select {
+ case db.journalC <- b:
+ // Write into memdb
+ if berr := b.memReplay(mdb.DB); berr != nil {
+ panic(berr)
+ }
+ case err = <-db.compPerErrC:
+ return
+ case _, _ = <-db.closeC:
+ err = ErrClosed
+ return
+ }
+ // Wait for journal writer
+ select {
+ case err = <-db.journalAckC:
+ if err != nil {
+ // Revert memdb if error detected
+ if berr := b.revertMemReplay(mdb.DB); berr != nil {
+ panic(berr)
+ }
+ return
+ }
+ case _, _ = <-db.closeC:
+ err = ErrClosed
+ return
+ }
+ } else {
+ err = db.writeJournal(b)
+ if err != nil {
+ return
+ }
+ if berr := b.memReplay(mdb.DB); berr != nil {
+ panic(berr)
+ }
+ }
+
+ // Set last seq number.
+ db.addSeq(uint64(b.Len()))
+
+ if b.size() >= mdbFree {
+ db.rotateMem(0)
+ }
+ return
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
+ b := new(Batch)
+ b.Put(key, value)
+ return db.Write(b, wo)
+}
+
+// Delete deletes the value for the given key.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
+ b := new(Batch)
+ b.Delete(key)
+ return db.Write(b, wo)
+}
+
+func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
+ iter := mem.NewIterator(nil)
+ defer iter.Release()
+ return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
+ (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
+}
+
+// CompactRange compacts the underlying DB for the given key range.
+// In particular, deleted and overwritten versions are discarded,
+// and the data is rearranged to reduce the cost of operations
+// needed to access the data. This operation should typically only
+// be invoked by users who understand the underlying implementation.
+//
+// A nil Range.Start is treated as a key before all keys in the DB,
+// and a nil Range.Limit is treated as a key after all keys in the DB.
+// Therefore if both are nil then it will compact the entire DB.
+func (db *DB) CompactRange(r util.Range) error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ // Lock writer.
+ select {
+ case db.writeLockC <- struct{}{}:
+ case err := <-db.compPerErrC:
+ return err
+ case _, _ = <-db.closeC:
+ return ErrClosed
+ }
+
+ // Check for overlaps in memdb.
+ mdb := db.getEffectiveMem()
+ defer mdb.decref()
+ if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
+ // Memdb compaction.
+ if _, err := db.rotateMem(0); err != nil {
+ <-db.writeLockC
+ return err
+ }
+ <-db.writeLockC
+ if err := db.compSendIdle(db.mcompCmdC); err != nil {
+ return err
+ }
+ } else {
+ <-db.writeLockC
+ }
+
+ // Table compaction.
+ return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
+}
+
+// SetReadOnly makes DB read-only. It will stay read-only until reopened.
+func (db *DB) SetReadOnly() error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ // Lock writer.
+ select {
+ case db.writeLockC <- struct{}{}:
+ db.compWriteLocking = true
+ case err := <-db.compPerErrC:
+ return err
+ case _, _ = <-db.closeC:
+ return ErrClosed
+ }
+
+ // Set compaction read-only.
+ select {
+ case db.compErrSetC <- ErrReadOnly:
+ case perr := <-db.compPerErrC:
+ return perr
+ case _, _ = <-db.closeC:
+ return ErrClosed
+ }
+
+ return nil
+}
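
The write path above merges concurrently submitted batches and always commits the journal before replaying into the memdb. A minimal usage sketch of the resulting public API (the path and keys are illustrative, and error handling is reduced to log.Fatal):

    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/opt"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/example-db", nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // A batch is applied atomically: journal first, then memdb.
        batch := new(leveldb.Batch)
        batch.Put([]byte("alpha"), []byte("1"))
        batch.Delete([]byte("beta"))

        // Sync forces the journal to be synced before Write returns.
        if err := db.Write(batch, &opt.WriteOptions{Sync: true}); err != nil {
            log.Fatal(err)
        }
    }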
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go
new file mode 100644
index 0000000000..53f13bb24c
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package leveldb provides an implementation of the LevelDB key/value database.
+//
+// Create or open a database:
+//
+// db, err := leveldb.OpenFile("path/to/db", nil)
+// ...
+// defer db.Close()
+// ...
+//
+// Read or modify the database content:
+//
+// // Remember that the contents of the returned slice should not be modified.
+// data, err := db.Get([]byte("key"), nil)
+// ...
+// err = db.Put([]byte("key"), []byte("value"), nil)
+// ...
+// err = db.Delete([]byte("key"), nil)
+// ...
+//
+// Iterate over database content:
+//
+// iter := db.NewIterator(nil, nil)
+// for iter.Next() {
+// // Remember that the contents of the returned slice should not be modified, and
+// // only valid until the next call to Next.
+// key := iter.Key()
+// value := iter.Value()
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Iterate over subset of database content with a particular prefix:
+//
+// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+// for iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Seek-then-Iterate:
+//
+// iter := db.NewIterator(nil, nil)
+// for ok := iter.Seek(key); ok; ok = iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Iterate over subset of database content:
+//
+// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
+// for iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Batch writes:
+//
+// batch := new(leveldb.Batch)
+// batch.Put([]byte("foo"), []byte("value"))
+// batch.Put([]byte("bar"), []byte("another value"))
+// batch.Delete([]byte("baz"))
+// err = db.Write(batch, nil)
+// ...
+//
+// Use bloom filter:
+//
+// o := &opt.Options{
+// Filter: filter.NewBloomFilter(10),
+// }
+// db, err := leveldb.OpenFile("path/to/db", o)
+// ...
+// defer db.Close()
+// ...
+package leveldb
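
Stitched together, the snippets above form a complete program. A runnable sketch (the directory and keys are illustrative):

    package main

    import (
        "fmt"
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/doc-example", nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if err := db.Put([]byte("foo-1"), []byte("bar"), nil); err != nil {
            log.Fatal(err)
        }

        // Iterate everything under the "foo-" prefix.
        iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
        for iter.Next() {
            fmt.Printf("%s = %s\n", iter.Key(), iter.Value())
        }
        iter.Release()
        if err := iter.Error(); err != nil {
            log.Fatal(err)
        }
    }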
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go
new file mode 100644
index 0000000000..c8bd66a5aa
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go
@@ -0,0 +1,19 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+)
+
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrReadOnly = errors.New("leveldb: read-only mode")
+ ErrSnapshotReleased = errors.New("leveldb: snapshot released")
+ ErrIterReleased = errors.New("leveldb: iterator released")
+ ErrClosed = errors.New("leveldb: closed")
+)
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go
new file mode 100644
index 0000000000..dacbf131c5
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package errors provides common error types used throughout leveldb.
+package errors
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ ErrNotFound = New("leveldb: not found")
+ ErrReleased = util.ErrReleased
+ ErrHasReleaser = util.ErrHasReleaser
+)
+
+// New returns an error that formats as the given text.
+func New(text string) error {
+ return errors.New(text)
+}
+
+// ErrCorrupted is the type that wraps errors that indicate corruption in
+// the database.
+type ErrCorrupted struct {
+ File *storage.FileInfo
+ Err error
+}
+
+func (e *ErrCorrupted) Error() string {
+ if e.File != nil {
+ return fmt.Sprintf("%v [file=%v]", e.Err, e.File)
+ } else {
+ return e.Err.Error()
+ }
+}
+
+// NewErrCorrupted creates a new ErrCorrupted error.
+func NewErrCorrupted(f storage.File, err error) error {
+ return &ErrCorrupted{storage.NewFileInfo(f), err}
+}
+
+// IsCorrupted returns a boolean indicating whether the error indicates
+// a corruption.
+func IsCorrupted(err error) bool {
+ switch err.(type) {
+ case *ErrCorrupted:
+ return true
+ case *storage.ErrCorrupted:
+ return true
+ }
+ return false
+}
+
+// ErrMissingFiles is the type that indicates a corruption due to missing
+// files. ErrMissingFiles is always wrapped with ErrCorrupted.
+type ErrMissingFiles struct {
+ Files []*storage.FileInfo
+}
+
+func (e *ErrMissingFiles) Error() string { return "file missing" }
+
+// SetFile sets the 'file info' of the given error to the given file.
+// Currently only ErrCorrupted is supported; other errors are returned unchanged.
+func SetFile(err error, f storage.File) error {
+ switch x := err.(type) {
+ case *ErrCorrupted:
+ x.File = storage.NewFileInfo(f)
+ return x
+ }
+ return err
+}
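
A sketch of how a caller might use this package to separate corruption from ordinary failures; handleGetError is a hypothetical helper, only IsCorrupted and ErrNotFound come from the package above:

    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb/errors"
    )

    // handleGetError routes an error from a read path.
    func handleGetError(err error) {
        switch {
        case err == nil:
            return
        case errors.IsCorrupted(err):
            // Corruption is persistent, not transient; surface it loudly.
            log.Printf("database corrupted: %v", err)
        case err == errors.ErrNotFound:
            log.Printf("key not found")
        default:
            log.Printf("get failed: %v", err)
        }
    }

    func main() {
        handleGetError(errors.ErrNotFound)
    }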
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
new file mode 100644
index 0000000000..37c1e146bc
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/filter"
+)
+
+type iFilter struct {
+ filter.Filter
+}
+
+func (f iFilter) Contains(filter, key []byte) bool {
+ return f.Filter.Contains(filter, iKey(key).ukey())
+}
+
+func (f iFilter) NewGenerator() filter.FilterGenerator {
+ return iFilterGenerator{f.Filter.NewGenerator()}
+}
+
+type iFilterGenerator struct {
+ filter.FilterGenerator
+}
+
+func (g iFilterGenerator) Add(key []byte) {
+ g.FilterGenerator.Add(iKey(key).ukey())
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
new file mode 100644
index 0000000000..bab0e99705
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package filter
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func bloomHash(key []byte) uint32 {
+ return util.Hash(key, 0xbc9f1d34)
+}
+
+type bloomFilter int
+
+// The bloom filter serializes its parameters and is backward compatible
+// with respect to them. Therefore, its parameters are not added to its
+// name.
+func (bloomFilter) Name() string {
+ return "leveldb.BuiltinBloomFilter"
+}
+
+func (f bloomFilter) Contains(filter, key []byte) bool {
+ nBytes := len(filter) - 1
+ if nBytes < 1 {
+ return false
+ }
+ nBits := uint32(nBytes * 8)
+
+ // Use the encoded k so that we can read filters generated by
+ // bloom filters created using different parameters.
+ k := filter[nBytes]
+ if k > 30 {
+ // Reserved for potentially new encodings for short bloom filters.
+ // Consider it a match.
+ return true
+ }
+
+ kh := bloomHash(key)
+ delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+ for j := uint8(0); j < k; j++ {
+ bitpos := kh % nBits
+ if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 {
+ return false
+ }
+ kh += delta
+ }
+ return true
+}
+
+func (f bloomFilter) NewGenerator() FilterGenerator {
+ // Round down to reduce probing cost a little bit.
+ k := uint8(f * 69 / 100) // 0.69 =~ ln(2)
+ if k < 1 {
+ k = 1
+ } else if k > 30 {
+ k = 30
+ }
+ return &bloomFilterGenerator{
+ n: int(f),
+ k: k,
+ }
+}
+
+type bloomFilterGenerator struct {
+ n int
+ k uint8
+
+ keyHashes []uint32
+}
+
+func (g *bloomFilterGenerator) Add(key []byte) {
+ // Use double-hashing to generate a sequence of hash values.
+ // See analysis in [Kirsch,Mitzenmacher 2006].
+ g.keyHashes = append(g.keyHashes, bloomHash(key))
+}
+
+func (g *bloomFilterGenerator) Generate(b Buffer) {
+ // Compute bloom filter size (in both bits and bytes)
+ nBits := uint32(len(g.keyHashes) * g.n)
+ // For small n, we can see a very high false positive rate. Fix it
+ // by enforcing a minimum bloom filter length.
+ if nBits < 64 {
+ nBits = 64
+ }
+ nBytes := (nBits + 7) / 8
+ nBits = nBytes * 8
+
+ dest := b.Alloc(int(nBytes) + 1)
+ dest[nBytes] = g.k
+ for _, kh := range g.keyHashes {
+ delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+ for j := uint8(0); j < g.k; j++ {
+ bitpos := kh % nBits
+ dest[bitpos/8] |= (1 << (bitpos % 8))
+ kh += delta
+ }
+ }
+
+ g.keyHashes = g.keyHashes[:0]
+}
+
+// NewBloomFilter creates a new initialized bloom filter for given
+// bitsPerKey.
+//
+// Since bitsPerKey is persisted individually for each bloom filter
+// serialization, bloom filters are backwards compatible with respect to
+// changing bitsPerKey. This means that no big performance penalty will
+// be experienced when changing the parameter. See documentation for
+// opt.Options.Filter for more information.
+func NewBloomFilter(bitsPerKey int) Filter {
+ return bloomFilter(bitsPerKey)
+}
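
To make the sizing concrete: NewBloomFilter(10) yields k = 10*69/100 = 6 probes per key; generating a filter for two keys gives nBits = 2*10 = 20, which is padded up to the 64-bit minimum, so the block is 8 bytes of bits plus the trailing k byte. A usage sketch, assuming the vendored util.Buffer (which satisfies filter.Buffer):

    package main

    import (
        "fmt"

        "github.com/syndtr/goleveldb/leveldb/filter"
        "github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        f := filter.NewBloomFilter(10) // 10 bits per key => k = 6

        g := f.NewGenerator()
        g.Add([]byte("alpha"))
        g.Add([]byte("beta"))

        buf := new(util.Buffer)
        g.Generate(buf) // 9 bytes: 64 filter bits + 1 byte encoding k

        data := buf.Bytes()
        fmt.Println(f.Contains(data, []byte("alpha"))) // true
        fmt.Println(f.Contains(data, []byte("beta")))  // true
        // Almost certainly false, though a bloom filter may
        // report a false positive.
        fmt.Println(f.Contains(data, []byte("gamma")))
    }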
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
new file mode 100644
index 0000000000..7a925c5a86
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package filter provides the interface and an implementation of a
+// probabilistic data structure.
+//
+// The filter is responsible for creating a small filter from a set of keys.
+// This filter will then be used to test whether a key is a member of the set.
+// In many cases, a filter can cut down the number of disk seeks from a
+// handful to a single disk seek per DB.Get call.
+package filter
+
+// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods.
+type Buffer interface {
+ // Alloc allocates an n-byte slice from the buffer. This also advances
+ // the write offset.
+ Alloc(n int) []byte
+
+ // Write appends the contents of p to the buffer.
+ Write(p []byte) (n int, err error)
+
+ // WriteByte appends the byte c to the buffer.
+ WriteByte(c byte) error
+}
+
+// Filter is the filter policy.
+type Filter interface {
+ // Name returns the name of this policy.
+ //
+ // Note that if the filter encoding changes in an incompatible way,
+ // the name returned by this method must be changed. Otherwise, old
+ // incompatible filters may be passed to methods of this type.
+ Name() string
+
+ // NewGenerator creates a new filter generator.
+ NewGenerator() FilterGenerator
+
+ // Contains returns true if the filter contains the given key.
+ //
+ // The filter argument is a filter generated by the filter generator.
+ Contains(filter, key []byte) bool
+}
+
+// FilterGenerator is the filter generator.
+type FilterGenerator interface {
+ // Add adds a key to the filter generator.
+ //
+ // The key may become invalid after this method returns, therefore the
+ // key must be copied if the implementation requires keeping it for
+ // later use. The key should not be modified directly; doing so may
+ // cause undefined results.
+ Add(key []byte)
+
+ // Generate generates filters based on the keys passed so far. After a
+ // call to Generate the filter generator may be reset, depending on the implementation.
+ Generate(b Buffer)
+}
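
A deliberately trivial Filter implementation against the interface above, written as if inside this package; it remembers nothing and reports every key as possibly present, which is semantically valid because Contains may return false positives but never false negatives. Purely illustrative, not upstream code:

    // matchAllFilter generates an empty filter block and matches all keys.
    type matchAllFilter struct{}

    func (matchAllFilter) Name() string { return "illustrative.MatchAll" }

    func (matchAllFilter) NewGenerator() FilterGenerator { return matchAllGenerator{} }

    func (matchAllFilter) Contains(filter, key []byte) bool { return true }

    type matchAllGenerator struct{}

    // Add ignores the key; nothing needs to be remembered.
    func (matchAllGenerator) Add(key []byte) {}

    // Generate emits no bytes; the filter block stays empty.
    func (matchAllGenerator) Generate(b Buffer) {}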
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
new file mode 100644
index 0000000000..a23ab05f70
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
@@ -0,0 +1,184 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// BasicArray is the interface that wraps basic Len and Search method.
+type BasicArray interface {
+ // Len returns length of the array.
+ Len() int
+
+ // Search finds the smallest index that points to a key that is greater
+ // than or equal to the given key.
+ Search(key []byte) int
+}
+
+// Array is the interface that wraps BasicArray and basic Index method.
+type Array interface {
+ BasicArray
+
+ // Index returns the key/value pair at index i.
+ Index(i int) (key, value []byte)
+}
+
+// ArrayIndexer is the interface that wraps BasicArray and the basic Get method.
+type ArrayIndexer interface {
+ BasicArray
+
+ // Get returns a new data iterator for index i.
+ Get(i int) Iterator
+}
+
+type basicArrayIterator struct {
+ util.BasicReleaser
+ array BasicArray
+ pos int
+ err error
+}
+
+func (i *basicArrayIterator) Valid() bool {
+ return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
+}
+
+func (i *basicArrayIterator) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.array.Len() == 0 {
+ i.pos = -1
+ return false
+ }
+ i.pos = 0
+ return true
+}
+
+func (i *basicArrayIterator) Last() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ n := i.array.Len()
+ if n == 0 {
+ i.pos = 0
+ return false
+ }
+ i.pos = n - 1
+ return true
+}
+
+func (i *basicArrayIterator) Seek(key []byte) bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ n := i.array.Len()
+ if n == 0 {
+ i.pos = 0
+ return false
+ }
+ i.pos = i.array.Search(key)
+ if i.pos >= n {
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Next() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.pos++
+ if n := i.array.Len(); i.pos >= n {
+ i.pos = n
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Prev() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.pos--
+ if i.pos < 0 {
+ i.pos = -1
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Error() error { return i.err }
+
+type arrayIterator struct {
+ basicArrayIterator
+ array Array
+ pos int
+ key, value []byte
+}
+
+func (i *arrayIterator) updateKV() {
+ if i.pos == i.basicArrayIterator.pos {
+ return
+ }
+ i.pos = i.basicArrayIterator.pos
+ if i.Valid() {
+ i.key, i.value = i.array.Index(i.pos)
+ } else {
+ i.key = nil
+ i.value = nil
+ }
+}
+
+func (i *arrayIterator) Key() []byte {
+ i.updateKV()
+ return i.key
+}
+
+func (i *arrayIterator) Value() []byte {
+ i.updateKV()
+ return i.value
+}
+
+type arrayIteratorIndexer struct {
+ basicArrayIterator
+ array ArrayIndexer
+}
+
+func (i *arrayIteratorIndexer) Get() Iterator {
+ if i.Valid() {
+ return i.array.Get(i.basicArrayIterator.pos)
+ }
+ return nil
+}
+
+// NewArrayIterator returns an iterator from the given array.
+func NewArrayIterator(array Array) Iterator {
+ return &arrayIterator{
+ basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+ array: array,
+ pos: -1,
+ }
+}
+
+// NewArrayIndexer returns an index iterator from the given array.
+func NewArrayIndexer(array ArrayIndexer) IteratorIndexer {
+ return &arrayIteratorIndexer{
+ basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+ array: array,
+ }
+}
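
A sketch of adapting a plain sorted slice to the Array interface above so it can be walked with NewArrayIterator; kvArray is an illustrative name, not upstream API:

    package main

    import (
        "fmt"
        "sort"

        "github.com/syndtr/goleveldb/leveldb/iterator"
    )

    // kvArray adapts parallel, ascending-sorted slices to iterator.Array.
    type kvArray struct {
        keys, values []string
    }

    func (a kvArray) Len() int { return len(a.keys) }

    // Search returns the smallest index whose key is >= the given key.
    func (a kvArray) Search(key []byte) int {
        return sort.SearchStrings(a.keys, string(key))
    }

    func (a kvArray) Index(i int) (key, value []byte) {
        return []byte(a.keys[i]), []byte(a.values[i])
    }

    func main() {
        arr := kvArray{keys: []string{"a", "b", "c"}, values: []string{"1", "2", "3"}}
        it := iterator.NewArrayIterator(arr)
        for ok := it.Seek([]byte("b")); ok; ok = it.Next() {
            fmt.Printf("%s = %s\n", it.Key(), it.Value()) // b = 2, c = 3
        }
        it.Release()
    }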
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
new file mode 100644
index 0000000000..939adbb933
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// IteratorIndexer is the interface that wraps CommonIterator and the basic Get
+// method. IteratorIndexer provides the index for an indexed iterator.
+type IteratorIndexer interface {
+ CommonIterator
+
+ // Get returns a new data iterator for the current position, or nil if
+ // done.
+ Get() Iterator
+}
+
+type indexedIterator struct {
+ util.BasicReleaser
+ index IteratorIndexer
+ strict bool
+
+ data Iterator
+ err error
+ errf func(err error)
+ closed bool
+}
+
+func (i *indexedIterator) setData() {
+ if i.data != nil {
+ i.data.Release()
+ }
+ i.data = i.index.Get()
+}
+
+func (i *indexedIterator) clearData() {
+ if i.data != nil {
+ i.data.Release()
+ }
+ i.data = nil
+}
+
+func (i *indexedIterator) indexErr() {
+ if err := i.index.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ i.err = err
+ }
+}
+
+func (i *indexedIterator) dataErr() bool {
+ if err := i.data.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ if i.strict || !errors.IsCorrupted(err) {
+ i.err = err
+ return true
+ }
+ }
+ return false
+}
+
+func (i *indexedIterator) Valid() bool {
+ return i.data != nil && i.data.Valid()
+}
+
+func (i *indexedIterator) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.First() {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ return i.Next()
+}
+
+func (i *indexedIterator) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.Last() {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ if !i.data.Last() {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Prev()
+ }
+ return true
+}
+
+func (i *indexedIterator) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.Seek(key) {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ if !i.data.Seek(key) {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Next()
+ }
+ return true
+}
+
+func (i *indexedIterator) Next() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch {
+ case i.data != nil && !i.data.Next():
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ fallthrough
+ case i.data == nil:
+ if !i.index.Next() {
+ i.indexErr()
+ return false
+ }
+ i.setData()
+ return i.Next()
+ }
+ return true
+}
+
+func (i *indexedIterator) Prev() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch {
+ case i.data != nil && !i.data.Prev():
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ fallthrough
+ case i.data == nil:
+ if !i.index.Prev() {
+ i.indexErr()
+ return false
+ }
+ i.setData()
+ if !i.data.Last() {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Prev()
+ }
+ }
+ return true
+}
+
+func (i *indexedIterator) Key() []byte {
+ if i.data == nil {
+ return nil
+ }
+ return i.data.Key()
+}
+
+func (i *indexedIterator) Value() []byte {
+ if i.data == nil {
+ return nil
+ }
+ return i.data.Value()
+}
+
+func (i *indexedIterator) Release() {
+ i.clearData()
+ i.index.Release()
+ i.BasicReleaser.Release()
+}
+
+func (i *indexedIterator) Error() error {
+ if i.err != nil {
+ return i.err
+ }
+ if err := i.index.Error(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (i *indexedIterator) SetErrorCallback(f func(err error)) {
+ i.errf = f
+}
+
+// NewIndexedIterator returns an 'indexed iterator'. An 'index iterator' is an
+// iterator that returns another iterator, a 'data iterator'. A 'data iterator'
+// is the iterator that contains the actual key/value pairs.
+//
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'indexed iterator'; otherwise the iterator will
+// continue to the next 'data iterator'. Corruption on the 'index iterator' will not
+// be ignored and will halt the iterator.
+func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
+ return &indexedIterator{index: index, strict: strict}
+}
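
Building on that, a two-level sketch of the ArrayIndexer side, reusing the imports and the kvArray adapter from the previous sketch; blockIndex and lastKeys are illustrative names:

    // blockIndex exposes a slice of sorted kvArray "blocks" as an
    // iterator.ArrayIndexer. lastKeys holds the largest key in each block,
    // so Search can locate the first block that may contain a key.
    type blockIndex struct {
        lastKeys []string
        blocks   []kvArray
    }

    func (ix blockIndex) Len() int { return len(ix.blocks) }

    func (ix blockIndex) Search(key []byte) int {
        return sort.SearchStrings(ix.lastKeys, string(key))
    }

    func (ix blockIndex) Get(i int) iterator.Iterator {
        return iterator.NewArrayIterator(ix.blocks[i])
    }

    // All blocks can then be walked as one flat, ordered sequence:
    //
    //	it := iterator.NewIndexedIterator(iterator.NewArrayIndexer(ix), true)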
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
new file mode 100644
index 0000000000..c2522860b0
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -0,0 +1,131 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package iterator provides interfaces and implementations to traverse over
+// the contents of a database.
+package iterator
+
+import (
+ "errors"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ ErrIterReleased = errors.New("leveldb/iterator: iterator released")
+)
+
+// IteratorSeeker is the interface that wraps the 'seek methods'.
+type IteratorSeeker interface {
+ // First moves the iterator to the first key/value pair. If the iterator
+ // only contains one key/value pair then First and Last would move
+ // to the same key/value pair.
+ // It returns whether such a pair exists.
+ First() bool
+
+ // Last moves the iterator to the last key/value pair. If the iterator
+ // only contains one key/value pair then First and Last would move
+ // to the same key/value pair.
+ // It returns whether such a pair exists.
+ Last() bool
+
+ // Seek moves the iterator to the first key/value pair whose key is greater
+ // than or equal to the given key.
+ // It returns whether such a pair exists.
+ //
+ // It is safe to modify the contents of the argument after Seek returns.
+ Seek(key []byte) bool
+
+ // Next moves the iterator to the next key/value pair.
+ // It returns false if the iterator is exhausted.
+ Next() bool
+
+ // Prev moves the iterator to the previous key/value pair.
+ // It returns false if the iterator is exhausted.
+ Prev() bool
+}
+
+// CommonIterator is the interface that wraps common iterator methods.
+type CommonIterator interface {
+ IteratorSeeker
+
+ // util.Releaser is the interface that wraps the basic Release method.
+ // When called, Release will release any resources associated with the
+ // iterator.
+ util.Releaser
+
+ // util.ReleaseSetter is the interface that wraps the basic SetReleaser
+ // method.
+ util.ReleaseSetter
+
+ // TODO: Remove this when ready.
+ Valid() bool
+
+ // Error returns any accumulated error. Exhausting all the key/value pairs
+ // is not considered to be an error.
+ Error() error
+}
+
+// Iterator iterates over a DB's key/value pairs in key order.
+//
+// When an error is encountered, any 'seek method' will return false and
+// will yield no key/value pairs. The error can be queried by calling the
+// Error method. Calling Release is still necessary.
+//
+// An iterator must be released after use, but it is not necessary to read
+// an iterator until exhaustion.
+// Also, an iterator is not necessarily goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+type Iterator interface {
+ CommonIterator
+
+ // Key returns the key of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seeks method'.
+ Key() []byte
+
+ // Value returns the value of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seeks method'.
+ Value() []byte
+}
+
+// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback
+// method.
+//
+// ErrorCallbackSetter is implemented by the indexed and merged iterators.
+type ErrorCallbackSetter interface {
+ // SetErrorCallback allows setting an error callback for the corresponding
+ // iterator. Use nil to clear the callback.
+ SetErrorCallback(f func(err error))
+}
+
+type emptyIterator struct {
+ util.BasicReleaser
+ err error
+}
+
+func (i *emptyIterator) rErr() {
+ if i.err == nil && i.Released() {
+ i.err = ErrIterReleased
+ }
+}
+
+func (*emptyIterator) Valid() bool { return false }
+func (i *emptyIterator) First() bool { i.rErr(); return false }
+func (i *emptyIterator) Last() bool { i.rErr(); return false }
+func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
+func (i *emptyIterator) Next() bool { i.rErr(); return false }
+func (i *emptyIterator) Prev() bool { i.rErr(); return false }
+func (*emptyIterator) Key() []byte { return nil }
+func (*emptyIterator) Value() []byte { return nil }
+func (i *emptyIterator) Error() error { return i.err }
+
+// NewEmptyIterator creates an empty iterator. The err parameter can be
+// nil, but if not nil the given err will be returned by the Error method.
+func NewEmptyIterator(err error) Iterator {
+ return &emptyIterator{err: err}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
new file mode 100644
index 0000000000..1a7e29df8f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+type mergedIterator struct {
+ cmp comparer.Comparer
+ iters []Iterator
+ strict bool
+
+ keys [][]byte
+ index int
+ dir dir
+ err error
+ errf func(err error)
+ releaser util.Releaser
+}
+
+func assertKey(key []byte) []byte {
+ if key == nil {
+ panic("leveldb/iterator: nil key")
+ }
+ return key
+}
+
+func (i *mergedIterator) iterErr(iter Iterator) bool {
+ if err := iter.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ if i.strict || !errors.IsCorrupted(err) {
+ i.err = err
+ return true
+ }
+ }
+ return false
+}
+
+func (i *mergedIterator) Valid() bool {
+ return i.err == nil && i.dir > dirEOI
+}
+
+func (i *mergedIterator) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.First():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirSOI
+ return i.next()
+}
+
+func (i *mergedIterator) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.Last():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirEOI
+ return i.prev()
+}
+
+func (i *mergedIterator) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.Seek(key):
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirSOI
+ return i.next()
+}
+
+func (i *mergedIterator) next() bool {
+ var key []byte
+ if i.dir == dirForward {
+ key = i.keys[i.index]
+ }
+ for x, tkey := range i.keys {
+ if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) {
+ key = tkey
+ i.index = x
+ }
+ }
+ if key == nil {
+ i.dir = dirEOI
+ return false
+ }
+ i.dir = dirForward
+ return true
+}
+
+func (i *mergedIterator) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirSOI:
+ return i.First()
+ case dirBackward:
+ key := append([]byte{}, i.keys[i.index]...)
+ if !i.Seek(key) {
+ return false
+ }
+ return i.Next()
+ }
+
+ x := i.index
+ iter := i.iters[x]
+ switch {
+ case iter.Next():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ return i.next()
+}
+
+func (i *mergedIterator) prev() bool {
+ var key []byte
+ if i.dir == dirBackward {
+ key = i.keys[i.index]
+ }
+ for x, tkey := range i.keys {
+ if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) {
+ key = tkey
+ i.index = x
+ }
+ }
+ if key == nil {
+ i.dir = dirSOI
+ return false
+ }
+ i.dir = dirBackward
+ return true
+}
+
+func (i *mergedIterator) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirEOI:
+ return i.Last()
+ case dirForward:
+ key := append([]byte{}, i.keys[i.index]...)
+ for x, iter := range i.iters {
+ if x == i.index {
+ continue
+ }
+ seek := iter.Seek(key)
+ switch {
+ case seek && iter.Prev(), !seek && iter.Last():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ }
+
+ x := i.index
+ iter := i.iters[x]
+ switch {
+ case iter.Prev():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ return i.prev()
+}
+
+func (i *mergedIterator) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.keys[i.index]
+}
+
+func (i *mergedIterator) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.iters[i.index].Value()
+}
+
+func (i *mergedIterator) Release() {
+ if i.dir != dirReleased {
+ i.dir = dirReleased
+ for _, iter := range i.iters {
+ iter.Release()
+ }
+ i.iters = nil
+ i.keys = nil
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+ }
+}
+
+func (i *mergedIterator) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *mergedIterator) Error() error {
+ return i.err
+}
+
+func (i *mergedIterator) SetErrorCallback(f func(err error)) {
+ i.errf = f
+}
+
+// NewMergedIterator returns an iterator that merges its input. Walking the
+// resultant iterator will return all key/value pairs of all input iterators
+// in strictly increasing key order, as defined by cmp.
+// The input's key ranges may overlap, but there are assumed to be no duplicate
+// keys: if iters[i] contains a key k then iters[j] will not contain that key k.
+// None of the iters may be nil.
+//
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'merged iterator'; otherwise the iterator will
+// continue to the next 'input iterator'.
+func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
+ return &mergedIterator{
+ iters: iters,
+ cmp: cmp,
+ strict: strict,
+ keys: make([][]byte, len(iters)),
+ }
+}
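
A sketch of merging two disjoint sorted sources into one ordered stream, reusing the kvArray adapter from the array_iter sketch above; DefaultComparer gives plain bytewise ordering:

    a := iterator.NewArrayIterator(kvArray{keys: []string{"a", "c"}, values: []string{"1", "3"}})
    b := iterator.NewArrayIterator(kvArray{keys: []string{"b", "d"}, values: []string{"2", "4"}})

    merged := iterator.NewMergedIterator(
        []iterator.Iterator{a, b},
        comparer.DefaultComparer, // from github.com/syndtr/goleveldb/leveldb/comparer
        true,                     // strict: corruption halts iteration
    )
    for merged.Next() {
        fmt.Printf("%s = %s\n", merged.Key(), merged.Value()) // a, b, c, d in order
    }
    merged.Release()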
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
new file mode 100644
index 0000000000..6519ec660e
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
@@ -0,0 +1,520 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
+// License, author and contributor information can be found at the URLs below, respectively:
+// https://code.google.com/p/leveldb-go/source/browse/LICENSE
+// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
+// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
+
+// Package journal reads and writes sequences of journals. Each journal is a stream
+// of bytes that completes before the next journal starts.
+//
+// When reading, call Next to obtain an io.Reader for the next journal. Next will
+// return io.EOF when there are no more journals. It is valid to call Next
+// without reading the current journal to exhaustion.
+//
+// When writing, call Next to obtain an io.Writer for the next journal. Calling
+// Next finishes the current journal. Call Close to finish the final journal.
+//
+// Optionally, call Flush to finish the current journal and flush the underlying
+// writer without starting a new journal. To start a new journal after flushing,
+// call Next.
+//
+// Neither Readers nor Writers are safe for concurrent use.
+//
+// Example code:
+// func read(r io.Reader) ([]string, error) {
+// var ss []string
+// journals := journal.NewReader(r, nil, true, true)
+// for {
+// j, err := journals.Next()
+// if err == io.EOF {
+// break
+// }
+// if err != nil {
+// return nil, err
+// }
+// s, err := ioutil.ReadAll(j)
+// if err != nil {
+// return nil, err
+// }
+// ss = append(ss, string(s))
+// }
+// return ss, nil
+// }
+//
+// func write(w io.Writer, ss []string) error {
+// journals := journal.NewWriter(w)
+// for _, s := range ss {
+// j, err := journals.Next()
+// if err != nil {
+// return err
+// }
+// if _, err := j.Write([]byte(s)); err != nil {
+// return err
+// }
+// }
+// return journals.Close()
+// }
+//
+// The wire format is that the stream is divided into 32KiB blocks, and each
+// block contains a number of tightly packed chunks. Chunks cannot cross block
+// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
+// block must be zero.
+//
+// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4
+// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type)
+// followed by a payload. The checksum is over the chunk type and the payload.
+//
+// There are four chunk types: whether the chunk is the full journal, or the
+// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
+// has one first chunk, zero or more middle chunks, and one last chunk.
+//
+// The wire format allows for limited recovery in the face of data corruption:
+// on a format error (such as a checksum mismatch), the reader moves to the
+// next block and looks for the next full or first chunk.
+package journal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// These constants are part of the wire format and should not be changed.
+const (
+ fullChunkType = 1
+ firstChunkType = 2
+ middleChunkType = 3
+ lastChunkType = 4
+)
+
+const (
+ blockSize = 32 * 1024
+ headerSize = 7
+)
+
+type flusher interface {
+ Flush() error
+}
+
+// ErrCorrupted is the error type generated by a corrupted block or chunk.
+type ErrCorrupted struct {
+ Size int
+ Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
+}
+
+// Dropper is the interface that wraps the simple Drop method. The Drop
+// method will be called when the journal reader drops a block or chunk.
+type Dropper interface {
+ Drop(err error)
+}
+
+// Reader reads journals from an underlying io.Reader.
+type Reader struct {
+ // r is the underlying reader.
+ r io.Reader
+ // the dropper.
+ dropper Dropper
+ // strict flag.
+ strict bool
+ // checksum flag.
+ checksum bool
+ // seq is the sequence number of the current journal.
+ seq int
+ // buf[i:j] is the unread portion of the current chunk's payload.
+ // The low bound, i, excludes the chunk header.
+ i, j int
+ // n is the number of bytes of buf that are valid. Once reading has started,
+ // only the final block can have n < blockSize.
+ n int
+ // last is whether the current chunk is the last chunk of the journal.
+ last bool
+ // err is any accumulated error.
+ err error
+ // buf is the buffer.
+ buf [blockSize]byte
+}
+
+// NewReader returns a new reader. The dropper may be nil, and if
+// strict is true then a corrupted or invalid chunk will halt the journal
+// reader entirely.
+func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
+ return &Reader{
+ r: r,
+ dropper: dropper,
+ strict: strict,
+ checksum: checksum,
+ last: true,
+ }
+}
+
+var errSkip = errors.New("leveldb/journal: skipped")
+
+func (r *Reader) corrupt(n int, reason string, skip bool) error {
+ if r.dropper != nil {
+ r.dropper.Drop(&ErrCorrupted{n, reason})
+ }
+ if r.strict && !skip {
+ r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason})
+ return r.err
+ }
+ return errSkip
+}
+
+// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
+// next block into the buffer if necessary.
+func (r *Reader) nextChunk(first bool) error {
+ for {
+ if r.j+headerSize <= r.n {
+ checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
+ length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
+ chunkType := r.buf[r.j+6]
+
+ if checksum == 0 && length == 0 && chunkType == 0 {
+ // Drop entire block.
+ m := r.n - r.j
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(m, "zero header", false)
+ } else {
+ m := r.n - r.j
+ r.i = r.j + headerSize
+ r.j = r.j + headerSize + int(length)
+ if r.j > r.n {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(m, "chunk length overflows block", false)
+ } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(m, "checksum mismatch", false)
+ }
+ }
+ if first && chunkType != fullChunkType && chunkType != firstChunkType {
+ m := r.j - r.i
+ r.i = r.j
+ // Report the error, but skip it.
+ return r.corrupt(m+headerSize, "orphan chunk", true)
+ }
+ r.last = chunkType == fullChunkType || chunkType == lastChunkType
+ return nil
+ }
+
+ // The last block.
+ if r.n < blockSize && r.n > 0 {
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
+ }
+ r.err = io.EOF
+ return r.err
+ }
+
+ // Read block.
+ n, err := io.ReadFull(r.r, r.buf[:])
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return err
+ }
+ if n == 0 {
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
+ }
+ r.err = io.EOF
+ return r.err
+ }
+ r.i, r.j, r.n = 0, 0, n
+ }
+}
+
+// Next returns a reader for the next journal. It returns io.EOF if there are no
+// more journals. The reader returned becomes stale after the next Next call,
+// and should no longer be used. If strict is false, the reader will return an
+// io.ErrUnexpectedEOF error when a corrupted journal is found.
+func (r *Reader) Next() (io.Reader, error) {
+ r.seq++
+ if r.err != nil {
+ return nil, r.err
+ }
+ r.i = r.j
+ for {
+ if err := r.nextChunk(true); err == nil {
+ break
+ } else if err != errSkip {
+ return nil, err
+ }
+ }
+ return &singleReader{r, r.seq, nil}, nil
+}
+
+// Reset resets the journal reader, allowing reuse of the journal reader. Reset
+// returns the last accumulated error.
+func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
+ r.seq++
+ err := r.err
+ r.r = reader
+ r.dropper = dropper
+ r.strict = strict
+ r.checksum = checksum
+ r.i = 0
+ r.j = 0
+ r.n = 0
+ r.last = true
+ r.err = nil
+ return err
+}
+
+type singleReader struct {
+ r *Reader
+ seq int
+ err error
+}
+
+func (x *singleReader) Read(p []byte) (int, error) {
+ r := x.r
+ if r.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale reader")
+ }
+ if x.err != nil {
+ return 0, x.err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ for r.i == r.j {
+ if r.last {
+ return 0, io.EOF
+ }
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
+ return 0, x.err
+ }
+ }
+ n := copy(p, r.buf[r.i:r.j])
+ r.i += n
+ return n, nil
+}
+
+func (x *singleReader) ReadByte() (byte, error) {
+ r := x.r
+ if r.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale reader")
+ }
+ if x.err != nil {
+ return 0, x.err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ for r.i == r.j {
+ if r.last {
+ return 0, io.EOF
+ }
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
+ return 0, x.err
+ }
+ }
+ c := r.buf[r.i]
+ r.i++
+ return c, nil
+}
+
+// Writer writes journals to an underlying io.Writer.
+type Writer struct {
+ // w is the underlying writer.
+ w io.Writer
+ // seq is the sequence number of the current journal.
+ seq int
+ // f is w as a flusher.
+ f flusher
+ // buf[i:j] is the bytes that will become the current chunk.
+ // The low bound, i, includes the chunk header.
+ i, j int
+ // buf[:written] has already been written to w.
+ // written is zero unless Flush has been called.
+ written int
+ // first is whether the current chunk is the first chunk of the journal.
+ first bool
+ // pending is whether a chunk is buffered but not yet written.
+ pending bool
+ // err is any accumulated error.
+ err error
+ // buf is the buffer.
+ buf [blockSize]byte
+}
+
+// NewWriter returns a new Writer.
+func NewWriter(w io.Writer) *Writer {
+ f, _ := w.(flusher)
+ return &Writer{
+ w: w,
+ f: f,
+ }
+}
+
+// fillHeader fills in the header for the pending chunk.
+func (w *Writer) fillHeader(last bool) {
+ if w.i+headerSize > w.j || w.j > blockSize {
+ panic("leveldb/journal: bad writer state")
+ }
+ if last {
+ if w.first {
+ w.buf[w.i+6] = fullChunkType
+ } else {
+ w.buf[w.i+6] = lastChunkType
+ }
+ } else {
+ if w.first {
+ w.buf[w.i+6] = firstChunkType
+ } else {
+ w.buf[w.i+6] = middleChunkType
+ }
+ }
+ binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value())
+ binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize))
+}
+
+// writeBlock writes the buffered block to the underlying writer, and reserves
+// space for the next chunk's header.
+func (w *Writer) writeBlock() {
+ _, w.err = w.w.Write(w.buf[w.written:])
+ w.i = 0
+ w.j = headerSize
+ w.written = 0
+}
+
+// writePending finishes the current journal and writes the buffer to the
+// underlying writer.
+func (w *Writer) writePending() {
+ if w.err != nil {
+ return
+ }
+ if w.pending {
+ w.fillHeader(true)
+ w.pending = false
+ }
+ _, w.err = w.w.Write(w.buf[w.written:w.j])
+ w.written = w.j
+}
+
+// Close finishes the current journal and closes the writer.
+func (w *Writer) Close() error {
+ w.seq++
+ w.writePending()
+ if w.err != nil {
+ return w.err
+ }
+ w.err = errors.New("leveldb/journal: closed Writer")
+ return nil
+}
+
+// Flush finishes the current journal, writes to the underlying writer, and
+// flushes it if that writer implements interface{ Flush() error }.
+func (w *Writer) Flush() error {
+ w.seq++
+ w.writePending()
+ if w.err != nil {
+ return w.err
+ }
+ if w.f != nil {
+ w.err = w.f.Flush()
+ return w.err
+ }
+ return nil
+}
+
+// Reset resets the journal writer, allowing reuse of the journal writer. Reset
+// will also close the journal writer if it is not already closed.
+func (w *Writer) Reset(writer io.Writer) (err error) {
+ w.seq++
+ if w.err == nil {
+ w.writePending()
+ err = w.err
+ }
+ w.w = writer
+ w.f, _ = writer.(flusher)
+ w.i = 0
+ w.j = 0
+ w.written = 0
+ w.first = false
+ w.pending = false
+ w.err = nil
+ return
+}
+
+// Next returns a writer for the next journal. The writer returned becomes stale
+// after the next Close, Flush or Next call, and should no longer be used.
+func (w *Writer) Next() (io.Writer, error) {
+ w.seq++
+ if w.err != nil {
+ return nil, w.err
+ }
+ if w.pending {
+ w.fillHeader(true)
+ }
+ w.i = w.j
+ w.j = w.j + headerSize
+ // Check if there is room in the block for the header.
+ if w.j > blockSize {
+ // Fill in the rest of the block with zeroes.
+ for k := w.i; k < blockSize; k++ {
+ w.buf[k] = 0
+ }
+ w.writeBlock()
+ if w.err != nil {
+ return nil, w.err
+ }
+ }
+ w.first = true
+ w.pending = true
+ return singleWriter{w, w.seq}, nil
+}
+
+type singleWriter struct {
+ w *Writer
+ seq int
+}
+
+func (x singleWriter) Write(p []byte) (int, error) {
+ w := x.w
+ if w.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale writer")
+ }
+ if w.err != nil {
+ return 0, w.err
+ }
+ n0 := len(p)
+ for len(p) > 0 {
+ // Write a block, if it is full.
+ if w.j == blockSize {
+ w.fillHeader(false)
+ w.writeBlock()
+ if w.err != nil {
+ return 0, w.err
+ }
+ w.first = false
+ }
+ // Copy bytes into the buffer.
+ n := copy(w.buf[w.j:], p)
+ w.j += n
+ p = p[n:]
+ }
+ return n0, nil
+}
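
To make the 7-byte chunk header concrete, this sketch hand-assembles the header for a tiny payload exactly as fillHeader does; a journal that fits in one chunk gets fullChunkType (1), and the checksum covers the type byte plus the payload. The payload is illustrative:

    package main

    import (
        "encoding/binary"
        "fmt"

        "github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        payload := []byte("hello")
        buf := make([]byte, 7+len(payload))
        copy(buf[7:], payload)

        // Byte 6: chunk type; 1 == fullChunkType (whole journal in one chunk).
        buf[6] = 1
        // Bytes 4..5: little-endian payload length.
        binary.LittleEndian.PutUint16(buf[4:6], uint16(len(payload)))
        // Bytes 0..3: CRC over the chunk type and the payload.
        binary.LittleEndian.PutUint32(buf[0:4], util.NewCRC(buf[6:]).Value())

        fmt.Printf("% x\n", buf)
    }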
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
new file mode 100644
index 0000000000..572ae8150c
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+)
+
+type ErrIkeyCorrupted struct {
+ Ikey []byte
+ Reason string
+}
+
+func (e *ErrIkeyCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrIkeyCorrupted(ikey []byte, reason string) error {
+ return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type kType int
+
+func (kt kType) String() string {
+ switch kt {
+ case ktDel:
+ return "d"
+ case ktVal:
+ return "v"
+ }
+ return "x"
+}
+
+// Value types encoded as the last component of internal keys.
+// Don't modify; these values are saved to disk.
+const (
+ ktDel kType = iota
+ ktVal
+)
+
+// ktSeek defines the kType that should be passed when constructing an
+// internal key for seeking to a particular sequence number (since we
+// sort sequence numbers in decreasing order and the value type is
+// embedded as the low 8 bits in the sequence number in internal keys,
+// we need to use the highest-numbered ValueType, not the lowest).
+const ktSeek = ktVal
+
+const (
+ // Maximum value possible for a sequence number; the low 8 bits are
+ // used by the value type, so both can be packed together into a single
+ // 64-bit integer.
+ kMaxSeq uint64 = (uint64(1) << 56) - 1
+ // Maximum value possible for packed sequence number and type.
+ kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
+)
+
+// Maximum number encoded in bytes.
+var kMaxNumBytes = make([]byte, 8)
+
+func init() {
+ binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
+}
+
+type iKey []byte
+
+func newIkey(ukey []byte, seq uint64, kt kType) iKey {
+ if seq > kMaxSeq {
+ panic("leveldb: invalid sequence number")
+ } else if kt > ktVal {
+ panic("leveldb: invalid type")
+ }
+
+ ik := make(iKey, len(ukey)+8)
+ copy(ik, ukey)
+ binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt))
+ return ik
+}
+
+func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
+ if len(ik) < 8 {
+ return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
+ }
+ num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
+ seq, kt = uint64(num>>8), kType(num&0xff)
+ if kt > ktVal {
+ return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
+ }
+ ukey = ik[:len(ik)-8]
+ return
+}
+
+func validIkey(ik []byte) bool {
+ _, _, _, err := parseIkey(ik)
+ return err == nil
+}
+
+func (ik iKey) assert() {
+ if ik == nil {
+ panic("leveldb: nil iKey")
+ }
+ if len(ik) < 8 {
+ panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
+ }
+}
+
+func (ik iKey) ukey() []byte {
+ ik.assert()
+ return ik[:len(ik)-8]
+}
+
+func (ik iKey) num() uint64 {
+ ik.assert()
+ return binary.LittleEndian.Uint64(ik[len(ik)-8:])
+}
+
+func (ik iKey) parseNum() (seq uint64, kt kType) {
+ num := ik.num()
+ seq, kt = uint64(num>>8), kType(num&0xff)
+ if kt > ktVal {
+ panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+ }
+ return
+}
+
+func (ik iKey) String() string {
+ if ik == nil {
+ return ""
+ }
+
+ if ukey, seq, kt, err := parseIkey(ik); err == nil {
+ return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
+ } else {
+ return ""
+ }
+}
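
The internal-key layout above is compact enough to demonstrate standalone. Since newIkey and parseIkey are unexported, this sketch mirrors their encoding rather than calling them; all values are illustrative:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Pack like newIkey: ukey bytes, then (seq<<8)|kt as 8 LE bytes.
	ukey := []byte("answer")
	seq, kt := uint64(42), uint64(1) // 1 == ktVal
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], seq<<8|kt)

	// Unpack like parseIkey: split off the trailing 8 bytes.
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	fmt.Println(string(ik[:len(ik)-8]), num>>8, num&0xff) // answer 42 1
}
```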
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
new file mode 100644
index 0000000000..1395bd9280
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -0,0 +1,471 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package memdb provides an in-memory key/value database implementation.
+package memdb
+
+import (
+ "math/rand"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrIterReleased = errors.New("leveldb/memdb: iterator released")
+)
+
+const tMaxHeight = 12
+
+type dbIter struct {
+ util.BasicReleaser
+ p *DB
+ slice *util.Range
+ node int
+ forward bool
+ key, value []byte
+ err error
+}
+
+func (i *dbIter) fill(checkStart, checkLimit bool) bool {
+ if i.node != 0 {
+ n := i.p.nodeData[i.node]
+ m := n + i.p.nodeData[i.node+nKey]
+ i.key = i.p.kvData[n:m]
+ if i.slice != nil {
+ switch {
+ case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0:
+ fallthrough
+ case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0:
+ i.node = 0
+ goto bail
+ }
+ }
+ i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]]
+ return true
+ }
+bail:
+ i.key = nil
+ i.value = nil
+ return false
+}
+
+func (i *dbIter) Valid() bool {
+ return i.node != 0
+}
+
+func (i *dbIter) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Start != nil {
+ i.node, _ = i.p.findGE(i.slice.Start, false)
+ } else {
+ i.node = i.p.nodeData[nNext]
+ }
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Last() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = false
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Limit != nil {
+ i.node = i.p.findLT(i.slice.Limit)
+ } else {
+ i.node = i.p.findLast()
+ }
+ return i.fill(true, false)
+}
+
+func (i *dbIter) Seek(key []byte) bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 {
+ key = i.slice.Start
+ }
+ i.node, _ = i.p.findGE(key, false)
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Next() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.node == 0 {
+ if !i.forward {
+ return i.First()
+ }
+ return false
+ }
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ i.node = i.p.nodeData[i.node+nNext]
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Prev() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.node == 0 {
+ if i.forward {
+ return i.Last()
+ }
+ return false
+ }
+ i.forward = false
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ i.node = i.p.findLT(i.key)
+ return i.fill(true, false)
+}
+
+func (i *dbIter) Key() []byte {
+ return i.key
+}
+
+func (i *dbIter) Value() []byte {
+ return i.value
+}
+
+func (i *dbIter) Error() error { return i.err }
+
+func (i *dbIter) Release() {
+ if !i.Released() {
+ i.p = nil
+ i.node = 0
+ i.key = nil
+ i.value = nil
+ i.BasicReleaser.Release()
+ }
+}
+
+const (
+ nKV = iota
+ nKey
+ nVal
+ nHeight
+ nNext
+)
+
+// DB is an in-memory key/value database.
+type DB struct {
+ cmp comparer.BasicComparer
+ rnd *rand.Rand
+
+ mu sync.RWMutex
+ kvData []byte
+ // Node data:
+ // [0] : KV offset
+ // [1] : Key length
+ // [2] : Value length
+ // [3] : Height
+	// [4..4+height) : Next nodes
+ nodeData []int
+ prevNode [tMaxHeight]int
+ maxHeight int
+ n int
+ kvSize int
+}
+
+func (p *DB) randHeight() (h int) {
+ const branching = 4
+ h = 1
+ for h < tMaxHeight && p.rnd.Int()%branching == 0 {
+ h++
+ }
+ return
+}
+
+// Must hold RW-lock if prev == true, as it uses the shared prevNode slice.
+func (p *DB) findGE(key []byte, prev bool) (int, bool) {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ cmp := 1
+ if next != 0 {
+ o := p.nodeData[next]
+ cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key)
+ }
+ if cmp < 0 {
+ // Keep searching in this list
+ node = next
+ } else {
+ if prev {
+ p.prevNode[h] = node
+ } else if cmp == 0 {
+ return next, true
+ }
+ if h == 0 {
+ return next, cmp == 0
+ }
+ h--
+ }
+ }
+}
+
+func (p *DB) findLT(key []byte) int {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ o := p.nodeData[next]
+ if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 {
+ if h == 0 {
+ break
+ }
+ h--
+ } else {
+ node = next
+ }
+ }
+ return node
+}
+
+func (p *DB) findLast() int {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ if next == 0 {
+ if h == 0 {
+ break
+ }
+ h--
+ } else {
+ node = next
+ }
+ }
+ return node
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (p *DB) Put(key []byte, value []byte) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if node, exact := p.findGE(key, true); exact {
+ kvOffset := len(p.kvData)
+ p.kvData = append(p.kvData, key...)
+ p.kvData = append(p.kvData, value...)
+ p.nodeData[node] = kvOffset
+ m := p.nodeData[node+nVal]
+ p.nodeData[node+nVal] = len(value)
+ p.kvSize += len(value) - m
+ return nil
+ }
+
+ h := p.randHeight()
+ if h > p.maxHeight {
+ for i := p.maxHeight; i < h; i++ {
+ p.prevNode[i] = 0
+ }
+ p.maxHeight = h
+ }
+
+ kvOffset := len(p.kvData)
+ p.kvData = append(p.kvData, key...)
+ p.kvData = append(p.kvData, value...)
+ // Node
+ node := len(p.nodeData)
+ p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
+ for i, n := range p.prevNode[:h] {
+ m := n + nNext + i
+ p.nodeData = append(p.nodeData, p.nodeData[m])
+ p.nodeData[m] = node
+ }
+
+ p.kvSize += len(key) + len(value)
+ p.n++
+ return nil
+}
+
+// Delete deletes the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (p *DB) Delete(key []byte) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ node, exact := p.findGE(key, true)
+ if !exact {
+ return ErrNotFound
+ }
+
+ h := p.nodeData[node+nHeight]
+ for i, n := range p.prevNode[:h] {
+ m := n + 4 + i
+ p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i]
+ }
+
+ p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal]
+ p.n--
+ return nil
+}
+
+// Contains returns true if the given key is in the DB.
+//
+// It is safe to modify the contents of the arguments after Contains returns.
+func (p *DB) Contains(key []byte) bool {
+ p.mu.RLock()
+ _, exact := p.findGE(key, false)
+ p.mu.RUnlock()
+ return exact
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (p *DB) Get(key []byte) (value []byte, err error) {
+ p.mu.RLock()
+ if node, exact := p.findGE(key, false); exact {
+ o := p.nodeData[node] + p.nodeData[node+nKey]
+ value = p.kvData[o : o+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Find returns.
+func (p *DB) Find(key []byte) (rkey, value []byte, err error) {
+ p.mu.RLock()
+ if node, _ := p.findGE(key, false); node != 0 {
+ n := p.nodeData[node]
+ m := n + p.nodeData[node+nKey]
+ rkey = p.kvData[n:m]
+ value = p.kvData[m : m+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// NewIterator returns an iterator of the DB.
+// The returned iterator is not goroutine-safe, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. However, the resultant key/value pairs are not guaranteed
+// to be a consistent snapshot of the DB at a particular point in time.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB, and a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling the Release method.
+//
+// Also read the Iterator documentation of the leveldb/iterator package.
+func (p *DB) NewIterator(slice *util.Range) iterator.Iterator {
+ return &dbIter{p: p, slice: slice}
+}
+
+// Capacity returns keys/values buffer capacity.
+func (p *DB) Capacity() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData)
+}
+
+// Size returns the sum of key and value lengths. Note that deleted
+// key/value pairs are not accounted for, but they still consume
+// buffer space, since the buffer is append-only.
+func (p *DB) Size() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.kvSize
+}
+
+// Free returns the remaining key/value buffer capacity before the buffer needs to grow.
+func (p *DB) Free() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData) - len(p.kvData)
+}
+
+// Len returns the number of entries in the DB.
+func (p *DB) Len() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.n
+}
+
+// Reset resets the DB to its initial empty state, allowing the buffer to be reused.
+func (p *DB) Reset() {
+ p.mu.Lock()
+ p.rnd = rand.New(rand.NewSource(0xdeadbeef))
+ p.maxHeight = 1
+ p.n = 0
+ p.kvSize = 0
+ p.kvData = p.kvData[:0]
+ p.nodeData = p.nodeData[:nNext+tMaxHeight]
+ p.nodeData[nKV] = 0
+ p.nodeData[nKey] = 0
+ p.nodeData[nVal] = 0
+ p.nodeData[nHeight] = tMaxHeight
+ for n := 0; n < tMaxHeight; n++ {
+ p.nodeData[nNext+n] = 0
+ p.prevNode[n] = 0
+ }
+ p.mu.Unlock()
+}
+
+// New creates a new initialized in-memory key/value DB. The capacity
+// is the initial key/value buffer capacity. The capacity is advisory,
+// not enforced.
+//
+// The returned DB instance is goroutine-safe.
+func New(cmp comparer.BasicComparer, capacity int) *DB {
+ p := &DB{
+ cmp: cmp,
+ rnd: rand.New(rand.NewSource(0xdeadbeef)),
+ maxHeight: 1,
+ kvData: make([]byte, 0, capacity),
+ nodeData: make([]int, 4+tMaxHeight),
+ }
+ p.nodeData[nHeight] = tMaxHeight
+ return p
+}
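
A short sketch of the exported memdb API defined above, using the sibling comparer package's DefaultComparer (the same ordering opt falls back to below):

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

func main() {
	// 4 KiB initial buffer capacity; advisory, not enforced.
	db := memdb.New(comparer.DefaultComparer, 4096)
	db.Put([]byte("k2"), []byte("v2"))
	db.Put([]byte("k1"), []byte("v1"))

	if v, err := db.Get([]byte("k1")); err == nil {
		fmt.Printf("k1=%s\n", v)
	}

	// The skiplist keeps entries sorted, so iteration is in key order.
	it := db.NewIterator(nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
}
```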
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
new file mode 100644
index 0000000000..7b5d8b9b53
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -0,0 +1,682 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package opt provides sets of options used by LevelDB.
+package opt
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "math"
+)
+
+const (
+ KiB = 1024
+ MiB = KiB * 1024
+ GiB = MiB * 1024
+)
+
+var (
+ DefaultBlockCacher = LRUCacher
+ DefaultBlockCacheCapacity = 8 * MiB
+ DefaultBlockRestartInterval = 16
+ DefaultBlockSize = 4 * KiB
+ DefaultCompactionExpandLimitFactor = 25
+ DefaultCompactionGPOverlapsFactor = 10
+ DefaultCompactionL0Trigger = 4
+ DefaultCompactionSourceLimitFactor = 1
+ DefaultCompactionTableSize = 2 * MiB
+ DefaultCompactionTableSizeMultiplier = 1.0
+ DefaultCompactionTotalSize = 10 * MiB
+ DefaultCompactionTotalSizeMultiplier = 10.0
+ DefaultCompressionType = SnappyCompression
+ DefaultIteratorSamplingRate = 1 * MiB
+ DefaultMaxMemCompationLevel = 2
+ DefaultNumLevel = 7
+ DefaultOpenFilesCacher = LRUCacher
+ DefaultOpenFilesCacheCapacity = 500
+ DefaultWriteBuffer = 4 * MiB
+ DefaultWriteL0PauseTrigger = 12
+ DefaultWriteL0SlowdownTrigger = 8
+)
+
+// Cacher is a caching algorithm.
+type Cacher interface {
+ New(capacity int) cache.Cacher
+}
+
+type CacherFunc struct {
+ NewFunc func(capacity int) cache.Cacher
+}
+
+func (f *CacherFunc) New(capacity int) cache.Cacher {
+ if f.NewFunc != nil {
+ return f.NewFunc(capacity)
+ }
+ return nil
+}
+
+func noCacher(int) cache.Cacher { return nil }
+
+var (
+ // LRUCacher is the LRU-cache algorithm.
+ LRUCacher = &CacherFunc{cache.NewLRU}
+
	// NoCacher is the value used to disable the caching algorithm.
+ NoCacher = &CacherFunc{}
+)
+
+// Compression is the 'sorted table' block compression algorithm to use.
+type Compression uint
+
+func (c Compression) String() string {
+ switch c {
+ case DefaultCompression:
+ return "default"
+ case NoCompression:
+ return "none"
+ case SnappyCompression:
+ return "snappy"
+ }
+ return "invalid"
+}
+
+const (
+ DefaultCompression Compression = iota
+ NoCompression
+ SnappyCompression
+ nCompression
+)
+
+// Strict is the DB 'strict level'.
+type Strict uint
+
+const (
+	// If present then a corrupted or invalid chunk or block in the manifest
+	// journal will cause an error instead of being dropped.
+	// This prevents a database with a corrupted manifest from being opened.
+ StrictManifest Strict = 1 << iota
+
+ // If present then journal chunk checksum will be verified.
+ StrictJournalChecksum
+
+	// If present then a corrupted or invalid chunk or block in the journal
+	// will cause an error instead of being dropped.
+	// This prevents a database with a corrupted journal from being opened.
+ StrictJournal
+
+ // If present then 'sorted table' block checksum will be verified.
+ // This has effect on both 'read operation' and compaction.
+ StrictBlockChecksum
+
+	// If present then a corrupted 'sorted table' will fail compaction.
+ // The database will enter read-only mode.
+ StrictCompaction
+
+	// If present then a corrupted 'sorted table' will halt the 'read operation'.
+ StrictReader
+
+ // If present then leveldb.Recover will drop corrupted 'sorted table'.
+ StrictRecovery
+
+	// This is only applicable to ReadOptions; if present then this ReadOptions
+	// 'strict level' will override the global one.
+ StrictOverride
+
+ // StrictAll enables all strict flags.
+ StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery
+
+	// DefaultStrict is the default strict flags. Specifying any strict flag
+	// will override the default strict flags as a whole (i.e. not OR'ed).
+ DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader
+
+	// NoStrict disables all strict flags, overriding the default strict flags.
+ NoStrict = ^StrictAll
+)
+
+// Options holds the optional parameters for the DB at large.
+type Options struct {
+ // AltFilters defines one or more 'alternative filters'.
+ // 'alternative filters' will be used during reads if a filter block
+ // does not match with the 'effective filter'.
+ //
+	// The default value is nil.
+ AltFilters []filter.Filter
+
+ // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
+ // Specify NoCacher to disable caching algorithm.
+ //
+ // The default value is LRUCacher.
+ BlockCacher Cacher
+
+ // BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
+	// Use -1 for zero; this has the same effect as specifying NoCacher for BlockCacher.
+ //
+ // The default value is 8MiB.
+ BlockCacheCapacity int
+
+ // BlockRestartInterval is the number of keys between restart points for
+ // delta encoding of keys.
+ //
+ // The default value is 16.
+ BlockRestartInterval int
+
+ // BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
+ // block.
+ //
+ // The default value is 4KiB.
+ BlockSize int
+
+	// CompactionExpandLimitFactor limits the compaction size after expansion.
+ // This will be multiplied by table size limit at compaction target level.
+ //
+ // The default value is 25.
+ CompactionExpandLimitFactor int
+
+	// CompactionGPOverlapsFactor limits the overlap in grandparent (Level + 2)
+	// that a single 'sorted table' generates.
+ // This will be multiplied by table size limit at grandparent level.
+ //
+ // The default value is 10.
+ CompactionGPOverlapsFactor int
+
+	// CompactionL0Trigger defines the number of 'sorted table' files at
+	// level-0 that will trigger compaction.
+ //
+ // The default value is 4.
+ CompactionL0Trigger int
+
+ // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
+ // level-0.
+ // This will be multiplied by table size limit at compaction target level.
+ //
+ // The default value is 1.
+ CompactionSourceLimitFactor int
+
+	// CompactionTableSize limits the size of 'sorted table' that compaction
+	// generates. The limits for each level will be calculated as:
+	//	CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
+	// The multiplier for each level can also be fine-tuned using CompactionTableSizeMultiplierPerLevel.
+ //
+ // The default value is 2MiB.
+ CompactionTableSize int
+
+ // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
+ //
+ // The default value is 1.
+ CompactionTableSizeMultiplier float64
+
+ // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
+ // CompactionTableSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTableSizeMultiplierPerLevel []float64
+
+	// CompactionTotalSize limits the total size of 'sorted table' files for each level.
+	// The limits for each level will be calculated as:
+	//	CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
+	// The multiplier for each level can also be fine-tuned using
+ // CompactionTotalSizeMultiplierPerLevel.
+ //
+ // The default value is 10MiB.
+ CompactionTotalSize int
+
+ // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
+ //
+ // The default value is 10.
+ CompactionTotalSizeMultiplier float64
+
+ // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
+ // CompactionTotalSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTotalSizeMultiplierPerLevel []float64
+
+ // Comparer defines a total ordering over the space of []byte keys: a 'less
+ // than' relationship. The same comparison algorithm must be used for reads
+ // and writes over the lifetime of the DB.
+ //
+ // The default value uses the same ordering as bytes.Compare.
+ Comparer comparer.Comparer
+
+ // Compression defines the 'sorted table' block compression to use.
+ //
+ // The default value (DefaultCompression) uses snappy compression.
+ Compression Compression
+
+	// DisableBufferPool disables use of the util.BufferPool functionality.
+ //
+ // The default value is false.
+ DisableBufferPool bool
+
+	// DisableBlockCache disables use of the cache.Cache functionality on
+	// 'sorted table' blocks.
+ //
+ // The default value is false.
+ DisableBlockCache bool
+
+	// DisableCompactionBackoff disables compaction retry backoff.
+ //
+ // The default value is false.
+ DisableCompactionBackoff bool
+
+	// ErrorIfExist defines whether an error should be returned if the DB
+	// already exists.
+ //
+ // The default value is false.
+ ErrorIfExist bool
+
+	// ErrorIfMissing defines whether an error should be returned if the DB is
+ // missing. If false then the database will be created if missing, otherwise
+ // an error will be returned.
+ //
+ // The default value is false.
+ ErrorIfMissing bool
+
+	// Filter defines an 'effective filter' to use. An 'effective filter',
+	// if defined, will be used to generate per-table filter blocks.
+	// The filter name will be stored on disk.
+	// During reads LevelDB will try to find a matching filter among the
+	// 'effective filter' and 'alternative filters'.
+	//
+	// Filter can be changed after a DB has been created. It is recommended
+	// to put the old filter into 'alternative filters' to mitigate the lack
+	// of a filter during the transition period.
+ //
+ // A filter is used to reduce disk reads when looking for a specific key.
+ //
+ // The default value is nil.
+ Filter filter.Filter
+
+ // IteratorSamplingRate defines approximate gap (in bytes) between read
+ // sampling of an iterator. The samples will be used to determine when
+ // compaction should be triggered.
+ //
+ // The default is 1MiB.
+ IteratorSamplingRate int
+
+	// MaxMemCompationLevel defines the maximum level that a newly compacted
+	// 'memdb' will be pushed into if it doesn't create overlap. This should
+	// be less than NumLevel. Use -1 for level-0.
+ //
+ // The default is 2.
+ MaxMemCompationLevel int
+
+	// NoSync completely disables fsync.
+ //
+ // The default is false.
+ NoSync bool
+
+	// NumLevel defines the number of database levels. The number shouldn't
+	// change between opens, or the database will panic.
+ //
+ // The default is 7.
+ NumLevel int
+
+ // OpenFilesCacher provides cache algorithm for open files caching.
+ // Specify NoCacher to disable caching algorithm.
+ //
+ // The default value is LRUCacher.
+ OpenFilesCacher Cacher
+
+ // OpenFilesCacheCapacity defines the capacity of the open files caching.
+	// Use -1 for zero; this has the same effect as specifying NoCacher for OpenFilesCacher.
+ //
+ // The default value is 500.
+ OpenFilesCacheCapacity int
+
+	// If true then the DB will be opened in read-only mode.
+ //
+ // The default value is false.
+ ReadOnly bool
+
+ // Strict defines the DB strict level.
+ Strict Strict
+
+	// WriteBuffer defines the maximum size of a 'memdb' before it is flushed
+	// to a 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk
+	// unsorted journal.
+	//
+	// LevelDB may hold up to two 'memdb' instances at the same time.
+ //
+ // The default value is 4MiB.
+ WriteBuffer int
+
+	// WriteL0PauseTrigger defines the number of 'sorted table' files at
+	// level-0 that will pause writes.
+ //
+ // The default value is 12.
+ WriteL0PauseTrigger int
+
+	// WriteL0SlowdownTrigger defines the number of 'sorted table' files at
+	// level-0 that will trigger write slowdown.
+ //
+ // The default value is 8.
+ WriteL0SlowdownTrigger int
+}
+
+func (o *Options) GetAltFilters() []filter.Filter {
+ if o == nil {
+ return nil
+ }
+ return o.AltFilters
+}
+
+func (o *Options) GetBlockCacher() Cacher {
+ if o == nil || o.BlockCacher == nil {
+ return DefaultBlockCacher
+ } else if o.BlockCacher == NoCacher {
+ return nil
+ }
+ return o.BlockCacher
+}
+
+func (o *Options) GetBlockCacheCapacity() int {
+ if o == nil || o.BlockCacheCapacity == 0 {
+ return DefaultBlockCacheCapacity
+ } else if o.BlockCacheCapacity < 0 {
+ return 0
+ }
+ return o.BlockCacheCapacity
+}
+
+func (o *Options) GetBlockRestartInterval() int {
+ if o == nil || o.BlockRestartInterval <= 0 {
+ return DefaultBlockRestartInterval
+ }
+ return o.BlockRestartInterval
+}
+
+func (o *Options) GetBlockSize() int {
+ if o == nil || o.BlockSize <= 0 {
+ return DefaultBlockSize
+ }
+ return o.BlockSize
+}
+
+func (o *Options) GetCompactionExpandLimit(level int) int {
+ factor := DefaultCompactionExpandLimitFactor
+ if o != nil && o.CompactionExpandLimitFactor > 0 {
+ factor = o.CompactionExpandLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionGPOverlaps(level int) int {
+ factor := DefaultCompactionGPOverlapsFactor
+ if o != nil && o.CompactionGPOverlapsFactor > 0 {
+ factor = o.CompactionGPOverlapsFactor
+ }
+ return o.GetCompactionTableSize(level+2) * factor
+}
+
+func (o *Options) GetCompactionL0Trigger() int {
+ if o == nil || o.CompactionL0Trigger == 0 {
+ return DefaultCompactionL0Trigger
+ }
+ return o.CompactionL0Trigger
+}
+
+func (o *Options) GetCompactionSourceLimit(level int) int {
+ factor := DefaultCompactionSourceLimitFactor
+ if o != nil && o.CompactionSourceLimitFactor > 0 {
+ factor = o.CompactionSourceLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionTableSize(level int) int {
+ var (
+ base = DefaultCompactionTableSize
+ mult float64
+ )
+ if o != nil {
+ if o.CompactionTableSize > 0 {
+ base = o.CompactionTableSize
+ }
+ if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
+ mult = o.CompactionTableSizeMultiplierPerLevel[level]
+ } else if o.CompactionTableSizeMultiplier > 0 {
+ mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
+ }
+ }
+ if mult == 0 {
+ mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
+ }
+ return int(float64(base) * mult)
+}
+
+func (o *Options) GetCompactionTotalSize(level int) int64 {
+ var (
+ base = DefaultCompactionTotalSize
+ mult float64
+ )
+ if o != nil {
+ if o.CompactionTotalSize > 0 {
+ base = o.CompactionTotalSize
+ }
+ if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
+ mult = o.CompactionTotalSizeMultiplierPerLevel[level]
+ } else if o.CompactionTotalSizeMultiplier > 0 {
+ mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
+ }
+ }
+ if mult == 0 {
+ mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
+ }
+ return int64(float64(base) * mult)
+}
+
+func (o *Options) GetComparer() comparer.Comparer {
+ if o == nil || o.Comparer == nil {
+ return comparer.DefaultComparer
+ }
+ return o.Comparer
+}
+
+func (o *Options) GetCompression() Compression {
+ if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
+ return DefaultCompressionType
+ }
+ return o.Compression
+}
+
+func (o *Options) GetDisableBufferPool() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableBufferPool
+}
+
+func (o *Options) GetDisableBlockCache() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableBlockCache
+}
+
+func (o *Options) GetDisableCompactionBackoff() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableCompactionBackoff
+}
+
+func (o *Options) GetErrorIfExist() bool {
+ if o == nil {
+ return false
+ }
+ return o.ErrorIfExist
+}
+
+func (o *Options) GetErrorIfMissing() bool {
+ if o == nil {
+ return false
+ }
+ return o.ErrorIfMissing
+}
+
+func (o *Options) GetFilter() filter.Filter {
+ if o == nil {
+ return nil
+ }
+ return o.Filter
+}
+
+func (o *Options) GetIteratorSamplingRate() int {
+ if o == nil || o.IteratorSamplingRate <= 0 {
+ return DefaultIteratorSamplingRate
+ }
+ return o.IteratorSamplingRate
+}
+
+func (o *Options) GetMaxMemCompationLevel() int {
+ level := DefaultMaxMemCompationLevel
+ if o != nil {
+ if o.MaxMemCompationLevel > 0 {
+ level = o.MaxMemCompationLevel
+ } else if o.MaxMemCompationLevel < 0 {
+ level = 0
+ }
+ }
+ if level >= o.GetNumLevel() {
+ return o.GetNumLevel() - 1
+ }
+ return level
+}
+
+func (o *Options) GetNoSync() bool {
+ if o == nil {
+ return false
+ }
+ return o.NoSync
+}
+
+func (o *Options) GetNumLevel() int {
+ if o == nil || o.NumLevel <= 0 {
+ return DefaultNumLevel
+ }
+ return o.NumLevel
+}
+
+func (o *Options) GetOpenFilesCacher() Cacher {
+ if o == nil || o.OpenFilesCacher == nil {
+ return DefaultOpenFilesCacher
+ }
+ if o.OpenFilesCacher == NoCacher {
+ return nil
+ }
+ return o.OpenFilesCacher
+}
+
+func (o *Options) GetOpenFilesCacheCapacity() int {
+ if o == nil || o.OpenFilesCacheCapacity == 0 {
+ return DefaultOpenFilesCacheCapacity
+ } else if o.OpenFilesCacheCapacity < 0 {
+ return 0
+ }
+ return o.OpenFilesCacheCapacity
+}
+
+func (o *Options) GetReadOnly() bool {
+ if o == nil {
+ return false
+ }
+ return o.ReadOnly
+}
+
+func (o *Options) GetStrict(strict Strict) bool {
+ if o == nil || o.Strict == 0 {
+ return DefaultStrict&strict != 0
+ }
+ return o.Strict&strict != 0
+}
+
+func (o *Options) GetWriteBuffer() int {
+ if o == nil || o.WriteBuffer <= 0 {
+ return DefaultWriteBuffer
+ }
+ return o.WriteBuffer
+}
+
+func (o *Options) GetWriteL0PauseTrigger() int {
+ if o == nil || o.WriteL0PauseTrigger == 0 {
+ return DefaultWriteL0PauseTrigger
+ }
+ return o.WriteL0PauseTrigger
+}
+
+func (o *Options) GetWriteL0SlowdownTrigger() int {
+ if o == nil || o.WriteL0SlowdownTrigger == 0 {
+ return DefaultWriteL0SlowdownTrigger
+ }
+ return o.WriteL0SlowdownTrigger
+}
+
+// ReadOptions holds the optional parameters for 'read operation'. The
+// 'read operation' includes Get, Find and NewIterator.
+type ReadOptions struct {
+ // DontFillCache defines whether block reads for this 'read operation'
+ // should be cached. If false then the block will be cached. This does
+	// not affect already-cached blocks.
+ //
+ // The default value is false.
+ DontFillCache bool
+
+ // Strict will be OR'ed with global DB 'strict level' unless StrictOverride
+	// is present. Currently only StrictReader has an effect here.
+ Strict Strict
+}
+
+func (ro *ReadOptions) GetDontFillCache() bool {
+ if ro == nil {
+ return false
+ }
+ return ro.DontFillCache
+}
+
+func (ro *ReadOptions) GetStrict(strict Strict) bool {
+ if ro == nil {
+ return false
+ }
+ return ro.Strict&strict != 0
+}
+
+// WriteOptions holds the optional parameters for 'write operation'. The
+// 'write operation' includes Write, Put and Delete.
+type WriteOptions struct {
+ // Sync is whether to sync underlying writes from the OS buffer cache
+ // through to actual disk, if applicable. Setting Sync can result in
+ // slower writes.
+ //
+ // If false, and the machine crashes, then some recent writes may be lost.
+ // Note that if it is just the process that crashes (and the machine does
+ // not) then no writes will be lost.
+ //
+ // In other words, Sync being false has the same semantics as a write
+ // system call. Sync being true means write followed by fsync.
+ //
+ // The default value is false.
+ Sync bool
+}
+
+func (wo *WriteOptions) GetSync() bool {
+ if wo == nil {
+ return false
+ }
+ return wo.Sync
+}
+
+func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
+ if ro.GetStrict(StrictOverride) {
+ return ro.GetStrict(strict)
+ } else {
+ return o.GetStrict(strict) || ro.GetStrict(strict)
+ }
+}
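
Note that every Get* accessor above guards against a nil receiver and zero-valued fields, so callers can pass partial or even nil options and still get the documented defaults; a brief sketch:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// A nil *Options still yields the documented defaults.
	var o *opt.Options
	fmt.Println(o.GetBlockSize(), o.GetWriteBuffer()) // 4096 4194304

	// Set fields win; unset fields keep falling back per accessor.
	o = &opt.Options{
		BlockCacheCapacity: 16 * opt.MiB,
		Compression:        opt.SnappyCompression,
		WriteBuffer:        8 * opt.MiB,
	}
	fmt.Println(o.GetWriteBuffer(), o.GetCompression()) // 8388608 snappy
}
```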
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
new file mode 100644
index 0000000000..a3d84ef60d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
@@ -0,0 +1,92 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func dupOptions(o *opt.Options) *opt.Options {
+ newo := &opt.Options{}
+ if o != nil {
+ *newo = *o
+ }
+ if newo.Strict == 0 {
+ newo.Strict = opt.DefaultStrict
+ }
+ return newo
+}
+
+func (s *session) setOptions(o *opt.Options) {
+ no := dupOptions(o)
+ // Alternative filters.
+ if filters := o.GetAltFilters(); len(filters) > 0 {
+ no.AltFilters = make([]filter.Filter, len(filters))
+ for i, filter := range filters {
+ no.AltFilters[i] = &iFilter{filter}
+ }
+ }
+ // Comparer.
+ s.icmp = &iComparer{o.GetComparer()}
+ no.Comparer = s.icmp
+ // Filter.
+ if filter := o.GetFilter(); filter != nil {
+ no.Filter = &iFilter{filter}
+ }
+
+ s.o = &cachedOptions{Options: no}
+ s.o.cache()
+}
+
+type cachedOptions struct {
+ *opt.Options
+
+ compactionExpandLimit []int
+ compactionGPOverlaps []int
+ compactionSourceLimit []int
+ compactionTableSize []int
+ compactionTotalSize []int64
+}
+
+func (co *cachedOptions) cache() {
+ numLevel := co.Options.GetNumLevel()
+
+ co.compactionExpandLimit = make([]int, numLevel)
+ co.compactionGPOverlaps = make([]int, numLevel)
+ co.compactionSourceLimit = make([]int, numLevel)
+ co.compactionTableSize = make([]int, numLevel)
+ co.compactionTotalSize = make([]int64, numLevel)
+
+ for level := 0; level < numLevel; level++ {
+ co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level)
+ co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level)
+ co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level)
+ co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level)
+ co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level)
+ }
+}
+
+func (co *cachedOptions) GetCompactionExpandLimit(level int) int {
+ return co.compactionExpandLimit[level]
+}
+
+func (co *cachedOptions) GetCompactionGPOverlaps(level int) int {
+ return co.compactionGPOverlaps[level]
+}
+
+func (co *cachedOptions) GetCompactionSourceLimit(level int) int {
+ return co.compactionSourceLimit[level]
+}
+
+func (co *cachedOptions) GetCompactionTableSize(level int) int {
+ return co.compactionTableSize[level]
+}
+
+func (co *cachedOptions) GetCompactionTotalSize(level int) int64 {
+ return co.compactionTotalSize[level]
+}
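
Caching these per-level limits once up front keeps the math.Pow calls inside opt's accessors off the hot path; every subsequent compaction decision reduces to a slice index.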
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
new file mode 100644
index 0000000000..f0bba4602c
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
@@ -0,0 +1,211 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type ErrManifestCorrupted struct {
+ Field string
+ Reason string
+}
+
+func (e *ErrManifestCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
+}
+
+func newErrManifestCorrupted(f storage.File, field, reason string) error {
+ return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason})
+}
+
+// session represents a persistent database session.
+type session struct {
+ // Need 64-bit alignment.
+ stNextFileNum uint64 // current unused file number
+ stJournalNum uint64 // current journal file number; need external synchronization
+	stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older versions of leveldb
+ stSeqNum uint64 // last mem compacted seq; need external synchronization
+ stTempFileNum uint64
+
+ stor storage.Storage
+ storLock util.Releaser
+ o *cachedOptions
+ icmp *iComparer
+ tops *tOps
+
+ manifest *journal.Writer
+ manifestWriter storage.Writer
+ manifestFile storage.File
+
+ stCompPtrs []iKey // compaction pointers; need external synchronization
+ stVersion *version // current version
+ vmu sync.Mutex
+}
+
+// Creates new initialized session instance.
+func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
+ if stor == nil {
+ return nil, os.ErrInvalid
+ }
+ storLock, err := stor.Lock()
+ if err != nil {
+ return
+ }
+ s = &session{
+ stor: stor,
+ storLock: storLock,
+ stCompPtrs: make([]iKey, o.GetNumLevel()),
+ }
+ s.setOptions(o)
+ s.tops = newTableOps(s)
+ s.setVersion(newVersion(s))
+ s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
+ return
+}
+
+// Close session.
+func (s *session) close() {
+ s.tops.close()
+ if s.manifest != nil {
+ s.manifest.Close()
+ }
+ if s.manifestWriter != nil {
+ s.manifestWriter.Close()
+ }
+ s.manifest = nil
+ s.manifestWriter = nil
+ s.manifestFile = nil
+ s.stVersion = nil
+}
+
+// Release session lock.
+func (s *session) release() {
+ s.storLock.Release()
+}
+
+// Create a new database session; need external synchronization.
+func (s *session) create() error {
+ // create manifest
+ return s.newManifest(nil, nil)
+}
+
+// Recover a database session; need external synchronization.
+func (s *session) recover() (err error) {
+ defer func() {
+ if os.IsNotExist(err) {
+		// Don't return os.ErrNotExist if the underlying storage contains
+		// other files that belong to LevelDB, so the DB won't get trashed.
+ if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 {
+ err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
+ }
+ }
+ }()
+
+ m, err := s.stor.GetManifest()
+ if err != nil {
+ return
+ }
+
+ reader, err := m.Open()
+ if err != nil {
+ return
+ }
+ defer reader.Close()
+
+ var (
+ // Options.
+ numLevel = s.o.GetNumLevel()
+ strict = s.o.GetStrict(opt.StrictManifest)
+
+ jr = journal.NewReader(reader, dropper{s, m}, strict, true)
+ rec = &sessionRecord{}
+ staging = s.stVersion.newStaging()
+ )
+ for {
+ var r io.Reader
+ r, err = jr.Next()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ break
+ }
+ return errors.SetFile(err, m)
+ }
+
+ err = rec.decode(r, numLevel)
+ if err == nil {
+ // save compact pointers
+ for _, r := range rec.compPtrs {
+ s.stCompPtrs[r.level] = iKey(r.ikey)
+ }
+ // commit record to version staging
+ staging.commit(rec)
+ } else {
+ err = errors.SetFile(err, m)
+ if strict || !errors.IsCorrupted(err) {
+ return
+ } else {
+ s.logf("manifest error: %v (skipped)", errors.SetFile(err, m))
+ }
+ }
+ rec.resetCompPtrs()
+ rec.resetAddedTables()
+ rec.resetDeletedTables()
+ }
+
+ switch {
+ case !rec.has(recComparer):
+ return newErrManifestCorrupted(m, "comparer", "missing")
+ case rec.comparer != s.icmp.uName():
+ return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
+ case !rec.has(recNextFileNum):
+ return newErrManifestCorrupted(m, "next-file-num", "missing")
+ case !rec.has(recJournalNum):
+ return newErrManifestCorrupted(m, "journal-file-num", "missing")
+ case !rec.has(recSeqNum):
+ return newErrManifestCorrupted(m, "seq-num", "missing")
+ }
+
+ s.manifestFile = m
+ s.setVersion(staging.finish())
+ s.setNextFileNum(rec.nextFileNum)
+ s.recordCommited(rec)
+ return nil
+}
+
+// Commit session; need external synchronization.
+func (s *session) commit(r *sessionRecord) (err error) {
+ v := s.version()
+ defer v.release()
+
+ // spawn new version based on current version
+ nv := v.spawn(r)
+
+ if s.manifest == nil {
+ // manifest journal writer not yet created, create one
+ err = s.newManifest(r, nv)
+ } else {
+ err = s.flushManifest(r)
+ }
+
+	// finally, apply the new version if no error arose
+ if err == nil {
+ s.setVersion(nv)
+ }
+
+ return
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go
new file mode 100644
index 0000000000..7c5a79418c
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go
@@ -0,0 +1,287 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func (s *session) pickMemdbLevel(umin, umax []byte) int {
+ v := s.version()
+ defer v.release()
+ return v.pickMemdbLevel(umin, umax)
+}
+
+func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) {
+ // Create sorted table.
+ iter := mdb.NewIterator(nil)
+ defer iter.Release()
+ t, n, err := s.tops.createFrom(iter)
+ if err != nil {
+ return level, err
+ }
+
+ // Pick level and add to record.
+ if level < 0 {
+ level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey())
+ }
+ rec.addTableFile(level, t)
+
+ s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
+ return level, nil
+}
+
+// Pick a compaction based on current state; need external synchronization.
+func (s *session) pickCompaction() *compaction {
+ v := s.version()
+
+ var level int
+ var t0 tFiles
+ if v.cScore >= 1 {
+ level = v.cLevel
+ cptr := s.stCompPtrs[level]
+ tables := v.tables[level]
+ for _, t := range tables {
+ if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
+ t0 = append(t0, t)
+ break
+ }
+ }
+ if len(t0) == 0 {
+ t0 = append(t0, tables[0])
+ }
+ } else {
+ if p := atomic.LoadPointer(&v.cSeek); p != nil {
+ ts := (*tSet)(p)
+ level = ts.level
+ t0 = append(t0, ts.table)
+ } else {
+ v.release()
+ return nil
+ }
+ }
+
+ return newCompaction(s, v, level, t0)
+}
+
+// Create compaction from given level and range; need external synchronization.
+func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
+ v := s.version()
+
+ t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
+ if len(t0) == 0 {
+ v.release()
+ return nil
+ }
+
+ // Avoid compacting too much in one shot in case the range is large.
+ // But we cannot do this for level-0 since level-0 files can overlap
+ // and we must not pick one file and drop another older file if the
+ // two files overlap.
+ if level > 0 {
+ limit := uint64(v.s.o.GetCompactionSourceLimit(level))
+ total := uint64(0)
+ for i, t := range t0 {
+ total += t.size
+ if total >= limit {
+ s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
+ t0 = t0[:i+1]
+ break
+ }
+ }
+ }
+
+ return newCompaction(s, v, level, t0)
+}
+
+func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
+ c := &compaction{
+ s: s,
+ v: v,
+ level: level,
+ tables: [2]tFiles{t0, nil},
+ maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
+ tPtrs: make([]int, s.o.GetNumLevel()),
+ }
+ c.expand()
+ c.save()
+ return c
+}
+
+// compaction represents a compaction state.
+type compaction struct {
+ s *session
+ v *version
+
+ level int
+ tables [2]tFiles
+ maxGPOverlaps uint64
+
+ gp tFiles
+ gpi int
+ seenKey bool
+ gpOverlappedBytes uint64
+ imin, imax iKey
+ tPtrs []int
+ released bool
+
+ snapGPI int
+ snapSeenKey bool
+ snapGPOverlappedBytes uint64
+ snapTPtrs []int
+}
+
+func (c *compaction) save() {
+ c.snapGPI = c.gpi
+ c.snapSeenKey = c.seenKey
+ c.snapGPOverlappedBytes = c.gpOverlappedBytes
+ c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
+}
+
+func (c *compaction) restore() {
+ c.gpi = c.snapGPI
+ c.seenKey = c.snapSeenKey
+ c.gpOverlappedBytes = c.snapGPOverlappedBytes
+ c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
+}
+
+func (c *compaction) release() {
+ if !c.released {
+ c.released = true
+ c.v.release()
+ }
+}
+
+// Expand compacted tables; need external synchronization.
+func (c *compaction) expand() {
+ limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
+ vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
+
+ t0, t1 := c.tables[0], c.tables[1]
+ imin, imax := t0.getRange(c.s.icmp)
+	// We expand t0 here just in case a ukey hops across tables.
+ t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
+ if len(t0) != len(c.tables[0]) {
+ imin, imax = t0.getRange(c.s.icmp)
+ }
+ t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
+ // Get entire range covered by compaction.
+ amin, amax := append(t0, t1...).getRange(c.s.icmp)
+
+ // See if we can grow the number of inputs in "level" without
+ // changing the number of "level+1" files we pick up.
+ if len(t1) > 0 {
+ exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
+ if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
+ xmin, xmax := exp0.getRange(c.s.icmp)
+ exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
+ if len(exp1) == len(t1) {
+ c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
+ c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
+ len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
+ imin, imax = xmin, xmax
+ t0, t1 = exp0, exp1
+ amin, amax = append(t0, t1...).getRange(c.s.icmp)
+ }
+ }
+ }
+
+ // Compute the set of grandparent files that overlap this compaction
+ // (parent == level+1; grandparent == level+2)
+ if c.level+2 < c.s.o.GetNumLevel() {
+ c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
+ }
+
+ c.tables[0], c.tables[1] = t0, t1
+ c.imin, c.imax = imin, imax
+}
+
+// Check whether compaction is trivial.
+func (c *compaction) trivial() bool {
+ return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
+}
+
+func (c *compaction) baseLevelForKey(ukey []byte) bool {
+ for level, tables := range c.v.tables[c.level+2:] {
+ for c.tPtrs[level] < len(tables) {
+ t := tables[c.tPtrs[level]]
+ if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
+ // We've advanced far enough.
+ if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+ // Key falls in this file's range, so definitely not base level.
+ return false
+ }
+ break
+ }
+ c.tPtrs[level]++
+ }
+ }
+ return true
+}
+
+func (c *compaction) shouldStopBefore(ikey iKey) bool {
+ for ; c.gpi < len(c.gp); c.gpi++ {
+ gp := c.gp[c.gpi]
+ if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
+ break
+ }
+ if c.seenKey {
+ c.gpOverlappedBytes += gp.size
+ }
+ }
+ c.seenKey = true
+
+ if c.gpOverlappedBytes > c.maxGPOverlaps {
+ // Too much overlap for current output; start new output.
+ c.gpOverlappedBytes = 0
+ return true
+ }
+ return false
+}
+
+// Creates an iterator.
+func (c *compaction) newIterator() iterator.Iterator {
+ // Creates iterator slice.
+ icap := len(c.tables)
+ if c.level == 0 {
+ // Special case for level-0.
+ icap = len(c.tables[0]) + 1
+ }
+ its := make([]iterator.Iterator, 0, icap)
+
+ // Options.
+ ro := &opt.ReadOptions{
+ DontFillCache: true,
+ Strict: opt.StrictOverride,
+ }
+ strict := c.s.o.GetStrict(opt.StrictCompaction)
+ if strict {
+ ro.Strict |= opt.StrictReader
+ }
+
+ for i, tables := range c.tables {
+ if len(tables) == 0 {
+ continue
+ }
+
+		// Level-0 tables are not sorted and may overlap each other.
+ if c.level+i == 0 {
+ for _, t := range tables {
+ its = append(its, c.s.tops.newIterator(t, nil, ro))
+ }
+ } else {
+ it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
+ its = append(its, it)
+ }
+ }
+
+ return iterator.NewMergedIterator(its, c.s.icmp, strict)
+}
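
To make the maxGPOverlaps cutoff concrete: with all-default options, a level-0 compaction may overlap at most ten times the level-2 table size of grandparent data before shouldStopBefore starts a new output table. A sketch using the nil-safe opt accessors from earlier in this diff:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	var o *opt.Options // all defaults
	// Grandparent of level 0 is level 2, whose table size limit is
	// 2MiB * 1.0^2; the overlap cutoff is 10x that.
	fmt.Println(o.GetCompactionGPOverlaps(0)) // 20971520 (20 MiB)
}
```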
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
new file mode 100644
index 0000000000..405e07bef4
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -0,0 +1,311 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "bufio"
+ "encoding/binary"
+ "io"
+ "strings"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+)
+
+type byteReader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// These numbers are written to disk and should not be changed.
+const (
+ recComparer = 1
+ recJournalNum = 2
+ recNextFileNum = 3
+ recSeqNum = 4
+ recCompPtr = 5
+ recDelTable = 6
+ recAddTable = 7
+ // 8 was used for large value refs
+ recPrevJournalNum = 9
+)
+
+type cpRecord struct {
+ level int
+ ikey iKey
+}
+
+type atRecord struct {
+ level int
+ num uint64
+ size uint64
+ imin iKey
+ imax iKey
+}
+
+type dtRecord struct {
+ level int
+ num uint64
+}
+
+type sessionRecord struct {
+ hasRec int
+ comparer string
+ journalNum uint64
+ prevJournalNum uint64
+ nextFileNum uint64
+ seqNum uint64
+ compPtrs []cpRecord
+ addedTables []atRecord
+ deletedTables []dtRecord
+
+ scratch [binary.MaxVarintLen64]byte
+ err error
+}
+
+func (p *sessionRecord) has(rec int) bool {
+	return p.hasRec&(1<<uint(rec)) != 0
+}
+
+func (p *sessionRecord) setComparer(name string) {
+	p.hasRec |= 1 << recComparer
+	p.comparer = name
+}
+
+func (p *sessionRecord) setJournalNum(num uint64) {
+	p.hasRec |= 1 << recJournalNum
+	p.journalNum = num
+}
+
+func (p *sessionRecord) setPrevJournalNum(num uint64) {
+	p.hasRec |= 1 << recPrevJournalNum
+	p.prevJournalNum = num
+}
+
+func (p *sessionRecord) setNextFileNum(num uint64) {
+	p.hasRec |= 1 << recNextFileNum
+	p.nextFileNum = num
+}
+
+func (p *sessionRecord) setSeqNum(num uint64) {
+	p.hasRec |= 1 << recSeqNum
+	p.seqNum = num
+}
+
+func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
+	p.hasRec |= 1 << recCompPtr
+	p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
+}
+
+func (p *sessionRecord) resetCompPtrs() {
+	p.hasRec &= ^(1 << recCompPtr)
+	p.compPtrs = p.compPtrs[:0]
+}
+
+func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) {
+	p.hasRec |= 1 << recAddTable
+	p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
+}
+
+func (p *sessionRecord) addTableFile(level int, t *tFile) {
+	p.addTable(level, t.file.Num(), t.size, t.imin, t.imax)
+}
+
+func (p *sessionRecord) resetAddedTables() {
+	p.hasRec &= ^(1 << recAddTable)
+	p.addedTables = p.addedTables[:0]
+}
+
+func (p *sessionRecord) delTable(level int, num uint64) {
+	p.hasRec |= 1 << recDelTable
+	p.deletedTables = append(p.deletedTables, dtRecord{level, num})
+}
+
+func (p *sessionRecord) resetDeletedTables() {
+	p.hasRec &= ^(1 << recDelTable)
+	p.deletedTables = p.deletedTables[:0]
+}
+
+func (p *sessionRecord) putUvarint(w io.Writer, x uint64) {
+	if p.err != nil {
+		return
+	}
+	n := binary.PutUvarint(p.scratch[:], x)
+	_, p.err = w.Write(p.scratch[:n])
+}
+
+func (p *sessionRecord) putVarint(w io.Writer, x int64) {
+	if x < 0 {
+		panic("invalid negative value")
+	}
+	p.putUvarint(w, uint64(x))
+}
+
+func (p *sessionRecord) putBytes(w io.Writer, x []byte) {
+	if p.err != nil {
+		return
+	}
+	p.putUvarint(w, uint64(len(x)))
+	if p.err != nil {
+		return
+	}
+	_, p.err = w.Write(x)
+}
+
+func (p *sessionRecord) encode(w io.Writer) error {
+	p.err = nil
+	if p.has(recComparer) {
+		p.putUvarint(w, recComparer)
+		p.putBytes(w, []byte(p.comparer))
+	}
+	if p.has(recJournalNum) {
+		p.putUvarint(w, recJournalNum)
+		p.putUvarint(w, p.journalNum)
+	}
+	if p.has(recNextFileNum) {
+		p.putUvarint(w, recNextFileNum)
+		p.putUvarint(w, p.nextFileNum)
+	}
+	if p.has(recSeqNum) {
+		p.putUvarint(w, recSeqNum)
+		p.putUvarint(w, p.seqNum)
+	}
+	for _, r := range p.compPtrs {
+		p.putUvarint(w, recCompPtr)
+		p.putVarint(w, int64(r.level))
+		p.putBytes(w, r.ikey)
+	}
+	for _, r := range p.deletedTables {
+		p.putUvarint(w, recDelTable)
+		p.putVarint(w, int64(r.level))
+		p.putUvarint(w, r.num)
+	}
+	for _, r := range p.addedTables {
+		p.putUvarint(w, recAddTable)
+		p.putVarint(w, int64(r.level))
+		p.putUvarint(w, r.num)
+		p.putUvarint(w, r.size)
+		p.putBytes(w, r.imin)
+		p.putBytes(w, r.imax)
+	}
+	return p.err
+}
+
+func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF bool) uint64 {
+	if p.err != nil {
+		return 0
+	}
+	x, err := binary.ReadUvarint(r)
+	if err != nil {
+		if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) {
+			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
+		} else if strings.HasPrefix(err.Error(), "leveldb/journal:") {
+			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()})
+		} else {
+			p.err = err
+		}
+		return 0
+	}
+	return x
+}
+
+func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 {
+	return p.readUvarintMayEOF(field, r, false)
+}
+
+func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
+	if p.err != nil {
+		return nil
+	}
+	n := p.readUvarint(field, r)
+	if p.err != nil {
+		return nil
+	}
+	x := make([]byte, n)
+	_, p.err = io.ReadFull(r, x)
+	if p.err != nil {
+		if p.err == io.ErrUnexpectedEOF {
+			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
+		}
+		return nil
+	}
+	return x
+}
+
+func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int {
+	if p.err != nil {
+		return 0
+	}
+	x := p.readUvarint(field, r)
+	if p.err != nil {
+		return 0
+	}
+	if x >= uint64(numLevel) {
+ p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
+ return 0
+ }
+ return int(x)
+}
+
+func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
+ br, ok := r.(byteReader)
+ if !ok {
+ br = bufio.NewReader(r)
+ }
+ p.err = nil
+ for p.err == nil {
+ rec := p.readUvarintMayEOF("field-header", br, true)
+ if p.err != nil {
+ if p.err == io.EOF {
+ return nil
+ }
+ return p.err
+ }
+ switch rec {
+ case recComparer:
+ x := p.readBytes("comparer", br)
+ if p.err == nil {
+ p.setComparer(string(x))
+ }
+ case recJournalNum:
+ x := p.readUvarint("journal-num", br)
+ if p.err == nil {
+ p.setJournalNum(x)
+ }
+ case recPrevJournalNum:
+ x := p.readUvarint("prev-journal-num", br)
+ if p.err == nil {
+ p.setPrevJournalNum(x)
+ }
+ case recNextFileNum:
+ x := p.readUvarint("next-file-num", br)
+ if p.err == nil {
+ p.setNextFileNum(x)
+ }
+ case recSeqNum:
+ x := p.readUvarint("seq-num", br)
+ if p.err == nil {
+ p.setSeqNum(x)
+ }
+ case recCompPtr:
+ level := p.readLevel("comp-ptr.level", br, numLevel)
+ ikey := p.readBytes("comp-ptr.ikey", br)
+ if p.err == nil {
+ p.addCompPtr(level, iKey(ikey))
+ }
+ case recAddTable:
+ level := p.readLevel("add-table.level", br, numLevel)
+ num := p.readUvarint("add-table.num", br)
+ size := p.readUvarint("add-table.size", br)
+ imin := p.readBytes("add-table.imin", br)
+ imax := p.readBytes("add-table.imax", br)
+ if p.err == nil {
+ p.addTable(level, num, size, imin, imax)
+ }
+ case recDelTable:
+ level := p.readLevel("del-table.level", br, numLevel)
+ num := p.readUvarint("del-table.num", br)
+ if p.err == nil {
+ p.delTable(level, num)
+ }
+ }
+ }
+
+ return p.err
+}
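
The manifest format decoded above is simply a stream of uvarint field-headers, each followed by uvarint or length-prefixed payloads. A standalone sketch of the recJournalNum (2) layout using only encoding/binary:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	tmp := make([]byte, binary.MaxVarintLen64)
	for _, x := range []uint64{2, 123} { // field-header, journal number
		n := binary.PutUvarint(tmp, x)
		buf.Write(tmp[:n])
	}

	// Decoding mirrors sessionRecord.decode: header first, then payload.
	br := bufio.NewReader(&buf)
	rec, _ := binary.ReadUvarint(br)
	num, _ := binary.ReadUvarint(br)
	fmt.Println(rec, num) // 2 123
}
```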
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
new file mode 100644
index 0000000000..7ec9f86f75
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -0,0 +1,251 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// Logging.
+
+type dropper struct {
+ s *session
+ file storage.File
+}
+
+func (d dropper) Drop(err error) {
+ if e, ok := err.(*journal.ErrCorrupted); ok {
+ d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
+ } else {
+ d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)
+ }
+}
+
+func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) }
+func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
+
+// File utils.
+
+func (s *session) getJournalFile(num uint64) storage.File {
+ return s.stor.GetFile(num, storage.TypeJournal)
+}
+
+func (s *session) getTableFile(num uint64) storage.File {
+ return s.stor.GetFile(num, storage.TypeTable)
+}
+
+func (s *session) getFiles(t storage.FileType) ([]storage.File, error) {
+ return s.stor.GetFiles(t)
+}
+
+func (s *session) newTemp() storage.File {
+ num := atomic.AddUint64(&s.stTempFileNum, 1) - 1
+ return s.stor.GetFile(num, storage.TypeTemp)
+}
+
+func (s *session) tableFileFromRecord(r atRecord) *tFile {
+ return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax)
+}
+
+// Session state.
+
+// Get current version. This will increment the version ref; version.release
+// must be called (exactly once) after use.
+func (s *session) version() *version {
+ s.vmu.Lock()
+ defer s.vmu.Unlock()
+ s.stVersion.ref++
+ return s.stVersion
+}
+
+// Set current version to v.
+func (s *session) setVersion(v *version) {
+ s.vmu.Lock()
+	v.ref = 1 // Held by session.
+ if old := s.stVersion; old != nil {
+		v.ref++ // Held by old version.
+ old.next = v
+ old.releaseNB()
+ }
+ s.stVersion = v
+ s.vmu.Unlock()
+}
+
+// Get current unused file number.
+func (s *session) nextFileNum() uint64 {
+ return atomic.LoadUint64(&s.stNextFileNum)
+}
+
+// Set current unused file number to num.
+func (s *session) setNextFileNum(num uint64) {
+ atomic.StoreUint64(&s.stNextFileNum, num)
+}
+
+// Mark file number as used.
+func (s *session) markFileNum(num uint64) {
+ nextFileNum := num + 1
+ for {
+ old, x := s.stNextFileNum, nextFileNum
+ if old > x {
+ x = old
+ }
+ if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
+ break
+ }
+ }
+}
+
+// Allocate a file number.
+func (s *session) allocFileNum() uint64 {
+ return atomic.AddUint64(&s.stNextFileNum, 1) - 1
+}
+
+// Reuse given file number.
+func (s *session) reuseFileNum(num uint64) {
+ for {
+ old, x := s.stNextFileNum, num
+ if old != x+1 {
+ x = old
+ }
+ if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
+ break
+ }
+ }
+}
+
+// Manifest related utils.
+
+// Fill the given session record object with the current state; need external
+// synchronization.
+func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
+ r.setNextFileNum(s.nextFileNum())
+
+ if snapshot {
+ if !r.has(recJournalNum) {
+ r.setJournalNum(s.stJournalNum)
+ }
+
+ if !r.has(recSeqNum) {
+ r.setSeqNum(s.stSeqNum)
+ }
+
+ for level, ik := range s.stCompPtrs {
+ if ik != nil {
+ r.addCompPtr(level, ik)
+ }
+ }
+
+ r.setComparer(s.icmp.uName())
+ }
+}
+
+// Mark the record as committed; this will update the session state;
+// need external synchronization.
+func (s *session) recordCommited(r *sessionRecord) {
+ if r.has(recJournalNum) {
+ s.stJournalNum = r.journalNum
+ }
+
+ if r.has(recPrevJournalNum) {
+ s.stPrevJournalNum = r.prevJournalNum
+ }
+
+ if r.has(recSeqNum) {
+ s.stSeqNum = r.seqNum
+ }
+
+ for _, p := range r.compPtrs {
+ s.stCompPtrs[p.level] = iKey(p.ikey)
+ }
+}
+
+// Create a new manifest file; need external synchronization.
+func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
+ num := s.allocFileNum()
+ file := s.stor.GetFile(num, storage.TypeManifest)
+ writer, err := file.Create()
+ if err != nil {
+ return
+ }
+ jw := journal.NewWriter(writer)
+
+ if v == nil {
+ v = s.version()
+ defer v.release()
+ }
+ if rec == nil {
+ rec = &sessionRecord{}
+ }
+ s.fillRecord(rec, true)
+ v.fillRecord(rec)
+
+ defer func() {
+ if err == nil {
+ s.recordCommited(rec)
+ if s.manifest != nil {
+ s.manifest.Close()
+ }
+ if s.manifestWriter != nil {
+ s.manifestWriter.Close()
+ }
+ if s.manifestFile != nil {
+ s.manifestFile.Remove()
+ }
+ s.manifestFile = file
+ s.manifestWriter = writer
+ s.manifest = jw
+ } else {
+ writer.Close()
+ file.Remove()
+ s.reuseFileNum(num)
+ }
+ }()
+
+ w, err := jw.Next()
+ if err != nil {
+ return
+ }
+ err = rec.encode(w)
+ if err != nil {
+ return
+ }
+ err = jw.Flush()
+ if err != nil {
+ return
+ }
+ err = s.stor.SetManifest(file)
+ return
+}
+
+// Flush record to disk.
+func (s *session) flushManifest(rec *sessionRecord) (err error) {
+ s.fillRecord(rec, false)
+ w, err := s.manifest.Next()
+ if err != nil {
+ return
+ }
+ err = rec.encode(w)
+ if err != nil {
+ return
+ }
+ err = s.manifest.Flush()
+ if err != nil {
+ return
+ }
+ if !s.o.GetNoSync() {
+ err = s.manifestWriter.Sync()
+ if err != nil {
+ return
+ }
+ }
+ s.recordCommited(rec)
+ return
+}
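
markFileNum above is a lock-free "raise to at least" update. A standalone sketch of the same compare-and-swap loop, with an explicit atomic load of the old value (which the vendored code reads plainly):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// markUsed raises *next to at least num+1, mirroring markFileNum.
func markUsed(next *uint64, num uint64) {
	for {
		old, x := atomic.LoadUint64(next), num+1
		if old > x {
			x = old
		}
		if atomic.CompareAndSwapUint64(next, old, x) {
			return
		}
	}
}

func main() {
	var next uint64
	var wg sync.WaitGroup
	for i := uint64(0); i < 100; i++ {
		wg.Add(1)
		go func(n uint64) {
			defer wg.Done()
			markUsed(&next, n)
		}(i)
	}
	wg.Wait()
	fmt.Println(next) // 100
}
```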
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
new file mode 100644
index 0000000000..420b277324
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
@@ -0,0 +1,543 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var errFileOpen = errors.New("leveldb/storage: file still open")
+
+type fileLock interface {
+ release() error
+}
+
+type fileStorageLock struct {
+ fs *fileStorage
+}
+
+func (lock *fileStorageLock) Release() {
+ fs := lock.fs
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.slock == lock {
+ fs.slock = nil
+ }
+ return
+}
+
+// fileStorage is a file-system backed storage.
+type fileStorage struct {
+ path string
+
+ mu sync.Mutex
+ flock fileLock
+ slock *fileStorageLock
+ logw *os.File
+ buf []byte
+ // Opened file counter; open < 0 means the storage is closed.
+ open int
+ day int
+}
+
+// OpenFile returns a new filesystem-backed storage implementation with the
+// given path. This also holds a file lock, so any subsequent attempt to open
+// the same path will fail.
+//
+// The storage must be closed after use, by calling Close method.
+func OpenFile(path string) (Storage, error) {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return nil, err
+ }
+
+ flock, err := newFileLock(filepath.Join(path, "LOCK"))
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err != nil {
+ flock.release()
+ }
+ }()
+
+ rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old"))
+ logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+
+ fs := &fileStorage{path: path, flock: flock, logw: logw}
+ runtime.SetFinalizer(fs, (*fileStorage).Close)
+ return fs, nil
+}
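+
+// A minimal usage sketch (error handling elided; the path is just an
+// example):
+//
+//   stor, err := storage.OpenFile("/tmp/exampledb")
+//   if err != nil { ... }
+//   defer stor.Close()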
+
+func (fs *fileStorage) Lock() (util.Releaser, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ if fs.slock != nil {
+ return nil, ErrLocked
+ }
+ fs.slock = &fileStorageLock{fs: fs}
+ return fs.slock, nil
+}
+
+func itoa(buf []byte, i int, wid int) []byte {
+ var u uint = uint(i)
+ if u == 0 && wid <= 1 {
+ return append(buf, '0')
+ }
+
+ // Assemble decimal in reverse order.
+ var b [32]byte
+ bp := len(b)
+ for ; u > 0 || wid > 0; u /= 10 {
+ bp--
+ wid--
+ b[bp] = byte(u%10) + '0'
+ }
+ return append(buf, b[bp:]...)
+}
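+
+// For example, itoa(nil, 7, 2) appends "07" while itoa(nil, 123, 2)
+// appends "123": wid pads with zeros but never truncates.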
+
+func (fs *fileStorage) printDay(t time.Time) {
+ if fs.day == t.Day() {
+ return
+ }
+ fs.day = t.Day()
+ fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n"))
+}
+
+func (fs *fileStorage) doLog(t time.Time, str string) {
+ fs.printDay(t)
+ hour, min, sec := t.Clock()
+ msec := t.Nanosecond() / 1e3
+ // time
+ fs.buf = itoa(fs.buf[:0], hour, 2)
+ fs.buf = append(fs.buf, ':')
+ fs.buf = itoa(fs.buf, min, 2)
+ fs.buf = append(fs.buf, ':')
+ fs.buf = itoa(fs.buf, sec, 2)
+ fs.buf = append(fs.buf, '.')
+ fs.buf = itoa(fs.buf, msec, 6)
+ fs.buf = append(fs.buf, ' ')
+ // write
+ fs.buf = append(fs.buf, []byte(str)...)
+ fs.buf = append(fs.buf, '\n')
+ fs.logw.Write(fs.buf)
+}
+
+func (fs *fileStorage) Log(str string) {
+ t := time.Now()
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return
+ }
+ fs.doLog(t, str)
+}
+
+func (fs *fileStorage) log(str string) {
+ fs.doLog(time.Now(), str)
+}
+
+func (fs *fileStorage) GetFile(num uint64, t FileType) File {
+ return &file{fs: fs, num: num, t: t}
+}
+
+func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ dir, err := os.Open(fs.path)
+ if err != nil {
+ return
+ }
+ fnn, err := dir.Readdirnames(0)
+ // Close the dir first before checking for Readdirnames error.
+ if err := dir.Close(); err != nil {
+ fs.log(fmt.Sprintf("close dir: %v", err))
+ }
+ if err != nil {
+ return
+ }
+ f := &file{fs: fs}
+ for _, fn := range fnn {
+ if f.parse(fn) && (f.t&t) != 0 {
+ ff = append(ff, f)
+ f = &file{fs: fs}
+ }
+ }
+ return
+}
+
+func (fs *fileStorage) GetManifest() (f File, err error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ dir, err := os.Open(fs.path)
+ if err != nil {
+ return
+ }
+ fnn, err := dir.Readdirnames(0)
+ // Close the dir first before checking for Readdirnames error.
+ if err := dir.Close(); err != nil {
+ fs.log(fmt.Sprintf("close dir: %v", err))
+ }
+ if err != nil {
+ return
+ }
+ // Find latest CURRENT file.
+ var rem []string
+ var pend bool
+ var cerr error
+ for _, fn := range fnn {
+ if strings.HasPrefix(fn, "CURRENT") {
+ pend1 := len(fn) > 7
+ // Make sure it is valid name for a CURRENT file, otherwise skip it.
+ if pend1 {
+ if fn[7] != '.' || len(fn) < 9 {
+ fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
+ continue
+ }
+ if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
+ fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
+ continue
+ }
+ }
+ path := filepath.Join(fs.path, fn)
+ r, e1 := os.OpenFile(path, os.O_RDONLY, 0)
+ if e1 != nil {
+ return nil, e1
+ }
+ b, e1 := ioutil.ReadAll(r)
+ if e1 != nil {
+ r.Close()
+ return nil, e1
+ }
+ f1 := &file{fs: fs}
+ if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) {
+ fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn))
+ if pend1 {
+ rem = append(rem, fn)
+ }
+ if !pend1 || cerr == nil {
+ cerr = &ErrCorrupted{
+ File: fsParseName(filepath.Base(fn)),
+ Err: errors.New("leveldb/storage: corrupted or incomplete manifest file"),
+ }
+ }
+ } else if f != nil && f1.Num() < f.Num() {
+ fs.log(fmt.Sprintf("skipping %s: obsolete", fn))
+ if pend1 {
+ rem = append(rem, fn)
+ }
+ } else {
+ f = f1
+ pend = pend1
+ }
+ if err := r.Close(); err != nil {
+ fs.log(fmt.Sprintf("close %s: %v", fn, err))
+ }
+ }
+ }
+ // Don't remove any files if there is no valid CURRENT file.
+ if f == nil {
+ if cerr != nil {
+ err = cerr
+ } else {
+ err = os.ErrNotExist
+ }
+ return
+ }
+ // Rename pending CURRENT file to an effective CURRENT.
+ if pend {
+ path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num())
+ if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
+ fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err))
+ }
+ }
+ // Remove obsolete or incomplete pending CURRENT files.
+ for _, fn := range rem {
+ path := filepath.Join(fs.path, fn)
+ if err := os.Remove(path); err != nil {
+ fs.log(fmt.Sprintf("remove %s: %v", fn, err))
+ }
+ }
+ return
+}
+
+func (fs *fileStorage) SetManifest(f File) (err error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ f2, ok := f.(*file)
+ if !ok || f2.t != TypeManifest {
+ return ErrInvalidFile
+ }
+ defer func() {
+ if err != nil {
+ fs.log(fmt.Sprintf("CURRENT: %v", err))
+ }
+ }()
+ path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num())
+ w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(w, f2.name())
+ // Close the file first.
+ if err := w.Close(); err != nil {
+ fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err))
+ }
+ if err != nil {
+ return err
+ }
+ return rename(path, filepath.Join(fs.path, "CURRENT"))
+}
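+
+// For example, setting MANIFEST-000002 as manifest first writes the line
+// "MANIFEST-000002" to CURRENT.2 and then renames CURRENT.2 to CURRENT, so
+// a reader never observes a partially written CURRENT file.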
+
+func (fs *fileStorage) Close() error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ // Clear the finalizer.
+ runtime.SetFinalizer(fs, nil)
+
+ if fs.open > 0 {
+ fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open))
+ }
+ fs.open = -1
+ e1 := fs.logw.Close()
+ err := fs.flock.release()
+ if err == nil {
+ err = e1
+ }
+ return err
+}
+
+type fileWrap struct {
+ *os.File
+ f *file
+}
+
+func (fw fileWrap) Sync() error {
+ if err := fw.File.Sync(); err != nil {
+ return err
+ }
+ if fw.f.Type() == TypeManifest {
+ // Also sync parent directory if file type is manifest.
+ // See: https://code.google.com/p/leveldb/issues/detail?id=190.
+ if err := syncDir(fw.f.fs.path); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (fw fileWrap) Close() error {
+ f := fw.f
+ f.fs.mu.Lock()
+ defer f.fs.mu.Unlock()
+ if !f.open {
+ return ErrClosed
+ }
+ f.open = false
+ f.fs.open--
+ err := fw.File.Close()
+ if err != nil {
+ f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err))
+ }
+ return err
+}
+
+type file struct {
+ fs *fileStorage
+ num uint64
+ t FileType
+ open bool
+}
+
+func (f *file) Open() (Reader, error) {
+ f.fs.mu.Lock()
+ defer f.fs.mu.Unlock()
+ if f.fs.open < 0 {
+ return nil, ErrClosed
+ }
+ if f.open {
+ return nil, errFileOpen
+ }
+ of, err := os.OpenFile(f.path(), os.O_RDONLY, 0)
+ if err != nil {
+ if f.hasOldName() && os.IsNotExist(err) {
+ of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0)
+ if err == nil {
+ goto ok
+ }
+ }
+ return nil, err
+ }
+ok:
+ f.open = true
+ f.fs.open++
+ return fileWrap{of, f}, nil
+}
+
+func (f *file) Create() (Writer, error) {
+ f.fs.mu.Lock()
+ defer f.fs.mu.Unlock()
+ if f.fs.open < 0 {
+ return nil, ErrClosed
+ }
+ if f.open {
+ return nil, errFileOpen
+ }
+ of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return nil, err
+ }
+ f.open = true
+ f.fs.open++
+ return fileWrap{of, f}, nil
+}
+
+func (f *file) Replace(newfile File) error {
+ f.fs.mu.Lock()
+ defer f.fs.mu.Unlock()
+ if f.fs.open < 0 {
+ return ErrClosed
+ }
+ newfile2, ok := newfile.(*file)
+ if !ok {
+ return ErrInvalidFile
+ }
+ if f.open || newfile2.open {
+ return errFileOpen
+ }
+ return rename(newfile2.path(), f.path())
+}
+
+func (f *file) Type() FileType {
+ return f.t
+}
+
+func (f *file) Num() uint64 {
+ return f.num
+}
+
+func (f *file) Remove() error {
+ f.fs.mu.Lock()
+ defer f.fs.mu.Unlock()
+ if f.fs.open < 0 {
+ return ErrClosed
+ }
+ if f.open {
+ return errFileOpen
+ }
+ err := os.Remove(f.path())
+ if err != nil {
+ f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err))
+ }
+ // Also try to remove the file with the old name, just in case.
+ if f.hasOldName() {
+ if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) {
+ f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err))
+ err = e1
+ }
+ }
+ return err
+}
+
+func (f *file) hasOldName() bool {
+ return f.t == TypeTable
+}
+
+func (f *file) oldName() string {
+ switch f.t {
+ case TypeTable:
+ return fmt.Sprintf("%06d.sst", f.num)
+ }
+ return f.name()
+}
+
+func (f *file) oldPath() string {
+ return filepath.Join(f.fs.path, f.oldName())
+}
+
+func (f *file) name() string {
+ switch f.t {
+ case TypeManifest:
+ return fmt.Sprintf("MANIFEST-%06d", f.num)
+ case TypeJournal:
+ return fmt.Sprintf("%06d.log", f.num)
+ case TypeTable:
+ return fmt.Sprintf("%06d.ldb", f.num)
+ case TypeTemp:
+ return fmt.Sprintf("%06d.tmp", f.num)
+ default:
+ panic("invalid file type")
+ }
+}
+
+func (f *file) path() string {
+ return filepath.Join(f.fs.path, f.name())
+}
+
+func fsParseName(name string) *FileInfo {
+ fi := &FileInfo{}
+ var tail string
+ _, err := fmt.Sscanf(name, "%d.%s", &fi.Num, &tail)
+ if err == nil {
+ switch tail {
+ case "log":
+ fi.Type = TypeJournal
+ case "ldb", "sst":
+ fi.Type = TypeTable
+ case "tmp":
+ fi.Type = TypeTemp
+ default:
+ return nil
+ }
+ return fi
+ }
+ n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fi.Num, &tail)
+ if n == 1 {
+ fi.Type = TypeManifest
+ return fi
+ }
+ return nil
+}
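+
+// For example, fsParseName("000005.log") yields &FileInfo{TypeJournal, 5},
+// fsParseName("000010.ldb") yields &FileInfo{TypeTable, 10} and
+// fsParseName("MANIFEST-000002") yields &FileInfo{TypeManifest, 2}; names
+// that match neither pattern yield nil.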
+
+func (f *file) parse(name string) bool {
+ fi := fsParseName(name)
+ if fi == nil {
+ return false
+ }
+ f.t = fi.Type
+ f.num = fi.Num
+ return true
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
new file mode 100644
index 0000000000..42940d769f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "os"
+ "path/filepath"
+)
+
+type plan9FileLock struct {
+ f *os.File
+}
+
+func (fl *plan9FileLock) release() error {
+ return fl.f.Close()
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
+ if err != nil {
+ return
+ }
+ fl = &plan9FileLock{f: f}
+ return
+}
+
+func rename(oldpath, newpath string) error {
+ if _, err := os.Stat(newpath); err == nil {
+ if err := os.Remove(newpath); err != nil {
+ return err
+ }
+ }
+
+ _, fname := filepath.Split(newpath)
+ return os.Rename(oldpath, fname)
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
new file mode 100644
index 0000000000..102031bfd5
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+type unixFileLock struct {
+ f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+ if err := setFileLock(fl.f, false); err != nil {
+ return err
+ }
+ return fl.f.Close()
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return
+ }
+ err = setFileLock(f, true)
+ if err != nil {
+ f.Close()
+ return
+ }
+ fl = &unixFileLock{f: f}
+ return
+}
+
+func setFileLock(f *os.File, lock bool) error {
+ flock := syscall.Flock_t{
+ Type: syscall.F_UNLCK,
+ Start: 0,
+ Len: 0,
+ Whence: 1,
+ }
+ if lock {
+ flock.Type = syscall.F_WRLCK
+ }
+ return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock)
+}
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
new file mode 100644
index 0000000000..6eb3274212
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+type unixFileLock struct {
+ f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+ if err := setFileLock(fl.f, false); err != nil {
+ return err
+ }
+ return fl.f.Close()
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return
+ }
+ err = setFileLock(f, true)
+ if err != nil {
+ f.Close()
+ return
+ }
+ fl = &unixFileLock{f: f}
+ return
+}
+
+func setFileLock(f *os.File, lock bool) error {
+ how := syscall.LOCK_UN
+ if lock {
+ how = syscall.LOCK_EX
+ }
+ return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB)
+}
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func isErrInvalid(err error) bool {
+ if err == os.ErrInvalid {
+ return true
+ }
+ if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
+ return true
+ }
+ return false
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil && !isErrInvalid(err) {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
new file mode 100644
index 0000000000..50c3c454e7
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
@@ -0,0 +1,69 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+const (
+ _MOVEFILE_REPLACE_EXISTING = 1
+)
+
+type windowsFileLock struct {
+ fd syscall.Handle
+}
+
+func (fl *windowsFileLock) release() error {
+ return syscall.Close(fl.fd)
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return
+ }
+ fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ if err != nil {
+ return
+ }
+ fl = &windowsFileLock{fd: fd}
+ return
+}
+
+func moveFileEx(from *uint16, to *uint16, flags uint32) error {
+ r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ if r1 == 0 {
+ if e1 != 0 {
+ return error(e1)
+ } else {
+ return syscall.EINVAL
+ }
+ }
+ return nil
+}
+
+func rename(oldpath, newpath string) error {
+ from, err := syscall.UTF16PtrFromString(oldpath)
+ if err != nil {
+ return err
+ }
+ to, err := syscall.UTF16PtrFromString(newpath)
+ if err != nil {
+ return err
+ }
+ return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING)
+}
+
+func syncDir(name string) error { return nil }
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
new file mode 100644
index 0000000000..fc1c8165df
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
@@ -0,0 +1,203 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "bytes"
+ "os"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+const typeShift = 3
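+
+// A file number and type are packed into a single map key as
+// num<<typeShift | uint64(type). For example, journal file 5
+// (TypeJournal == 2) lives under key 5<<3 | 2 == 42; GetFiles unpacks it
+// again as x>>typeShift and FileType(x)&TypeAll.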
+
+type memStorageLock struct {
+ ms *memStorage
+}
+
+func (lock *memStorageLock) Release() {
+ ms := lock.ms
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.slock == lock {
+ ms.slock = nil
+ }
+ return
+}
+
+// memStorage is a memory-backed storage.
+type memStorage struct {
+ mu sync.Mutex
+ slock *memStorageLock
+ files map[uint64]*memFile
+ manifest *memFilePtr
+}
+
+// NewMemStorage returns a new memory-backed storage implementation.
+func NewMemStorage() Storage {
+ return &memStorage{
+ files: make(map[uint64]*memFile),
+ }
+}
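+
+// A minimal usage sketch; the memory-backed storage is convenient in tests:
+//
+//   stor := storage.NewMemStorage()
+//   f := stor.GetFile(1, storage.TypeJournal)
+//   w, err := f.Create() // used like its file-backed counterpart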
+
+func (ms *memStorage) Lock() (util.Releaser, error) {
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.slock != nil {
+ return nil, ErrLocked
+ }
+ ms.slock = &memStorageLock{ms: ms}
+ return ms.slock, nil
+}
+
+func (*memStorage) Log(str string) {}
+
+func (ms *memStorage) GetFile(num uint64, t FileType) File {
+ return &memFilePtr{ms: ms, num: num, t: t}
+}
+
+func (ms *memStorage) GetFiles(t FileType) ([]File, error) {
+ ms.mu.Lock()
+ var ff []File
+ for x := range ms.files {
+ num, mt := x>>typeShift, FileType(x)&TypeAll
+ if mt&t == 0 {
+ continue
+ }
+ ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt})
+ }
+ ms.mu.Unlock()
+ return ff, nil
+}
+
+func (ms *memStorage) GetManifest() (File, error) {
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.manifest == nil {
+ return nil, os.ErrNotExist
+ }
+ return ms.manifest, nil
+}
+
+func (ms *memStorage) SetManifest(f File) error {
+ fm, ok := f.(*memFilePtr)
+ if !ok || fm.t != TypeManifest {
+ return ErrInvalidFile
+ }
+ ms.mu.Lock()
+ ms.manifest = fm
+ ms.mu.Unlock()
+ return nil
+}
+
+func (*memStorage) Close() error { return nil }
+
+type memReader struct {
+ *bytes.Reader
+ m *memFile
+}
+
+func (mr *memReader) Close() error {
+ return mr.m.Close()
+}
+
+type memFile struct {
+ bytes.Buffer
+ ms *memStorage
+ open bool
+}
+
+func (*memFile) Sync() error { return nil }
+func (m *memFile) Close() error {
+ m.ms.mu.Lock()
+ m.open = false
+ m.ms.mu.Unlock()
+ return nil
+}
+
+type memFilePtr struct {
+ ms *memStorage
+ num uint64
+ t FileType
+}
+
+func (p *memFilePtr) x() uint64 {
+ return p.Num()<<typeShift | uint64(p.Type())
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package storage provides storage abstraction for LevelDB.
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type FileType uint32
+
+const (
+ TypeManifest FileType = 1 << iota
+ TypeJournal
+ TypeTable
+ TypeTemp
+
+ TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
+)
+
+func (t FileType) String() string {
+ switch t {
+ case TypeManifest:
+ return "manifest"
+ case TypeJournal:
+ return "journal"
+ case TypeTable:
+ return "table"
+ case TypeTemp:
+ return "temp"
+ }
+ return fmt.Sprintf("<unknown:%d>", t)
+}
+
+var (
+ ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
+ ErrLocked = errors.New("leveldb/storage: already locked")
+ ErrClosed = errors.New("leveldb/storage: closed")
+)
+
+// ErrCorrupted is the type that wraps errors that indicate corruption of
+// a file. Package storage has its own type instead of using
+// errors.ErrCorrupted to prevent circular import.
+type ErrCorrupted struct {
+ File *FileInfo
+ Err error
+}
+
+func (e *ErrCorrupted) Error() string {
+ if e.File != nil {
+ return fmt.Sprintf("%v [file=%v]", e.Err, e.File)
+ } else {
+ return e.Err.Error()
+ }
+}
+
+// Syncer is the interface that wraps basic Sync method.
+type Syncer interface {
+ // Sync commits the current contents of the file to stable storage.
+ Sync() error
+}
+
+// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
+// methods.
+type Reader interface {
+ io.ReadSeeker
+ io.ReaderAt
+ io.Closer
+}
+
+// Writer is the interface that groups the basic Write, Sync and Close
+// methods.
+type Writer interface {
+ io.WriteCloser
+ Syncer
+}
+
+// File is a file handle within the storage. A file instance must be goroutine-safe.
+type File interface {
+ // Open opens the file for read. Returns os.ErrNotExist error
+ // if the file does not exist.
+ // Returns ErrClosed if the underlying storage is closed.
+ Open() (r Reader, err error)
+
+ // Create creates the file for writing. Truncates the file if it
+ // already exists.
+ // Returns ErrClosed if the underlying storage is closed.
+ // Returns ErrClosed if the underlying storage is closed.
+ Create() (w Writer, err error)
+
+ // Replace replaces file with newfile.
+ // Returns ErrClosed if the underlying storage is closed.
+ Replace(newfile File) error
+
+ // Type returns the file type
+ Type() FileType
+
+ // Num returns the file number.
+ Num() uint64
+
+ // Remove removes the file.
+ // Returns ErrClosed if the underlying storage is closed.
+ Remove() error
+}
+
+// Storage is the storage abstraction. A storage instance must be goroutine-safe.
+type Storage interface {
+ // Lock locks the storage. Any subsequent attempt to call Lock will fail
+ // until the last lock is released.
+ // After use the caller should call the Release method.
+ Lock() (l util.Releaser, err error)
+
+ // Log logs a string. This is used for logging. An implementation
+ // may write to a file, stdout or simply do nothing.
+ Log(str string)
+
+ // GetFile returns a file for the given number and type. GetFile will never
+ // return nil, even if the underlying storage is closed.
+ GetFile(num uint64, t FileType) File
+
+ // GetFiles returns a slice of files that match the given file types.
+ // The file types may be OR'ed together.
+ GetFiles(t FileType) ([]File, error)
+
+ // GetManifest returns a manifest file. Returns os.ErrNotExist if manifest
+ // file does not exist.
+ GetManifest() (File, error)
+
+ // SetManifest sets the given file as the manifest file. The given file
+ // must be of the manifest file type, or an error will be returned.
+ SetManifest(f File) error
+
+ // Close closes the storage. It is valid to call Close multiple times.
+ // Other methods should not be called after the storage has been closed.
+ Close() error
+}
+
+// FileInfo wraps basic file info.
+type FileInfo struct {
+ Type FileType
+ Num uint64
+}
+
+func (fi FileInfo) String() string {
+ switch fi.Type {
+ case TypeManifest:
+ return fmt.Sprintf("MANIFEST-%06d", fi.Num)
+ case TypeJournal:
+ return fmt.Sprintf("%06d.log", fi.Num)
+ case TypeTable:
+ return fmt.Sprintf("%06d.ldb", fi.Num)
+ case TypeTemp:
+ return fmt.Sprintf("%06d.tmp", fi.Num)
+ default:
+ return fmt.Sprintf("%#x-%d", fi.Type, fi.Num)
+ }
+}
+
+// NewFileInfo creates a new FileInfo from the given File. It returns nil
+// if File is nil.
+func NewFileInfo(f File) *FileInfo {
+ if f == nil {
+ return nil
+ }
+ return &FileInfo{f.Type(), f.Num()}
+}
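+
+// A rough sketch of how these interfaces fit together (error handling
+// elided; path and num are placeholders):
+//
+//   stor, _ := storage.OpenFile(path)          // or storage.NewMemStorage()
+//   lock, _ := stor.Lock()                     // exclusive ownership of the store
+//   f := stor.GetFile(num, storage.TypeTable)  // a handle; nothing exists yet
+//   w, _ := f.Create()                         // now the file exists
+//   w.Sync()
+//   w.Close()
+//   lock.Release()
+//   stor.Close()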
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
new file mode 100644
index 0000000000..37be47aebf
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
@@ -0,0 +1,529 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sort"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/table"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// tFile holds basic information about a table.
+type tFile struct {
+ file storage.File
+ seekLeft int32
+ size uint64
+ imin, imax iKey
+}
+
+// Returns true if given key is after largest key of this table.
+func (t *tFile) after(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0
+}
+
+// Returns true if given key is before smallest key of this table.
+func (t *tFile) before(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0
+}
+
+// Returns true if given key range overlaps with this table key range.
+func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool {
+ return !t.after(icmp, umin) && !t.before(icmp, umax)
+}
+
+// Consumes one seek and returns the number of seeks left.
+func (t *tFile) consumeSeek() int32 {
+ return atomic.AddInt32(&t.seekLeft, -1)
+}
+
+// Creates new tFile.
+func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile {
+ f := &tFile{
+ file: file,
+ size: size,
+ imin: imin,
+ imax: imax,
+ }
+
+ // We arrange to automatically compact this file after
+ // a certain number of seeks. Let's assume:
+ // (1) One seek costs 10ms
+ // (2) Writing or reading 1MB costs 10ms (100MB/s)
+ // (3) A compaction of 1MB does 25MB of IO:
+ // 1MB read from this level
+ // 10-12MB read from next level (boundaries may be misaligned)
+ // 10-12MB written to next level
+ // This implies that 25 seeks cost the same as the compaction
+ // of 1MB of data. I.e., one seek costs approximately the
+ // same as the compaction of 40KB of data. We are a little
+ // conservative and allow approximately one seek for every 16KB
+ // of data before triggering a compaction.
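+ // For example, a 32MB table starts with 32*1024*1024/16384 = 2048 seeks,
+ // while any table of ~1.6MB or less gets the 100-seek floor below.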
+ f.seekLeft = int32(size / 16384)
+ if f.seekLeft < 100 {
+ f.seekLeft = 100
+ }
+
+ return f
+}
+
+// tFiles hold multiple tFile.
+type tFiles []*tFile
+
+func (tf tFiles) Len() int { return len(tf) }
+func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
+
+func (tf tFiles) nums() string {
+ x := "[ "
+ for i, f := range tf {
+ if i != 0 {
+ x += ", "
+ }
+ x += fmt.Sprint(f.file.Num())
+ }
+ x += " ]"
+ return x
+}
+
+// Returns true if table i's smallest key is less than table j's.
+// This is used to sort tables by key in ascending order.
+func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
+ a, b := tf[i], tf[j]
+ n := icmp.Compare(a.imin, b.imin)
+ if n == 0 {
+ return a.file.Num() < b.file.Num()
+ }
+ return n < 0
+}
+
+// Returns true if table i's file number is greater than table j's.
+// This is used to sort tables by file number in descending order.
+func (tf tFiles) lessByNum(i, j int) bool {
+ return tf[i].file.Num() > tf[j].file.Num()
+}
+
+// Sorts tables by key in ascending order.
+func (tf tFiles) sortByKey(icmp *iComparer) {
+ sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
+}
+
+// Sorts tables by file number in descending order.
+func (tf tFiles) sortByNum() {
+ sort.Sort(&tFilesSortByNum{tFiles: tf})
+}
+
+// Returns sum of all tables size.
+func (tf tFiles) size() (sum uint64) {
+ for _, t := range tf {
+ sum += t.size
+ }
+ return sum
+}
+
+// Returns the smallest index of the tables whose smallest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
+ return sort.Search(len(tf), func(i int) bool {
+ return icmp.Compare(tf[i].imin, ikey) >= 0
+ })
+}
+
+// Returns the smallest index of the tables whose largest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int {
+ return sort.Search(len(tf), func(i int) bool {
+ return icmp.Compare(tf[i].imax, ikey) >= 0
+ })
+}
+
+// Returns true if given key range overlaps with one or more
+// tables key range. If unsorted is true then binary search will not be used.
+func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
+ if unsorted {
+ // Check against all files.
+ for _, t := range tf {
+ if t.overlaps(icmp, umin, umax) {
+ return true
+ }
+ }
+ return false
+ }
+
+ i := 0
+ if len(umin) > 0 {
+ // Find the earliest possible internal key for min.
+ i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek))
+ }
+ if i >= len(tf) {
+ // Beginning of range is after all files, so no overlap.
+ return false
+ }
+ return !tf[i].before(icmp, umax)
+}
+
+// Returns the tables whose key ranges overlap with the given key range.
+// The range is expanded if a user key spans adjacent tables.
+// If overlapped is true then the search is restarted whenever umax is
+// expanded.
+// The dst content will be overwritten; see the example below.
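+// For example, with level-0 tables covering user keys [a,c], [b,e] and
+// [f,g], a query for [d,d] first matches [b,e], which widens umin to b and
+// then, via [a,c], to a; [b,e] later widens umax to e. After the restarts
+// the result is [a,c] and [b,e], while [f,g] stays excluded.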
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
+ dst = dst[:0]
+ for i := 0; i < len(tf); {
+ t := tf[i]
+ if t.overlaps(icmp, umin, umax) {
+ if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
+ umin = t.imin.ukey()
+ dst = dst[:0]
+ i = 0
+ continue
+ } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
+ umax = t.imax.ukey()
+ // Restart search if it is overlapped.
+ if overlapped {
+ dst = dst[:0]
+ i = 0
+ continue
+ }
+ }
+
+ dst = append(dst, t)
+ }
+ i++
+ }
+
+ return dst
+}
+
+// Returns tables key range.
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) {
+ for i, t := range tf {
+ if i == 0 {
+ imin, imax = t.imin, t.imax
+ continue
+ }
+ if icmp.Compare(t.imin, imin) < 0 {
+ imin = t.imin
+ }
+ if icmp.Compare(t.imax, imax) > 0 {
+ imax = t.imax
+ }
+ }
+
+ return
+}
+
+// Creates iterator index from tables.
+func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
+ if slice != nil {
+ var start, limit int
+ if slice.Start != nil {
+ start = tf.searchMax(icmp, iKey(slice.Start))
+ }
+ if slice.Limit != nil {
+ limit = tf.searchMin(icmp, iKey(slice.Limit))
+ } else {
+ limit = tf.Len()
+ }
+ tf = tf[start:limit]
+ }
+ return iterator.NewArrayIndexer(&tFilesArrayIndexer{
+ tFiles: tf,
+ tops: tops,
+ icmp: icmp,
+ slice: slice,
+ ro: ro,
+ })
+}
+
+// Tables iterator index.
+type tFilesArrayIndexer struct {
+ tFiles
+ tops *tOps
+ icmp *iComparer
+ slice *util.Range
+ ro *opt.ReadOptions
+}
+
+func (a *tFilesArrayIndexer) Search(key []byte) int {
+ return a.searchMax(a.icmp, iKey(key))
+}
+
+func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
+ if i == 0 || i == a.Len()-1 {
+ return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
+ }
+ return a.tops.newIterator(a.tFiles[i], nil, a.ro)
+}
+
+// Helper type for sortByKey.
+type tFilesSortByKey struct {
+ tFiles
+ icmp *iComparer
+}
+
+func (x *tFilesSortByKey) Less(i, j int) bool {
+ return x.lessByKey(x.icmp, i, j)
+}
+
+// Helper type for sortByNum.
+type tFilesSortByNum struct {
+ tFiles
+}
+
+func (x *tFilesSortByNum) Less(i, j int) bool {
+ return x.lessByNum(i, j)
+}
+
+// Table operations.
+type tOps struct {
+ s *session
+ noSync bool
+ cache *cache.Cache
+ bcache *cache.Cache
+ bpool *util.BufferPool
+}
+
+// Creates an empty table and returns table writer.
+func (t *tOps) create() (*tWriter, error) {
+ file := t.s.getTableFile(t.s.allocFileNum())
+ fw, err := file.Create()
+ if err != nil {
+ return nil, err
+ }
+ return &tWriter{
+ t: t,
+ file: file,
+ w: fw,
+ tw: table.NewWriter(fw, t.s.o.Options),
+ }, nil
+}
+
+// Builds table from src iterator.
+func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
+ w, err := t.create()
+ if err != nil {
+ return
+ }
+
+ defer func() {
+ if err != nil {
+ w.drop()
+ }
+ }()
+
+ for src.Next() {
+ err = w.append(src.Key(), src.Value())
+ if err != nil {
+ return
+ }
+ }
+ err = src.Error()
+ if err != nil {
+ return
+ }
+
+ n = w.tw.EntriesLen()
+ f, err = w.finish()
+ return
+}
+
+// Opens table. It returns a cache handle, which should
+// be released after use.
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
+ num := f.file.Num()
+ ch = t.cache.Get(0, num, func() (size int, value cache.Value) {
+ var r storage.Reader
+ r, err = f.file.Open()
+ if err != nil {
+ return 0, nil
+ }
+
+ var bcache *cache.CacheGetter
+ if t.bcache != nil {
+ bcache = &cache.CacheGetter{Cache: t.bcache, NS: num}
+ }
+
+ var tr *table.Reader
+ tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options)
+ if err != nil {
+ r.Close()
+ return 0, nil
+ }
+ return 1, tr
+
+ })
+ if ch == nil && err == nil {
+ err = ErrClosed
+ }
+ return
+}
+
+// Finds key/value pair whose key is greater than or equal to the
+// given key.
+func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).Find(key, true, ro)
+}
+
+// Finds key that is greater than or equal to the given key.
+func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return nil, err
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).FindKey(key, true, ro)
+}
+
+// Returns approximate offset of the given key.
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return
+ }
+ defer ch.Release()
+ offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
+ return uint64(offset_), err
+}
+
+// Creates an iterator from the given table.
+func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ ch, err := t.open(f)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
+ iter.SetReleaser(ch)
+ return iter
+}
+
+// Removes table from persistent storage. It waits until
+// no one uses the table.
+func (t *tOps) remove(f *tFile) {
+ num := f.file.Num()
+ t.cache.Delete(0, num, func() {
+ if err := f.file.Remove(); err != nil {
+ t.s.logf("table@remove removing @%d %q", num, err)
+ } else {
+ t.s.logf("table@remove removed @%d", num)
+ }
+ if t.bcache != nil {
+ t.bcache.EvictNS(num)
+ }
+ })
+}
+
+// Closes the table ops instance. It closes all tables,
+// regardless of whether they are still in use.
+func (t *tOps) close() {
+ t.bpool.Close()
+ t.cache.Close()
+ if t.bcache != nil {
+ t.bcache.Close()
+ }
+}
+
+// Creates new initialized table ops instance.
+func newTableOps(s *session) *tOps {
+ var (
+ cacher cache.Cacher
+ bcache *cache.Cache
+ bpool *util.BufferPool
+ )
+ if s.o.GetOpenFilesCacheCapacity() > 0 {
+ cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
+ }
+ if !s.o.GetDisableBlockCache() {
+ var bcacher cache.Cacher
+ if s.o.GetBlockCacheCapacity() > 0 {
+ bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
+ }
+ bcache = cache.NewCache(bcacher)
+ }
+ if !s.o.GetDisableBufferPool() {
+ bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
+ }
+ return &tOps{
+ s: s,
+ noSync: s.o.GetNoSync(),
+ cache: cache.NewCache(cacher),
+ bcache: bcache,
+ bpool: bpool,
+ }
+}
+
+// tWriter wraps the table writer. It keeps track of the file descriptor
+// and the added key range.
+type tWriter struct {
+ t *tOps
+
+ file storage.File
+ w storage.Writer
+ tw *table.Writer
+
+ first, last []byte
+}
+
+// Append key/value pair to the table.
+func (w *tWriter) append(key, value []byte) error {
+ if w.first == nil {
+ w.first = append([]byte{}, key...)
+ }
+ w.last = append(w.last[:0], key...)
+ return w.tw.Append(key, value)
+}
+
+// Returns true if the table is empty.
+func (w *tWriter) empty() bool {
+ return w.first == nil
+}
+
+// Closes the storage.Writer.
+func (w *tWriter) close() {
+ if w.w != nil {
+ w.w.Close()
+ w.w = nil
+ }
+}
+
+// Finalizes the table and returns table file.
+func (w *tWriter) finish() (f *tFile, err error) {
+ defer w.close()
+ err = w.tw.Close()
+ if err != nil {
+ return
+ }
+ if !w.t.noSync {
+ err = w.w.Sync()
+ if err != nil {
+ return
+ }
+ }
+ f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
+ return
+}
+
+// Drops the table.
+func (w *tWriter) drop() {
+ w.close()
+ w.file.Remove()
+ w.t.s.reuseFileNum(w.file.Num())
+ w.file = nil
+ w.tw = nil
+ w.first = nil
+ w.last = nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
new file mode 100644
index 0000000000..23c7c6129a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -0,0 +1,1107 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/golang/snappy"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrReaderReleased = errors.New("leveldb/table: reader released")
+ ErrIterReleased = errors.New("leveldb/table: iterator released")
+)
+
+type ErrCorrupted struct {
+ Pos int64
+ Size int64
+ Kind string
+ Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason)
+}
+
+func max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+type block struct {
+ bpool *util.BufferPool
+ bh blockHandle
+ data []byte
+ restartsLen int
+ restartsOffset int
+}
+
+func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) {
+ index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+ offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
+ offset += 1 // shared always zero, since this is a restart point
+ v1, n1 := binary.Uvarint(b.data[offset:]) // key length
+ _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
+ m := offset + n1 + n2
+ return cmp.Compare(b.data[m:m+int(v1)], key) > 0
+ }) + rstart - 1
+ if index < rstart {
+ // The smallest key is greater-than key sought.
+ index = rstart
+ }
+ offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+ return
+}
+
+func (b *block) restartIndex(rstart, rlimit, offset int) int {
+ return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+ return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset
+ }) + rstart - 1
+}
+
+func (b *block) restartOffset(index int) int {
+ return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+}
+
+func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) {
+ if offset >= b.restartsOffset {
+ if offset != b.restartsOffset {
+ err = &ErrCorrupted{Reason: "entries offset not aligned"}
+ }
+ return
+ }
+ v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length
+ v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Unshared key length
+ v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length
+ m := n0 + n1 + n2
+ n = m + int(v1) + int(v2)
+ if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset {
+ err = &ErrCorrupted{Reason: "entries corrupted"}
+ return
+ }
+ key = b.data[offset+m : offset+m+int(v1)]
+ value = b.data[offset+m+int(v1) : offset+n]
+ nShared = int(v0)
+ return
+}
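+
+// Each entry is laid out as three uvarints followed by raw bytes: the
+// shared key length, the unshared key length, the value length, then the
+// unshared key suffix and the value. For example, storing "applesauce"
+// directly after "apple" encodes shared=5, unshared=5 and the suffix
+// "sauce".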
+
+func (b *block) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
+}
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+type blockIter struct {
+ tr *Reader
+ block *block
+ blockReleaser util.Releaser
+ releaser util.Releaser
+ key, value []byte
+ offset int
+ // Previous offset, only filled by Next.
+ prevOffset int
+ prevNode []int
+ prevKeys []byte
+ restartIndex int
+ // Iterator direction.
+ dir dir
+ // Restart index slice range.
+ riStart int
+ riLimit int
+ // Offset slice range.
+ offsetStart int
+ offsetRealStart int
+ offsetLimit int
+ // Error.
+ err error
+}
+
+func (i *blockIter) sErr(err error) {
+ i.err = err
+ i.key = nil
+ i.value = nil
+ i.prevNode = nil
+ i.prevKeys = nil
+}
+
+func (i *blockIter) reset() {
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.restartIndex = i.riStart
+ i.offset = i.offsetStart
+ i.dir = dirSOI
+ i.key = i.key[:0]
+ i.value = nil
+}
+
+func (i *blockIter) isFirst() bool {
+ switch i.dir {
+ case dirForward:
+ return i.prevOffset == i.offsetRealStart
+ case dirBackward:
+ return len(i.prevNode) == 1 && i.restartIndex == i.riStart
+ }
+ return false
+}
+
+func (i *blockIter) isLast() bool {
+ switch i.dir {
+ case dirForward, dirBackward:
+ return i.offset == i.offsetLimit
+ }
+ return false
+}
+
+func (i *blockIter) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.dir = dirSOI
+ return i.Next()
+}
+
+func (i *blockIter) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.dir = dirEOI
+ return i.Prev()
+}
+
+func (i *blockIter) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key)
+ if err != nil {
+ i.sErr(err)
+ return false
+ }
+ i.restartIndex = ri
+ i.offset = max(i.offsetStart, offset)
+ if i.dir == dirSOI || i.dir == dirEOI {
+ i.dir = dirForward
+ }
+ for i.Next() {
+ if i.tr.cmp.Compare(i.key, key) >= 0 {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *blockIter) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirSOI {
+ i.restartIndex = i.riStart
+ i.offset = i.offsetStart
+ } else if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ for i.offset < i.offsetRealStart {
+ key, value, nShared, n, err := i.block.entry(i.offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if n == 0 {
+ i.dir = dirEOI
+ return false
+ }
+ i.key = append(i.key[:nShared], key...)
+ i.value = value
+ i.offset += n
+ }
+ if i.offset >= i.offsetLimit {
+ i.dir = dirEOI
+ if i.offset != i.offsetLimit {
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+ }
+ return false
+ }
+ key, value, nShared, n, err := i.block.entry(i.offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if n == 0 {
+ i.dir = dirEOI
+ return false
+ }
+ i.key = append(i.key[:nShared], key...)
+ i.value = value
+ i.prevOffset = i.offset
+ i.offset += n
+ i.dir = dirForward
+ return true
+}
+
+func (i *blockIter) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ var ri int
+ if i.dir == dirForward {
+ // Change direction.
+ i.offset = i.prevOffset
+ if i.offset == i.offsetRealStart {
+ i.dir = dirSOI
+ return false
+ }
+ ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset)
+ i.dir = dirBackward
+ } else if i.dir == dirEOI {
+ // At the end of iterator.
+ i.restartIndex = i.riLimit
+ i.offset = i.offsetLimit
+ if i.offset == i.offsetRealStart {
+ i.dir = dirSOI
+ return false
+ }
+ ri = i.riLimit - 1
+ i.dir = dirBackward
+ } else if len(i.prevNode) == 1 {
+ // This is the end of a restart range.
+ i.offset = i.prevNode[0]
+ i.prevNode = i.prevNode[:0]
+ if i.restartIndex == i.riStart {
+ i.dir = dirSOI
+ return false
+ }
+ i.restartIndex--
+ ri = i.restartIndex
+ } else {
+ // In the middle of restart range, get from cache.
+ n := len(i.prevNode) - 3
+ node := i.prevNode[n:]
+ i.prevNode = i.prevNode[:n]
+ // Get the key.
+ ko := node[0]
+ i.key = append(i.key[:0], i.prevKeys[ko:]...)
+ i.prevKeys = i.prevKeys[:ko]
+ // Get the value.
+ vo := node[1]
+ vl := vo + node[2]
+ i.value = i.block.data[vo:vl]
+ i.offset = vl
+ return true
+ }
+ // Build entries cache.
+ i.key = i.key[:0]
+ i.value = nil
+ offset := i.block.restartOffset(ri)
+ if offset == i.offset {
+ ri -= 1
+ if ri < 0 {
+ i.dir = dirSOI
+ return false
+ }
+ offset = i.block.restartOffset(ri)
+ }
+ i.prevNode = append(i.prevNode, offset)
+ for {
+ key, value, nShared, n, err := i.block.entry(offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if offset >= i.offsetRealStart {
+ if i.value != nil {
+ // Appends 3 variables:
+ // 1. Previous keys offset
+ // 2. Value offset in the data block
+ // 3. Value length
+ i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value))
+ i.prevKeys = append(i.prevKeys, i.key...)
+ }
+ i.value = value
+ }
+ i.key = append(i.key[:nShared], key...)
+ offset += n
+ // Stop if target offset reached.
+ if offset >= i.offset {
+ if offset != i.offset {
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+ return false
+ }
+
+ break
+ }
+ }
+ i.restartIndex = ri
+ i.offset = offset
+ return true
+}
+
+func (i *blockIter) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.key
+}
+
+func (i *blockIter) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.value
+}
+
+func (i *blockIter) Release() {
+ if i.dir != dirReleased {
+ i.tr = nil
+ i.block = nil
+ i.prevNode = nil
+ i.prevKeys = nil
+ i.key = nil
+ i.value = nil
+ i.dir = dirReleased
+ if i.blockReleaser != nil {
+ i.blockReleaser.Release()
+ i.blockReleaser = nil
+ }
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+ }
+}
+
+func (i *blockIter) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *blockIter) Valid() bool {
+ return i.err == nil && (i.dir == dirBackward || i.dir == dirForward)
+}
+
+func (i *blockIter) Error() error {
+ return i.err
+}
+
+type filterBlock struct {
+ bpool *util.BufferPool
+ data []byte
+ oOffset int
+ baseLg uint
+ filtersNum int
+}
+
+func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool {
+ i := int(offset >> b.baseLg)
+ if i < b.filtersNum {
+ o := b.data[b.oOffset+i*4:]
+ n := int(binary.LittleEndian.Uint32(o))
+ m := int(binary.LittleEndian.Uint32(o[4:]))
+ if n < m && m <= b.oOffset {
+ return filter.Contains(b.data[n:m], key)
+ } else if n == m {
+ return false
+ }
+ }
+ return true
+}
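+
+// The filter block holds one filter per 1<<baseLg bytes of data-block
+// offset space, followed by a table of uint32 filter offsets at oOffset.
+// With LevelDB's default baseLg of 11 (2KB granularity), a data block
+// starting at offset 5000 is checked against filter index 5000>>11 == 2.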
+
+func (b *filterBlock) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
+}
+
+type indexIter struct {
+ *blockIter
+ tr *Reader
+ slice *util.Range
+ // Options
+ fillCache bool
+}
+
+func (i *indexIter) Get() iterator.Iterator {
+ value := i.Value()
+ if value == nil {
+ return nil
+ }
+ dataBH, n := decodeBlockHandle(value)
+ if n == 0 {
+ return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle"))
+ }
+
+ var slice *util.Range
+ if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
+ slice = i.slice
+ }
+ return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache)
+}
+
+// Reader is a table reader.
+type Reader struct {
+ mu sync.RWMutex
+ fi *storage.FileInfo
+ reader io.ReaderAt
+ cache *cache.CacheGetter
+ err error
+ bpool *util.BufferPool
+ // Options
+ o *opt.Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ verifyChecksum bool
+
+ dataEnd int64
+ metaBH, indexBH, filterBH blockHandle
+ indexBlock *block
+ filterBlock *filterBlock
+}
+
+func (r *Reader) blockKind(bh blockHandle) string {
+ switch bh.offset {
+ case r.metaBH.offset:
+ return "meta-block"
+ case r.indexBH.offset:
+ return "index-block"
+ case r.filterBH.offset:
+ if r.filterBH.length > 0 {
+ return "filter-block"
+ }
+ }
+ return "data-block"
+}
+
+func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
+ return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
+}
+
+func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error {
+ return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason)
+}
+
+func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error {
+ if cerr, ok := err.(*ErrCorrupted); ok {
+ cerr.Pos = int64(bh.offset)
+ cerr.Size = int64(bh.length)
+ cerr.Kind = r.blockKind(bh)
+ return &errors.ErrCorrupted{File: r.fi, Err: cerr}
+ }
+ return err
+}
+
+func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) {
+ data := r.bpool.Get(int(bh.length + blockTrailerLen))
+ if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
+ return nil, err
+ }
+
+ if verifyChecksum {
+ n := bh.length + 1
+ checksum0 := binary.LittleEndian.Uint32(data[n:])
+ checksum1 := util.NewCRC(data[:n]).Value()
+ if checksum0 != checksum1 {
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1))
+ }
+ }
+
+ switch data[bh.length] {
+ case blockTypeNoCompression:
+ data = data[:bh.length]
+ case blockTypeSnappyCompression:
+ decLen, err := snappy.DecodedLen(data[:bh.length])
+ if err != nil {
+ return nil, r.newErrCorruptedBH(bh, err.Error())
+ }
+ decData := r.bpool.Get(decLen)
+ decData, err = snappy.Decode(decData, data[:bh.length])
+ r.bpool.Put(data)
+ if err != nil {
+ r.bpool.Put(decData)
+ return nil, r.newErrCorruptedBH(bh, err.Error())
+ }
+ data = decData
+ default:
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length]))
+ }
+ return data, nil
+}
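+
+// The block layout assumed above: bh.length bytes of (possibly
+// snappy-compressed) payload, one compression-type byte, then a 4-byte
+// checksum covering both the payload and the type byte; blockTrailerLen
+// accounts for those trailing 5 bytes.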
+
+func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) {
+ data, err := r.readRawBlock(bh, verifyChecksum)
+ if err != nil {
+ return nil, err
+ }
+ restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
+ b := &block{
+ bpool: r.bpool,
+ bh: bh,
+ data: data,
+ restartsLen: restartsLen,
+ restartsOffset: len(data) - (restartsLen+1)*4,
+ }
+ return b, nil
+}
+
+func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
+ if r.cache != nil {
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *block
+ b, err = r.readBlock(bh, verifyChecksum)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*block)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readBlock(bh, verifyChecksum)
+ return b, b, err
+}
+
+func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
+ data, err := r.readRawBlock(bh, true)
+ if err != nil {
+ return nil, err
+ }
+ n := len(data)
+ if n < 5 {
+ return nil, r.newErrCorruptedBH(bh, "too short")
+ }
+ m := n - 5
+ oOffset := int(binary.LittleEndian.Uint32(data[m:]))
+ if oOffset > m {
+ return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset")
+ }
+ b := &filterBlock{
+ bpool: r.bpool,
+ data: data,
+ oOffset: oOffset,
+ baseLg: uint(data[n-1]),
+ filtersNum: (m - oOffset) / 4,
+ }
+ return b, nil
+}
+
+func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
+ if r.cache != nil {
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *filterBlock
+ b, err = r.readFilterBlock(bh)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*filterBlock)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readFilterBlock(bh)
+ return b, b, err
+}
+
+func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) {
+ if r.indexBlock == nil {
+ return r.readBlockCached(r.indexBH, true, fillCache)
+ }
+ return r.indexBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) {
+ if r.filterBlock == nil {
+ return r.readFilterBlockCached(r.filterBH, fillCache)
+ }
+ return r.filterBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
+ bi := &blockIter{
+ tr: r,
+ block: b,
+ blockReleaser: bReleaser,
+ // Valid key should never be nil.
+ key: make([]byte, 0),
+ dir: dirSOI,
+ riStart: 0,
+ riLimit: b.restartsLen,
+ offsetStart: 0,
+ offsetRealStart: 0,
+ offsetLimit: b.restartsOffset,
+ }
+ if slice != nil {
+ if slice.Start != nil {
+ if bi.Seek(slice.Start) {
+ bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
+ bi.offsetStart = b.restartOffset(bi.riStart)
+ bi.offsetRealStart = bi.prevOffset
+ } else {
+ bi.riStart = b.restartsLen
+ bi.offsetStart = b.restartsOffset
+ bi.offsetRealStart = b.restartsOffset
+ }
+ }
+ if slice.Limit != nil {
+ if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
+ bi.offsetLimit = bi.prevOffset
+ bi.riLimit = bi.restartIndex + 1
+ }
+ }
+ bi.reset()
+ if bi.offsetStart > bi.offsetLimit {
+ bi.sErr(errors.New("leveldb/table: invalid slice range"))
+ }
+ }
+ return bi
+}
+
+func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ return r.newBlockIter(b, rel, slice, false)
+}
+
+func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ return r.getDataIter(dataBH, slice, verifyChecksum, fillCache)
+}
+
+// NewIterator creates an iterator from the table.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// table. And a nil Range.Limit is treated as a key after all keys in
+// the table.
+//
+// The returned iterator is not goroutine-safe and should be released
+// when no longer used.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ fillCache := !ro.GetDontFillCache()
+ indexBlock, rel, err := r.getIndexBlock(fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ index := &indexIter{
+ blockIter: r.newBlockIter(indexBlock, rel, slice, true),
+ tr: r,
+ slice: slice,
+ fillCache: !ro.GetDontFillCache(),
+ }
+ return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader))
+}
+
+func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.getIndexBlock(true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
+ defer index.Release()
+ if !index.Seek(key) {
+ err = index.Error()
+ if err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return
+ }
+ if filtered && r.filter != nil {
+ filterBlock, frel, ferr := r.getFilterBlock(true)
+ if ferr == nil {
+ if !filterBlock.contains(r.filter, dataBH.offset, key) {
+ frel.Release()
+ return nil, nil, ErrNotFound
+ }
+ frel.Release()
+ } else if !errors.IsCorrupted(ferr) {
+ err = ferr
+ return
+ }
+ }
+ data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
+ defer data.Release()
+ if !data.Seek(key) {
+ err = data.Error()
+ if err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+ // The key is not backed by the block buffer, so there is no need to copy it.
+ rkey = data.Key()
+ if !noValue {
+ if r.bpool == nil {
+ value = data.Value()
+ } else {
+ // The value is backed by the block buffer; since the buffer will be
+ // recycled, it needs to be copied.
+ value = append([]byte{}, data.Value()...)
+ }
+ }
+ return
+}
+
+// Find finds the key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such a pair.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such pair doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+ return r.find(key, filtered, ro, false)
+}
+
+// FindKey finds the key that is greater than or equal to the given key.
+// It returns ErrNotFound if the table doesn't contain such a key.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such key doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after FindKey returns.
+func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) {
+ rkey, _, err = r.find(key, filtered, ro, true)
+ return
+}
+
+// Get gets the value for the given key. It returns errors.ErrNotFound
+// if the table does not contain the key.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Get returns.
+func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ rkey, value, err := r.find(key, false, ro, false)
+ if err == nil && r.cmp.Compare(rkey, key) != 0 {
+ value = nil
+ err = ErrNotFound
+ }
+ return
+}
+
+// OffsetOf returns approximate offset for the given key.
+//
+// It is safe to modify the contents of the argument after OffsetOf returns.
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
+ defer index.Release()
+ if index.Seek(key) {
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return
+ }
+ offset = int64(dataBH.offset)
+ return
+ }
+ err = index.Error()
+ if err == nil {
+ offset = r.dataEnd
+ }
+ return
+}
+
+// Release implements util.Releaser.
+// It also closes the file if it is an io.Closer.
+func (r *Reader) Release() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if closer, ok := r.reader.(io.Closer); ok {
+ closer.Close()
+ }
+ if r.indexBlock != nil {
+ r.indexBlock.Release()
+ r.indexBlock = nil
+ }
+ if r.filterBlock != nil {
+ r.filterBlock.Release()
+ r.filterBlock = nil
+ }
+ r.reader = nil
+ r.cache = nil
+ r.bpool = nil
+ r.err = ErrReaderReleased
+}
+
+// NewReader creates a new initialized table reader for the file.
+// The fi, cache and bpool are optional and can be nil.
+//
+// The returned table reader instance is goroutine-safe.
+func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
+ if f == nil {
+ return nil, errors.New("leveldb/table: nil file")
+ }
+
+ r := &Reader{
+ fi: fi,
+ reader: f,
+ cache: cache,
+ bpool: bpool,
+ o: o,
+ cmp: o.GetComparer(),
+ verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
+ }
+
+ if size < footerLen {
+ r.err = r.newErrCorrupted(0, size, "table", "too small")
+ return r, nil
+ }
+
+ footerPos := size - footerLen
+ var footer [footerLen]byte
+ if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
+ return nil, err
+ }
+ if string(footer[footerLen-len(magic):footerLen]) != magic {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
+ return r, nil
+ }
+
+ var n int
+ // Decode the metaindex block handle.
+ r.metaBH, n = decodeBlockHandle(footer[:])
+ if n == 0 {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
+ return r, nil
+ }
+
+ // Decode the index block handle.
+ r.indexBH, n = decodeBlockHandle(footer[n:])
+ if n == 0 {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
+ return r, nil
+ }
+
+ // Read metaindex block.
+ metaBlock, err := r.readBlock(r.metaBH, true)
+ if err != nil {
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ } else {
+ return nil, err
+ }
+ }
+
+ // Set data end.
+ r.dataEnd = int64(r.metaBH.offset)
+
+ // Read metaindex.
+ metaIter := r.newBlockIter(metaBlock, nil, nil, true)
+ for metaIter.Next() {
+ key := string(metaIter.Key())
+ if !strings.HasPrefix(key, "filter.") {
+ continue
+ }
+ fn := key[7:]
+ if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
+ r.filter = f0
+ } else {
+ for _, f0 := range o.GetAltFilters() {
+ if f0.Name() == fn {
+ r.filter = f0
+ break
+ }
+ }
+ }
+ if r.filter != nil {
+ filterBH, n := decodeBlockHandle(metaIter.Value())
+ if n == 0 {
+ continue
+ }
+ r.filterBH = filterBH
+ // Update data end.
+ r.dataEnd = int64(filterBH.offset)
+ break
+ }
+ }
+ metaIter.Release()
+ metaBlock.Release()
+
+ // Cache the index and filter blocks locally, since we don't have a global cache.
+ if cache == nil {
+ r.indexBlock, err = r.readBlock(r.indexBH, true)
+ if err != nil {
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ } else {
+ return nil, err
+ }
+ }
+ if r.filter != nil {
+ r.filterBlock, err = r.readFilterBlock(r.filterBH)
+ if err != nil {
+ if !errors.IsCorrupted(err) {
+ return nil, err
+ }
+
+ // Don't use filter then.
+ r.filter = nil
+ }
+ }
+ }
+
+ return r, nil
+}
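+
+// A minimal usage sketch (not part of the original source; it assumes an
+// *os.File as the io.ReaderAt and that nil options, cache and bpool fall
+// back to defaults via the opt getters):
+//
+//	f, _ := os.Open("000001.ldb")
+//	st, _ := f.Stat()
+//	tr, err := NewReader(f, st.Size(), nil, nil, nil, nil)
+//	if err == nil {
+//		value, gerr := tr.Get([]byte("some-key"), nil)
+//		_, _ = value, gerr
+//		tr.Release()
+//	}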
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
new file mode 100644
index 0000000000..beacdc1f02
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
@@ -0,0 +1,177 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package table allows reading and writing of sorted key/value pairs.
+package table
+
+import (
+ "encoding/binary"
+)
+
+/*
+Table:
+
+A table consists of one or more data blocks, an optional filter block,
+a metaindex block, an index block and a table footer. The metaindex block
+is a special block used to keep parameters of the table, such as the filter
+block name and its block handle. The index block is a special block used to
+keep records of data block offsets and lengths; the index block uses one as
+its restart interval. The keys used by the index block are the last key of
+the preceding block, a shorter separator of adjacent blocks, or a shorter
+successor of the last key of the last block. The filter block is an optional
+block containing a sequence of filter data generated by a filter generator.
+
+Table data structure:
+                                                  + optional
+                                                 /
+  +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
+  | data block 1 |      ...     | data block n | filter block | metaindex block | index block | footer |
+  +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
+
+ Each block is followed by a 5-byte trailer that contains the compression type and a checksum.
+
+Table block trailer:
+
+ +---------------------------+-------------------+
+ | compression type (1-byte) | checksum (4-byte) |
+ +---------------------------+-------------------+
+
+ The checksum is a CRC-32 computed using Castagnoli's polynomial. The
+ compression type is also included in the checksum.
+
+Table footer:
+
+   +------------------- 40-bytes -------------------+
+  /                                                  \
+  +------------------------+--------------------+------+-----------------+
+  | metaindex block handle | index block handle | ---- | magic (8-bytes) |
+  +------------------------+--------------------+------+-----------------+
+
+ The magic is the first 64 bits of the SHA-1 sum of "http://code.google.com/p/leveldb/".
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+/*
+Block:
+
+A block consists of one or more key/value entries and a block trailer.
+A block entry shares a key prefix with its preceding key until a restart
+point is reached. A block should contain at least one restart point.
+The first restart point is always zero.
+
+Block data structure:
+
+      + restart point                 + restart point (depends on restart interval)
+     /                               /
+    +---------------+---------------+---------------+---------------+---------+
+    | block entry 1 | block entry 2 |      ...      | block entry n | trailer |
+    +---------------+---------------+---------------+---------------+---------+
+
+Key/value entry:
+
+              +---- key len ----+
+             /                   \
+    +-------+---------+-----------+---------+--------------------+--------------+----------------+
+    | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) |
+    +-----------------+---------------------+--------------------+--------------+----------------+
+
+ A block entry shares a key prefix with its preceding key:
+ Conditions:
+ restart_interval=2
+ entry one : key=deck,value=v1
+ entry two : key=dock,value=v2
+ entry three: key=duck,value=v3
+ The entries will be encoded as follows:
+
+      + restart point (offset=0)                                            + restart point (offset=16)
+     /                                                                     /
+    +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+    |  0  |  4  |  2  |  "deck"  |  "v1"  |  1  |  3  |  2  |  "ock"  |  "v2"  |  0  |  4  |  2  |  "duck"  |  "v3"  |
+    +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+     \                                   /  \                                /  \                                   /
+      +----------- entry one ----------+    +----------- entry two ---------+    +---------- entry three ----------+
+
+ The block trailer will contain two restart points:
+
+      +------------+-----------+--------+
+      |      0     |     16    |    2   |
+      +------------+-----------+---+----+
+       \                      /     \
+        +-- restart points --+       + restart points length
+
+Block trailer:
+
+             +-- 4-bytes --+
+            /               \
+  +-----------------+-----------------+-----------------+------------------------------+
+  | restart point 1 |       ....      | restart point n | restart points len (4-bytes) |
+  +-----------------+-----------------+-----------------+------------------------------+
+
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+/*
+Filter block:
+
+A filter block consists of one or more filter data and a filter block trailer.
+The trailer contains the filter data offsets, a trailer offset and a 1-byte base Lg.
+
+Filter block data structure:
+
+    + offset 1      + offset 2      + offset n      + trailer offset
+   /               /               /               /
+  +---------------+---------------+---------------+---------+
+  | filter data 1 |      ...      | filter data n | trailer |
+  +---------------+---------------+---------------+---------+
+
+Filter block trailer:
+
+               +- 4-bytes -+
+              /             \
+  +---------------+---------------+---------------+-------------------------------+------------------+
+  | data 1 offset |      ....     | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
+  +---------------+---------------+---------------+-------------------------------+------------------+
+
+
+NOTE: All fixed-length integers are little-endian.
+*/
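+
+// As a sketch of the base Lg arithmetic (assuming the default base Lg of
+// 11, i.e. filterBase = 2KiB): a data block whose handle starts at file
+// offset 5000 is checked against filter number 5000 >> 11 = 2. The reader
+// derives the filter index from the block offset alone, so the writer must
+// emit one filter entry per 2KiB of file offset covered, even for
+// stretches that contained no keys.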
+
+const (
+ blockTrailerLen = 5
+ footerLen = 48
+
+ magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"
+
+ // The block type gives the per-block compression format.
+ // These constants are part of the file format and should not be changed.
+ blockTypeNoCompression = 0
+ blockTypeSnappyCompression = 1
+
+ // Generate a new filter every 2KB of data.
+ filterBaseLg = 11
+ filterBase = 1 << filterBaseLg
+)
+
+type blockHandle struct {
+ offset, length uint64
+}
+
+func decodeBlockHandle(src []byte) (blockHandle, int) {
+ offset, n := binary.Uvarint(src)
+ length, m := binary.Uvarint(src[n:])
+ if n == 0 || m == 0 {
+ return blockHandle{}, 0
+ }
+ return blockHandle{offset, length}, n + m
+}
+
+func encodeBlockHandle(dst []byte, b blockHandle) int {
+ n := binary.PutUvarint(dst, b.offset)
+ m := binary.PutUvarint(dst[n:], b.length)
+ return n + m
+}
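+
+// A small round-trip sketch of the handle encoding (hypothetical numbers):
+//
+//	var buf [2 * binary.MaxVarintLen64]byte
+//	n := encodeBlockHandle(buf[:], blockHandle{offset: 1000, length: 30})
+//	bh, m := decodeBlockHandle(buf[:n])
+//	// bh == blockHandle{1000, 30} and m == n; a decoded m of 0 signals
+//	// a corrupted handle.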
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
new file mode 100644
index 0000000000..274dee6da8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
@@ -0,0 +1,375 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/golang/snappy"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func sharedPrefixLen(a, b []byte) int {
+ i, n := 0, len(a)
+ if n > len(b) {
+ n = len(b)
+ }
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+
+type blockWriter struct {
+ restartInterval int
+ buf util.Buffer
+ nEntries int
+ prevKey []byte
+ restarts []uint32
+ scratch []byte
+}
+
+func (w *blockWriter) append(key, value []byte) {
+ nShared := 0
+ if w.nEntries%w.restartInterval == 0 {
+ w.restarts = append(w.restarts, uint32(w.buf.Len()))
+ } else {
+ nShared = sharedPrefixLen(w.prevKey, key)
+ }
+ n := binary.PutUvarint(w.scratch[0:], uint64(nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(value)))
+ w.buf.Write(w.scratch[:n])
+ w.buf.Write(key[nShared:])
+ w.buf.Write(value)
+ w.prevKey = append(w.prevKey[:0], key...)
+ w.nEntries++
+}
+
+func (w *blockWriter) finish() {
+ // Write restarts entry.
+ if w.nEntries == 0 {
+ // Must have at least one restart entry.
+ w.restarts = append(w.restarts, 0)
+ }
+ w.restarts = append(w.restarts, uint32(len(w.restarts)))
+ for _, x := range w.restarts {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+}
+
+func (w *blockWriter) reset() {
+ w.buf.Reset()
+ w.nEntries = 0
+ w.restarts = w.restarts[:0]
+}
+
+func (w *blockWriter) bytesLen() int {
+ restartsLen := len(w.restarts)
+ if restartsLen == 0 {
+ restartsLen = 1
+ }
+ return w.buf.Len() + 4*restartsLen + 4
+}
+
+type filterWriter struct {
+ generator filter.FilterGenerator
+ buf util.Buffer
+ nKeys int
+ offsets []uint32
+}
+
+func (w *filterWriter) add(key []byte) {
+ if w.generator == nil {
+ return
+ }
+ w.generator.Add(key)
+ w.nKeys++
+}
+
+func (w *filterWriter) flush(offset uint64) {
+ if w.generator == nil {
+ return
+ }
+ for x := int(offset / filterBase); x > len(w.offsets); {
+ w.generate()
+ }
+}
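+
+// Note on the loop above: generate appends an entry to w.offsets on every
+// call, so the loop terminates, and flush emits one filter per filterBase
+// (2KiB by default) of file offset covered so far, including empty filters
+// for stretches that contained no keys.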
+
+func (w *filterWriter) finish() {
+ if w.generator == nil {
+ return
+ }
+ // Generate a filter from any remaining keys.
+ if w.nKeys > 0 {
+ w.generate()
+ }
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ for _, x := range w.offsets {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+ w.buf.WriteByte(filterBaseLg)
+}
+
+func (w *filterWriter) generate() {
+ // Record offset.
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ // Generate filters.
+ if w.nKeys > 0 {
+ w.generator.Generate(&w.buf)
+ w.nKeys = 0
+ }
+}
+
+// Writer is a table writer.
+type Writer struct {
+ writer io.Writer
+ err error
+ // Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ compression opt.Compression
+ blockSize int
+
+ dataBlock blockWriter
+ indexBlock blockWriter
+ filterBlock filterWriter
+ pendingBH blockHandle
+ offset uint64
+ nEntries int
+ // Scratch is allocated large enough for 5 uvarints. The block writers
+ // should not use the first 20 bytes, since they are used to encode the
+ // block handle that is then passed to the block writer itself.
+ scratch [50]byte
+ comparerScratch []byte
+ compressionScratch []byte
+}
+
+func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
+ // Compress the buffer if necessary.
+ var b []byte
+ if compression == opt.SnappyCompression {
+ // Allocate scratch enough for compression and block trailer.
+ if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
+ w.compressionScratch = make([]byte, n)
+ }
+ compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
+ n := len(compressed)
+ b = compressed[:n+blockTrailerLen]
+ b[n] = blockTypeSnappyCompression
+ } else {
+ tmp := buf.Alloc(blockTrailerLen)
+ tmp[0] = blockTypeNoCompression
+ b = buf.Bytes()
+ }
+
+ // Calculate the checksum.
+ n := len(b) - 4
+ checksum := util.NewCRC(b[:n]).Value()
+ binary.LittleEndian.PutUint32(b[n:], checksum)
+
+ // Write the buffer to the file.
+ _, err = w.writer.Write(b)
+ if err != nil {
+ return
+ }
+ bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
+ w.offset += uint64(len(b))
+ return
+}
+
+func (w *Writer) flushPendingBH(key []byte) {
+ if w.pendingBH.length == 0 {
+ return
+ }
+ var separator []byte
+ if len(key) == 0 {
+ separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey)
+ } else {
+ separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key)
+ }
+ if separator == nil {
+ separator = w.dataBlock.prevKey
+ } else {
+ w.comparerScratch = separator
+ }
+ n := encodeBlockHandle(w.scratch[:20], w.pendingBH)
+ // Append the block handle to the index block.
+ w.indexBlock.append(separator, w.scratch[:n])
+ // Reset prev key of the data block.
+ w.dataBlock.prevKey = w.dataBlock.prevKey[:0]
+ // Clear pending block handle.
+ w.pendingBH = blockHandle{}
+}
+
+func (w *Writer) finishBlock() error {
+ w.dataBlock.finish()
+ bh, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+ if err != nil {
+ return err
+ }
+ w.pendingBH = bh
+ // Reset the data block.
+ w.dataBlock.reset()
+ // Flush the filter block.
+ w.filterBlock.flush(w.offset)
+ return nil
+}
+
+// Append appends a key/value pair to the table. The keys passed must
+// be in increasing order.
+//
+// It is safe to modify the contents of the arguments after Append returns.
+func (w *Writer) Append(key, value []byte) error {
+ if w.err != nil {
+ return w.err
+ }
+ if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 {
+ w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key)
+ return w.err
+ }
+
+ w.flushPendingBH(key)
+ // Append key/value pair to the data block.
+ w.dataBlock.append(key, value)
+ // Add key to the filter block.
+ w.filterBlock.add(key)
+
+ // Finish the data block if block size target reached.
+ if w.dataBlock.bytesLen() >= w.blockSize {
+ if err := w.finishBlock(); err != nil {
+ w.err = err
+ return w.err
+ }
+ }
+ w.nEntries++
+ return nil
+}
+
+// BlocksLen returns the number of blocks written so far.
+func (w *Writer) BlocksLen() int {
+ n := w.indexBlock.nEntries
+ if w.pendingBH.length > 0 {
+ // Includes the pending block.
+ n++
+ }
+ return n
+}
+
+// EntriesLen returns the number of entries added so far.
+func (w *Writer) EntriesLen() int {
+ return w.nEntries
+}
+
+// BytesLen returns the number of bytes written so far.
+func (w *Writer) BytesLen() int {
+ return int(w.offset)
+}
+
+// Close will finalize the table. Calling Append is not possible
+// after Close, but calling BlocksLen, EntriesLen and BytesLen
+// is still possible.
+func (w *Writer) Close() error {
+ if w.err != nil {
+ return w.err
+ }
+
+ // Write the last data block, or an empty data block if there
+ // aren't any data blocks at all.
+ if w.dataBlock.nEntries > 0 || w.nEntries == 0 {
+ if err := w.finishBlock(); err != nil {
+ w.err = err
+ return w.err
+ }
+ }
+ w.flushPendingBH(nil)
+
+ // Write the filter block.
+ var filterBH blockHandle
+ w.filterBlock.finish()
+ if buf := &w.filterBlock.buf; buf.Len() > 0 {
+ filterBH, w.err = w.writeBlock(buf, opt.NoCompression)
+ if w.err != nil {
+ return w.err
+ }
+ }
+
+ // Write the metaindex block.
+ if filterBH.length > 0 {
+ key := []byte("filter." + w.filter.Name())
+ n := encodeBlockHandle(w.scratch[:20], filterBH)
+ w.dataBlock.append(key, w.scratch[:n])
+ }
+ w.dataBlock.finish()
+ metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+ if err != nil {
+ w.err = err
+ return w.err
+ }
+
+ // Write the index block.
+ w.indexBlock.finish()
+ indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression)
+ if err != nil {
+ w.err = err
+ return w.err
+ }
+
+ // Write the table footer.
+ footer := w.scratch[:footerLen]
+ for i := range footer {
+ footer[i] = 0
+ }
+ n := encodeBlockHandle(footer, metaindexBH)
+ encodeBlockHandle(footer[n:], indexBH)
+ copy(footer[footerLen-len(magic):], magic)
+ if _, err := w.writer.Write(footer); err != nil {
+ w.err = err
+ return w.err
+ }
+ w.offset += footerLen
+
+ w.err = errors.New("leveldb/table: writer is closed")
+ return nil
+}
+
+// NewWriter creates a new initialized table writer for the file.
+//
+// Table writer is not goroutine-safe.
+func NewWriter(f io.Writer, o *opt.Options) *Writer {
+ w := &Writer{
+ writer: f,
+ cmp: o.GetComparer(),
+ filter: o.GetFilter(),
+ compression: o.GetCompression(),
+ blockSize: o.GetBlockSize(),
+ comparerScratch: make([]byte, 0),
+ }
+ // data block
+ w.dataBlock.restartInterval = o.GetBlockRestartInterval()
+ // The first 20-bytes are used for encoding block handle.
+ w.dataBlock.scratch = w.scratch[20:]
+ // index block
+ w.indexBlock.restartInterval = 1
+ w.indexBlock.scratch = w.scratch[20:]
+ // filter block
+ if w.filter != nil {
+ w.filterBlock.generator = w.filter.NewGenerator()
+ w.filterBlock.flush(0)
+ }
+ return w
+}
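+
+// A minimal usage sketch (not part of the original source; it assumes nil
+// options fall back to defaults via the opt getters). Keys must be
+// appended in increasing order:
+//
+//	f, _ := os.Create("000001.ldb")
+//	tw := NewWriter(f, nil)
+//	_ = tw.Append([]byte("alpha"), []byte("v1"))
+//	_ = tw.Append([]byte("beta"), []byte("v2"))
+//	if err := tw.Close(); err != nil {
+//		// handle the error
+//	}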
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
new file mode 100644
index 0000000000..1a5bf71a32
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
@@ -0,0 +1,91 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+func shorten(str string) string {
+ if len(str) <= 8 {
+ return str
+ }
+ return str[:3] + ".." + str[len(str)-3:]
+}
+
+// Note: "Ti" keeps the i < 4 loop bound in shortenb and sshortenb from
+// indexing past the end of the array for very large values.
+var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"}
+
+func shortenb(bytes int) string {
+ i := 0
+ for ; bytes > 1024 && i < 4; i++ {
+ bytes /= 1024
+ }
+ return fmt.Sprintf("%d%sB", bytes, bunits[i])
+}
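+
+// For example, shortenb(1536) yields "1KiB" and shortenb(3*1024*1024)
+// yields "3MiB"; values of 1024 or less are printed as plain bytes,
+// e.g. shortenb(512) yields "512B".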
+
+func sshortenb(bytes int) string {
+ if bytes == 0 {
+ return "~"
+ }
+ sign := "+"
+ if bytes < 0 {
+ sign = "-"
+ bytes *= -1
+ }
+ i := 0
+ for ; bytes > 1024 && i < 4; i++ {
+ bytes /= 1024
+ }
+ return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i])
+}
+
+func sint(x int) string {
+ if x == 0 {
+ return "~"
+ }
+ sign := "+"
+ if x < 0 {
+ sign = "-"
+ x *= -1
+ }
+ return fmt.Sprintf("%s%d", sign, x)
+}
+
+func minInt(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func maxInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+type files []storage.File
+
+func (p files) Len() int {
+ return len(p)
+}
+
+func (p files) Less(i, j int) bool {
+ return p[i].Num() < p[j].Num()
+}
+
+func (p files) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+func (p files) sort() {
+ sort.Sort(p)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go
new file mode 100644
index 0000000000..21de242552
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go
@@ -0,0 +1,293 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package util
+
+// This is a copy of the Go std bytes.Buffer with some modifications
+// and some features stripped.
+
+import (
+ "bytes"
+ "io"
+)
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
+}
+
+// Bytes returns a slice of the contents of the unread portion of the buffer;
+// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
+// returned slice, the contents of the buffer will change provided there
+// are no intervening method calls on the Buffer.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "".
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return ""
+ }
+ return string(b.buf[b.off:])
+}
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Truncate discards all but the first n unread bytes from the buffer.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+ switch {
+ case n < 0 || n > b.Len():
+ panic("leveldb/util.Buffer: truncation out of range")
+ case n == 0:
+ // Reuse buffer space.
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+n]
+}
+
+// Reset resets the buffer so it has no content.
+// b.Reset() is the same as b.Truncate(0).
+func (b *Buffer) Reset() { b.Truncate(0) }
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+ m := b.Len()
+ // If buffer is empty, reset to recover space.
+ if m == 0 && b.off != 0 {
+ b.Truncate(0)
+ }
+ if len(b.buf)+n > cap(b.buf) {
+ var buf []byte
+ if b.buf == nil && n <= len(b.bootstrap) {
+ buf = b.bootstrap[0:]
+ } else if m+n <= cap(b.buf)/2 {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= cap(b.buf) to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf[:], b.buf[b.off:])
+ buf = b.buf[:m]
+ } else {
+ // not enough space anywhere
+ buf = makeSlice(2*cap(b.buf) + n)
+ copy(buf, b.buf[b.off:])
+ }
+ b.buf = buf
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+m+n]
+ return b.off + m
+}
+
+// Alloc allocates an n-byte slice from the buffer, growing the buffer as
+// needed. If n is negative, Alloc will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Alloc(n int) []byte {
+ if n < 0 {
+ panic("leveldb/util.Buffer.Alloc: negative count")
+ }
+ m := b.grow(n)
+ return b.buf[m:]
+}
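+
+// Alloc is the main departure from the std bytes.Buffer: callers write
+// directly into the returned slice instead of going through Write. For
+// example, blockWriter.finish and filterWriter.finish use buf.Alloc(4)
+// to reserve room for each uint32 and fill it with
+// binary.LittleEndian.PutUint32.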
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+ if n < 0 {
+ panic("leveldb/util.Buffer.Grow: negative count")
+ }
+ m := b.grow(n)
+ b.buf = b.buf[0:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with bytes.ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ m := b.grow(len(p))
+ return copy(b.buf[m:], p), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+ // If buffer is empty, reset to recover space.
+ if b.off >= len(b.buf) {
+ b.Truncate(0)
+ }
+ for {
+ if free := cap(b.buf) - len(b.buf); free < MinRead {
+ // not enough space at end
+ newBuf := b.buf
+ if b.off+free < MinRead {
+ // not enough space using beginning of buffer;
+ // double buffer capacity
+ newBuf = makeSlice(2*cap(b.buf) + MinRead)
+ }
+ copy(newBuf, b.buf[b.off:])
+ b.buf = newBuf[:len(b.buf)-b.off]
+ b.off = 0
+ }
+ m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
+ b.buf = b.buf[0 : len(b.buf)+m]
+ n += int64(m)
+ if e == io.EOF {
+ break
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+ return n, nil // err is EOF, so return nil explicitly
+}
+
+// makeSlice allocates a slice of size n. If the allocation fails, it panics
+// with bytes.ErrTooLarge.
+func makeSlice(n int) []byte {
+ // If the make fails, give a known error.
+ defer func() {
+ if recover() != nil {
+ panic(bytes.ErrTooLarge)
+ }
+ }()
+ return make([]byte, n)
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+ if b.off < len(b.buf) {
+ nBytes := b.Len()
+ m, e := w.Write(b.buf[b.off:])
+ if m > nBytes {
+ panic("leveldb/util.Buffer.WriteTo: invalid Write count")
+ }
+ b.off += m
+ n = int64(m)
+ if e != nil {
+ return n, e
+ }
+ // all bytes should have been written, by definition of
+ // Write method in io.Writer
+ if m != nBytes {
+ return n, io.ErrShortWrite
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Truncate(0)
+ return
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// bytes.ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+ m := b.grow(1)
+ b.buf[m] = c
+ return nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ if len(p) == 0 {
+ return
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ return
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+ m := b.Len()
+ if n > m {
+ n = m
+ }
+ data := b.buf[b.off : b.off+n]
+ b.off += n
+ return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (c byte, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, io.EOF
+ }
+ c = b.buf[b.off]
+ b.off++
+ return c, nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+ slice, err := b.readSlice(delim)
+ // return a copy of slice. The buffer's backing array may
+ // be overwritten by later calls.
+ line = append(line, slice...)
+ return
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+ i := bytes.IndexByte(b.buf[b.off:], delim)
+ end := b.off + i + 1
+ if i < 0 {
+ end = len(b.buf)
+ err = io.EOF
+ }
+ line = b.buf[b.off:end]
+ b.off = end
+ return line, err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial
+// contents. It is intended to prepare a Buffer to read existing data. It
+// can also be used to size the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
new file mode 100644
index 0000000000..2f3db974a7
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
@@ -0,0 +1,239 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type buffer struct {
+ b []byte
+ miss int
+}
+
+// BufferPool is a pool of byte buffers, bucketed by capacity around a
+// baseline size.
+type BufferPool struct {
+ pool [6]chan []byte
+ size [5]uint32
+ sizeMiss [5]uint32
+ sizeHalf [5]uint32
+ baseline [4]int
+ baseline0 int
+
+ mu sync.RWMutex
+ closed bool
+ closeC chan struct{}
+
+ get uint32
+ put uint32
+ half uint32
+ less uint32
+ equal uint32
+ greater uint32
+ miss uint32
+}
+
+func (p *BufferPool) poolNum(n int) int {
+ if n <= p.baseline0 && n > p.baseline0/2 {
+ return 0
+ }
+ for i, x := range p.baseline {
+ if n <= x {
+ return i + 1
+ }
+ }
+ return len(p.baseline) + 1
+}
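+
+// As a worked example with baseline0 = 4096 (so baseline = [1024, 2048,
+// 8192, 16384]): a request for 3000 bytes satisfies n <= 4096 && n > 2048
+// and maps to pool 0; 1500 bytes maps to pool 2 (the first baseline >= n
+// is 2048 at index 1, giving i+1 = 2); and 20000 bytes falls past every
+// baseline and maps to pool 5.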
+
+// Get returns a buffer with length n.
+func (p *BufferPool) Get(n int) []byte {
+ if p == nil {
+ return make([]byte, n)
+ }
+
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+
+ if p.closed {
+ return make([]byte, n)
+ }
+
+ atomic.AddUint32(&p.get, 1)
+
+ poolNum := p.poolNum(n)
+ pool := p.pool[poolNum]
+ if poolNum == 0 {
+ // Fast path.
+ select {
+ case b := <-pool:
+ switch {
+ case cap(b) > n:
+ if cap(b)-n >= n {
+ atomic.AddUint32(&p.half, 1)
+ select {
+ case pool <- b:
+ default:
+ }
+ return make([]byte, n)
+ } else {
+ atomic.AddUint32(&p.less, 1)
+ return b[:n]
+ }
+ case cap(b) == n:
+ atomic.AddUint32(&p.equal, 1)
+ return b[:n]
+ default:
+ atomic.AddUint32(&p.greater, 1)
+ }
+ default:
+ atomic.AddUint32(&p.miss, 1)
+ }
+
+ return make([]byte, n, p.baseline0)
+ } else {
+ sizePtr := &p.size[poolNum-1]
+
+ select {
+ case b := <-pool:
+ switch {
+ case cap(b) > n:
+ if cap(b)-n >= n {
+ atomic.AddUint32(&p.half, 1)
+ sizeHalfPtr := &p.sizeHalf[poolNum-1]
+ if atomic.AddUint32(sizeHalfPtr, 1) == 20 {
+ atomic.StoreUint32(sizePtr, uint32(cap(b)/2))
+ atomic.StoreUint32(sizeHalfPtr, 0)
+ } else {
+ select {
+ case pool <- b:
+ default:
+ }
+ }
+ return make([]byte, n)
+ } else {
+ atomic.AddUint32(&p.less, 1)
+ return b[:n]
+ }
+ case cap(b) == n:
+ atomic.AddUint32(&p.equal, 1)
+ return b[:n]
+ default:
+ atomic.AddUint32(&p.greater, 1)
+ if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
+ select {
+ case pool <- b:
+ default:
+ }
+ }
+ }
+ default:
+ atomic.AddUint32(&p.miss, 1)
+ }
+
+ if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
+ if size == 0 {
+ atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
+ } else {
+ sizeMissPtr := &p.sizeMiss[poolNum-1]
+ if atomic.AddUint32(sizeMissPtr, 1) == 20 {
+ atomic.StoreUint32(sizePtr, uint32(n))
+ atomic.StoreUint32(sizeMissPtr, 0)
+ }
+ }
+ return make([]byte, n)
+ } else {
+ return make([]byte, n, size)
+ }
+ }
+}
+
+// Put adds the given buffer to the pool.
+func (p *BufferPool) Put(b []byte) {
+ if p == nil {
+ return
+ }
+
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+
+ if p.closed {
+ return
+ }
+
+ atomic.AddUint32(&p.put, 1)
+
+ pool := p.pool[p.poolNum(cap(b))]
+ select {
+ case pool <- b:
+ default:
+ }
+}
+
+func (p *BufferPool) Close() {
+ if p == nil {
+ return
+ }
+
+ p.mu.Lock()
+ if !p.closed {
+ p.closed = true
+ p.closeC <- struct{}{}
+ }
+ p.mu.Unlock()
+}
+
+func (p *BufferPool) String() string {
+ if p == nil {
+ return ""
+ }
+
+ return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}",
+ p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss)
+}
+
+func (p *BufferPool) drain() {
+ ticker := time.NewTicker(2 * time.Second)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ for _, ch := range p.pool {
+ select {
+ case <-ch:
+ default:
+ }
+ }
+ case <-p.closeC:
+ close(p.closeC)
+ for _, ch := range p.pool {
+ close(ch)
+ }
+ return
+ }
+ }
+}
+
+// NewBufferPool creates a new initialized 'buffer pool'.
+func NewBufferPool(baseline int) *BufferPool {
+ if baseline <= 0 {
+ panic("baseline can't be <= 0")
+ }
+ p := &BufferPool{
+ baseline0: baseline,
+ baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4},
+ closeC: make(chan struct{}, 1),
+ }
+ for i, cap := range []int{2, 2, 4, 4, 2, 1} {
+ p.pool[i] = make(chan []byte, cap)
+ }
+ go p.drain()
+ return p
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go
new file mode 100644
index 0000000000..631c9d6109
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go
@@ -0,0 +1,30 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "hash/crc32"
+)
+
+var table = crc32.MakeTable(crc32.Castagnoli)
+
+// CRC is a CRC-32 checksum computed using Castagnoli's polynomial.
+type CRC uint32
+
+// NewCRC creates a new CRC based on the given bytes.
+func NewCRC(b []byte) CRC {
+ return CRC(0).Update(b)
+}
+
+// Update updates the CRC with the given bytes.
+func (c CRC) Update(b []byte) CRC {
+ return CRC(crc32.Update(uint32(c), table, b))
+}
+
+// Value returns a masked CRC.
+func (c CRC) Value() uint32 {
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
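+
+// The rotate-and-add above is LevelDB's checksum masking: computing a CRC
+// over data that itself contains CRCs behaves poorly, so the stored value
+// is ((crc >> 15) | (crc << 17)) + 0xa282ead8 and verifiers unmask before
+// comparing.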
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go
new file mode 100644
index 0000000000..54903660ff
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// Hash returns the hash of the given data.
+func Hash(data []byte, seed uint32) uint32 {
+ // Similar to murmur hash
+ var m uint32 = 0xc6a4a793
+ var r uint32 = 24
+ h := seed ^ (uint32(len(data)) * m)
+
+ buf := bytes.NewBuffer(data)
+ for buf.Len() >= 4 {
+ var w uint32
+ binary.Read(buf, binary.LittleEndian, &w)
+ h += w
+ h *= m
+ h ^= (h >> 16)
+ }
+
+ rest := buf.Bytes()
+ switch len(rest) {
+ default:
+ panic("not reached")
+ case 3:
+ h += uint32(rest[2]) << 16
+ fallthrough
+ case 2:
+ h += uint32(rest[1]) << 8
+ fallthrough
+ case 1:
+ h += uint32(rest[0])
+ h *= m
+ h ^= (h >> r)
+ case 0:
+ }
+
+ return h
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/pool.go
new file mode 100644
index 0000000000..1f7fdd41fe
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/pool.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build go1.3
+
+package util
+
+import (
+ "sync"
+)
+
+type Pool struct {
+ sync.Pool
+}
+
+func NewPool(cap int) *Pool {
+ return &Pool{}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
new file mode 100644
index 0000000000..27b8d03be9
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build !go1.3
+
+package util
+
+type Pool struct {
+ pool chan interface{}
+}
+
+func (p *Pool) Get() interface{} {
+ select {
+ case x := <-p.pool:
+ return x
+ default:
+ return nil
+ }
+}
+
+func (p *Pool) Put(x interface{}) {
+ select {
+ case p.pool <- x:
+ default:
+ }
+}
+
+func NewPool(cap int) *Pool {
+ return &Pool{pool: make(chan interface{}, cap)}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go
new file mode 100644
index 0000000000..85159583d2
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+// Range is a key range.
+type Range struct {
+ // Start of the key range, included in the range.
+ Start []byte
+
+ // Limit of the key range, not included in the range.
+ Limit []byte
+}
+
+// BytesPrefix returns a key range that satisfies the given prefix.
+// This is only applicable to the standard 'bytes comparer'.
+func BytesPrefix(prefix []byte) *Range {
+ var limit []byte
+ for i := len(prefix) - 1; i >= 0; i-- {
+ c := prefix[i]
+ if c < 0xff {
+ limit = make([]byte, i+1)
+ copy(limit, prefix)
+ limit[i] = c + 1
+ break
+ }
+ }
+ return &Range{prefix, limit}
+}
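+
+// For example, BytesPrefix([]byte("foo")) yields the range ["foo", "fop"),
+// which covers every key with prefix "foo" under the standard bytes
+// comparer. A prefix ending in 0xff bytes carries the increment leftward,
+// and a prefix of all 0xff bytes yields a nil (open-ended) limit.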
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go
new file mode 100644
index 0000000000..f35976865b
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package util provides utilities used throughout leveldb.
+package util
+
+import (
+ "errors"
+)
+
+var (
+ ErrReleased = errors.New("leveldb: resource already released")
+ ErrHasReleaser = errors.New("leveldb: releaser already defined")
+)
+
+// Releaser is the interface that wraps the basic Release method.
+type Releaser interface {
+ // Release releases associated resources. Release should always succeed
+ // and can be called multiple times without causing an error.
+ Release()
+}
+
+// ReleaseSetter is the interface that wraps the basic SetReleaser method.
+type ReleaseSetter interface {
+ // SetReleaser associates the given releaser with the resources. The
+ // releaser will be called once the corresponding resources are released.
+ // Calling SetReleaser with nil will clear the releaser.
+ //
+ // This will panic if a releaser is already present or the corresponding
+ // resource is already released. The releaser should be cleared first
+ // before a new one is assigned.
+ SetReleaser(releaser Releaser)
+}
+
+// BasicReleaser provides a basic implementation of Releaser and ReleaseSetter.
+type BasicReleaser struct {
+ releaser Releaser
+ released bool
+}
+
+// Released reports whether the Release method has already been called.
+func (r *BasicReleaser) Released() bool {
+ return r.released
+}
+
+// Release implements Releaser.Release.
+func (r *BasicReleaser) Release() {
+ if !r.released {
+ if r.releaser != nil {
+ r.releaser.Release()
+ r.releaser = nil
+ }
+ r.released = true
+ }
+}
+
+// SetReleaser implements ReleaseSetter.SetReleaser.
+func (r *BasicReleaser) SetReleaser(releaser Releaser) {
+ if r.released {
+ panic(ErrReleased)
+ }
+ if r.releaser != nil && releaser != nil {
+ panic(ErrHasReleaser)
+ }
+ r.releaser = releaser
+}
+
+type NoopReleaser struct{}
+
+func (NoopReleaser) Release() {}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version.go b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
new file mode 100644
index 0000000000..011d982da8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
@@ -0,0 +1,457 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type tSet struct {
+ level int
+ table *tFile
+}
+
+type version struct {
+ s *session
+
+ tables []tFiles
+
+ // Level that should be compacted next and its compaction score.
+ // Score < 1 means compaction is not strictly needed. These fields
+ // are initialized by computeCompaction()
+ cLevel int
+ cScore float64
+
+ cSeek unsafe.Pointer
+
+ ref int
+ // Succeeding version.
+ next *version
+}
+
+func newVersion(s *session) *version {
+ return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())}
+}
+
+func (v *version) releaseNB() {
+ v.ref--
+ if v.ref > 0 {
+ return
+ }
+ if v.ref < 0 {
+ panic("negative version ref")
+ }
+
+ tables := make(map[uint64]bool)
+ for _, tt := range v.next.tables {
+ for _, t := range tt {
+ num := t.file.Num()
+ tables[num] = true
+ }
+ }
+
+ for _, tt := range v.tables {
+ for _, t := range tt {
+ num := t.file.Num()
+ if _, ok := tables[num]; !ok {
+ v.s.tops.remove(t)
+ }
+ }
+ }
+
+ v.next.releaseNB()
+ v.next = nil
+}
+
+func (v *version) release() {
+ v.s.vmu.Lock()
+ v.releaseNB()
+ v.s.vmu.Unlock()
+}
+
+func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+ ukey := ikey.ukey()
+
+ // Walk tables level-by-level.
+ for level, tables := range v.tables {
+ if len(tables) == 0 {
+ continue
+ }
+
+ if level == 0 {
+ // Level-0 files may overlap each other. Find all files that
+ // overlap ukey.
+ for _, t := range tables {
+ if t.overlaps(v.s.icmp, ukey, ukey) {
+ if !f(level, t) {
+ return
+ }
+ }
+ }
+ } else {
+ if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) {
+ t := tables[i]
+ if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+ if !f(level, t) {
+ return
+ }
+ }
+ }
+ }
+
+ if lf != nil && !lf(level) {
+ return
+ }
+ }
+}
+
+func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
+ ukey := ikey.ukey()
+
+ var (
+ tset *tSet
+ tseek bool
+
+ // Level-0.
+ zfound bool
+ zseq uint64
+ zkt kType
+ zval []byte
+ )
+
+ err = ErrNotFound
+
+ // Since entries never hop across levels, finding a key/value
+ // pair in a smaller level makes later levels irrelevant.
+ v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+ if !tseek {
+ if tset == nil {
+ tset = &tSet{level, t}
+ } else {
+ tseek = true
+ }
+ }
+
+ var (
+ fikey, fval []byte
+ ferr error
+ )
+ if noValue {
+ fikey, ferr = v.s.tops.findKey(t, ikey, ro)
+ } else {
+ fikey, fval, ferr = v.s.tops.find(t, ikey, ro)
+ }
+ switch ferr {
+ case nil:
+ case ErrNotFound:
+ return true
+ default:
+ err = ferr
+ return false
+ }
+
+ if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil {
+ if v.s.icmp.uCompare(ukey, fukey) == 0 {
+ if level == 0 {
+ if fseq >= zseq {
+ zfound = true
+ zseq = fseq
+ zkt = fkt
+ zval = fval
+ }
+ } else {
+ switch fkt {
+ case ktVal:
+ value = fval
+ err = nil
+ case ktDel:
+ default:
+ panic("leveldb: invalid iKey type")
+ }
+ return false
+ }
+ }
+ } else {
+ err = fkerr
+ return false
+ }
+
+ return true
+ }, func(level int) bool {
+ if zfound {
+ switch zkt {
+ case ktVal:
+ value = zval
+ err = nil
+ case ktDel:
+ default:
+ panic("leveldb: invalid iKey type")
+ }
+ return false
+ }
+
+ return true
+ })
+
+ if tseek && tset.table.consumeSeek() <= 0 {
+ tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+ }
+
+ return
+}
+
+func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
+ var tset *tSet
+
+ v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+ if tset == nil {
+ tset = &tSet{level, t}
+ return true
+ } else {
+ if tset.table.consumeSeek() <= 0 {
+ tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+ }
+ return false
+ }
+ }, nil)
+
+ return
+}
+
+func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
+ // Merge all level-0 files together since they may overlap.
+ for _, t := range v.tables[0] {
+ it := v.s.tops.newIterator(t, slice, ro)
+ its = append(its, it)
+ }
+
+ strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader)
+ for _, tables := range v.tables[1:] {
+ if len(tables) == 0 {
+ continue
+ }
+
+ it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)
+ its = append(its, it)
+ }
+
+ return
+}
+
+func (v *version) newStaging() *versionStaging {
+ return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())}
+}
+
+// Spawn a new version based on this version.
+func (v *version) spawn(r *sessionRecord) *version {
+ staging := v.newStaging()
+ staging.commit(r)
+ return staging.finish()
+}
+
+func (v *version) fillRecord(r *sessionRecord) {
+ for level, ts := range v.tables {
+ for _, t := range ts {
+ r.addTableFile(level, t)
+ }
+ }
+}
+
+func (v *version) tLen(level int) int {
+ return len(v.tables[level])
+}
+
+func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
+ for level, tables := range v.tables {
+ for _, t := range tables {
+ if v.s.icmp.Compare(t.imax, ikey) <= 0 {
+ // Entire file is before "ikey", so just add the file size
+ n += t.size
+ } else if v.s.icmp.Compare(t.imin, ikey) > 0 {
+ // Entire file is after "ikey", so ignore
+ if level > 0 {
+ // Files other than level 0 are sorted by meta->min, so
+ // no further files in this level will contain data for
+ // "ikey".
+ break
+ }
+ } else {
+ // "ikey" falls in the range for this table. Add the
+ // approximate offset of "ikey" within the table.
+ var nn uint64
+ nn, err = v.s.tops.offsetOf(t, ikey)
+ if err != nil {
+ return 0, err
+ }
+ n += nn
+ }
+ }
+ }
+
+ return
+}
+
+func (v *version) pickMemdbLevel(umin, umax []byte) (level int) {
+ if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
+ var overlaps tFiles
+ maxLevel := v.s.o.GetMaxMemCompationLevel()
+ for ; level < maxLevel; level++ {
+ if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) {
+ break
+ }
+ overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false)
+ if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) {
+ break
+ }
+ }
+ }
+
+ return
+}
+
+func (v *version) computeCompaction() {
+ // Precomputed best level for next compaction
+ var bestLevel int = -1
+ var bestScore float64 = -1
+
+ for level, tables := range v.tables {
+ var score float64
+ if level == 0 {
+ // We treat level-0 specially by bounding the number of files
+ // instead of number of bytes for two reasons:
+ //
+ // (1) With larger write-buffer sizes, it is nice not to do too
+ // many level-0 compactions.
+ //
+ // (2) The files in level-0 are merged on every read and
+ // therefore we wish to avoid too many files when the individual
+ // file size is small (perhaps because of a small write-buffer
+ // setting, or very high compression ratios, or lots of
+ // overwrites/deletions).
+ score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger())
+ } else {
+ score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level))
+ }
+
+ if score > bestScore {
+ bestLevel = level
+ bestScore = score
+ }
+ }
+
+ v.cLevel = bestLevel
+ v.cScore = bestScore
+}
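+
+// A rough example (hypothetical option values): with a level-0 trigger of
+// 4 files, five level-0 tables score 5/4 = 1.25, while a level 1 holding
+// 12MiB against a 10MiB size budget scores 1.2. The best score wins
+// (level 0 here), and needCompaction reports true once that score
+// reaches 1.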
+
+func (v *version) needCompaction() bool {
+ return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil
+}
+
+type tablesScratch struct {
+ added map[uint64]atRecord
+ deleted map[uint64]struct{}
+}
+
+type versionStaging struct {
+ base *version
+ tables []tablesScratch
+}
+
+func (p *versionStaging) commit(r *sessionRecord) {
+ // Deleted tables.
+ for _, r := range r.deletedTables {
+ tm := &(p.tables[r.level])
+
+ if len(p.base.tables[r.level]) > 0 {
+ if tm.deleted == nil {
+ tm.deleted = make(map[uint64]struct{})
+ }
+ tm.deleted[r.num] = struct{}{}
+ }
+
+ if tm.added != nil {
+ delete(tm.added, r.num)
+ }
+ }
+
+ // New tables.
+ for _, r := range r.addedTables {
+ tm := &(p.tables[r.level])
+
+ if tm.added == nil {
+ tm.added = make(map[uint64]atRecord)
+ }
+ tm.added[r.num] = r
+
+ if tm.deleted != nil {
+ delete(tm.deleted, r.num)
+ }
+ }
+}
+
+func (p *versionStaging) finish() *version {
+ // Build new version.
+ nv := newVersion(p.base.s)
+ for level, tm := range p.tables {
+ btables := p.base.tables[level]
+
+ n := len(btables) + len(tm.added) - len(tm.deleted)
+ if n < 0 {
+ n = 0
+ }
+ nt := make(tFiles, 0, n)
+
+ // Base tables.
+ for _, t := range btables {
+ if _, ok := tm.deleted[t.file.Num()]; ok {
+ continue
+ }
+ if _, ok := tm.added[t.file.Num()]; ok {
+ continue
+ }
+ nt = append(nt, t)
+ }
+
+ // New tables.
+ for _, r := range tm.added {
+ nt = append(nt, p.base.s.tableFileFromRecord(r))
+ }
+
+ // Sort tables.
+ if level == 0 {
+ nt.sortByNum()
+ } else {
+ nt.sortByKey(p.base.s.icmp)
+ }
+ nv.tables[level] = nt
+ }
+
+ // Compute compaction score for new version.
+ nv.computeCompaction()
+
+ return nv
+}
+
+type versionReleaser struct {
+ v *version
+ once bool
+}
+
+func (vr *versionReleaser) Release() {
+ v := vr.v
+ v.s.vmu.Lock()
+ if !vr.once {
+ v.releaseNB()
+ vr.once = true
+ }
+ v.s.vmu.Unlock()
+}
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 0000000000..e7ee376c47
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,447 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out <-chan Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key return the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stored using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+ return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
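+//
+// A minimal usage sketch (worker is a hypothetical function that returns
+// once ctx.Done() is closed):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	go worker(ctx)
+//	// ... when the work is no longer needed:
+//	cancel() // closes ctx.Done(), unblocking worker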
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ c := newCancelCtx(parent)
+ propagateCancel(parent, &c)
+ return &c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) cancelCtx {
+ return cancelCtx{
+ Context: parent,
+ done: make(chan struct{}),
+ }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]bool)
+ }
+ p.children[child] = true
+ }
+ p.mu.Unlock()
+ } else {
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ for {
+ switch c := parent.(type) {
+ case *cancelCtx:
+ return c, true
+ case *timerCtx:
+ return &c.cancelCtx, true
+ case *valueCtx:
+ parent = c.Context
+ default:
+ return nil, false
+ }
+ }
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ done chan struct{} // closed by the first cancel call.
+
+ mu sync.Mutex
+ children map[canceler]bool // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ return c.done
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.err
+}
+
+func (c *cancelCtx) String() string {
+ return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
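+//
+// A brief sketch (the 50ms deadline is illustrative only):
+//
+//	d := time.Now().Add(50 * time.Millisecond)
+//	ctx, cancel := context.WithDeadline(context.Background(), d)
+//	defer cancel()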
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: deadline,
+ }
+ propagateCancel(parent, c)
+ d := deadline.Sub(time.Now())
+ if d <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(true, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(d, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+ return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+ if c.key == key {
+ return c.val
+ }
+ return c.Context.Value(key)
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/AUTHORS b/vendor/gopkg.in/fsnotify.v1/AUTHORS
new file mode 100644
index 0000000000..4e0e8284e9
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/AUTHORS
@@ -0,0 +1,34 @@
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Adrien Bustany
+Caleb Spare
+Case Nelson
+Chris Howey
+Christoffer Buchholz
+Dave Cheney
+Francisco Souza
+Hari haran
+John C Barstow
+Kelvin Fo
+Matt Layher
+Nathan Youngman
+Paul Hammond
+Pieter Droogendijk
+Pursuit92
+Rob Figueiredo
+Soge Zhang
+Tilak Sharma
+Travis Cline
+Tudor Golubenco
+Yukang
+bronze1man
+debrando
+henrikedwards
diff --git a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md
new file mode 100644
index 0000000000..ea9428a2a4
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md
@@ -0,0 +1,263 @@
+# Changelog
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * fewer mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watching a directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
+
diff --git a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md
new file mode 100644
index 0000000000..0f377f341b
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, OS X and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/gopkg.in/fsnotify.v1/LICENSE b/vendor/gopkg.in/fsnotify.v1/LICENSE
new file mode 100644
index 0000000000..f21e540800
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/fsnotify.v1/NotUsed.xcworkspace b/vendor/gopkg.in/fsnotify.v1/NotUsed.xcworkspace
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vendor/gopkg.in/fsnotify.v1/README.md b/vendor/gopkg.in/fsnotify.v1/README.md
new file mode 100644
index 0000000000..7a0b247364
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/README.md
@@ -0,0 +1,59 @@
+# File system notifications for Go
+
+[coverage](http://gocover.io/github.com/go-fsnotify/fsnotify) [GoDoc](https://godoc.org/gopkg.in/fsnotify.v1)
+
+Go 1.3+ required.
+
+Cross platform: Windows, Linux, BSD and OS X.
+
+|Adapter |OS |Status |
+|----------|----------|----------|
+|inotify |Linux, Android\*|Supported [build status](https://travis-ci.org/go-fsnotify/fsnotify)|
+|kqueue |BSD, OS X, iOS\*|Supported [build status](https://circleci.com/gh/go-fsnotify/fsnotify)|
+|ReadDirectoryChangesW|Windows|Supported [build status](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
+|FSEvents |OS X |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)|
+|FEN |Solaris 11 |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)|
+|fanotify |Linux 2.6.37+ | |
+|USN Journals |Windows |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/53)|
+|Polling |*All* |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)|
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information.
+
+## API stability
+
+Two major versions of fsnotify exist.
+
+**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1.
+
+```go
+import "gopkg.in/fsnotify.v0"
+```
+
+\* Refer to the package as fsnotify (without the .v0 suffix).
+
+**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with:
+
+```go
+import "gopkg.in/fsnotify.v1"
+```
+
+Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API.
+
+**Master** may have unreleased changes. Use it to test the very latest code or when [contributing][], but don't expect it to remain API-compatible:
+
+```go
+import "github.com/go-fsnotify/fsnotify"
+```
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go).
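+
+A minimal usage sketch (the watched path and log output are illustrative
+only; see the linked example for the canonical version):
+
+```go
+package main
+
+import (
+	"log"
+
+	"gopkg.in/fsnotify.v1"
+)
+
+func main() {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer watcher.Close()
+
+	if err := watcher.Add("/tmp"); err != nil {
+		log.Fatal(err)
+	}
+
+	for {
+		select {
+		case event := <-watcher.Events:
+			log.Println("event:", event)
+		case err := <-watcher.Errors:
+			log.Println("error:", err)
+		}
+	}
+}
+```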
+
+
+[contributing]: https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md
diff --git a/vendor/gopkg.in/fsnotify.v1/circle.yml b/vendor/gopkg.in/fsnotify.v1/circle.yml
new file mode 100644
index 0000000000..204217fb0b
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/circle.yml
@@ -0,0 +1,26 @@
+## OS X build (CircleCI iOS beta)
+
+# Pretend like it's an Xcode project, at least to get it running.
+machine:
+ environment:
+ XCODE_WORKSPACE: NotUsed.xcworkspace
+ XCODE_SCHEME: NotUsed
+ # This is where the go project is actually checked out to:
+ CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify
+
+dependencies:
+ pre:
+ - brew upgrade go
+
+test:
+ override:
+ - go test ./...
+
+# Idealized future config, eventually with cross-platform build matrix :-)
+
+# machine:
+# go:
+# version: 1.4
+# os:
+# - osx
+# - linux
diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify.go b/vendor/gopkg.in/fsnotify.v1/fsnotify.go
new file mode 100644
index 0000000000..c899ee0083
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/fsnotify.go
@@ -0,0 +1,62 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if e.Op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if e.Op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if e.Op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if e.Op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if e.Op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+
+ // If buffer remains empty, return no event names
+ if buffer.Len() == 0 {
+ return fmt.Sprintf("%q: ", e.Name)
+ }
+
+ // Return a list of event names, with leading pipe character stripped
+ return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:])
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify.go b/vendor/gopkg.in/fsnotify.v1/inotify.go
new file mode 100644
index 0000000000..d7759ec8c8
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/inotify.go
@@ -0,0 +1,306 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ mu sync.Mutex // Map access
+ fd int
+ poller *fdPoller
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ fd, errno := syscall.InotifyInit()
+ if fd == -1 {
+ return nil, errno
+ }
+ // Create epoll
+ poller, err := newFdPoller(fd)
+ if err != nil {
+ syscall.Close(fd)
+ return nil, err
+ }
+ w := &Watcher{
+ fd: fd,
+ poller: poller,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed() {
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+
+ // Wake up goroutine
+ w.poller.wake()
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM |
+ syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY |
+ syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF
+
+ var flags uint32 = agnosticEvents
+
+ w.mu.Lock()
+ watchEntry, found := w.watches[name]
+ w.mu.Unlock()
+ if found {
+ watchEntry.flags |= flags
+ flags |= syscall.IN_MASK_ADD
+ }
+ wd, errno := syscall.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ w.mu.Lock()
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ w.mu.Unlock()
+
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+ }
+ // inotify_rm_watch will return EINVAL if the file has been deleted;
+ // the inotify will already have been removed.
+ // That means we can safely delete it from our watches, whatever inotify_rm_watch does.
+ delete(w.watches, name)
+ success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case.
+ // the only two possible errors are:
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ return errno
+ }
+ return nil
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ var (
+ buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ n int // Number of bytes read with read()
+ errno error // Syscall errno
+ ok bool // For poller.wait
+ )
+
+ defer close(w.doneResp)
+ defer close(w.Errors)
+ defer close(w.Events)
+ defer syscall.Close(w.fd)
+ defer w.poller.close()
+
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ ok, errno = w.poller.wait()
+ if errno != nil {
+ select {
+ case w.Errors <- errno:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ n, errno = syscall.Read(w.fd, buf[:])
+ // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html :
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+ if errno == syscall.EINTR {
+ continue
+ }
+
+ // syscall.Read might have been woken up by Close. If so, we're done.
+ if w.isClosed() {
+ return
+ }
+
+ if n < syscall.SizeofInotifyEvent {
+ var err error
+ if n == 0 {
+ // EOF was received, which should really never happen.
+ err = io.EOF
+ } else if n < 0 {
+ // An error occurred while reading.
+ err = errno
+ } else {
+ // Read was too short.
+ err = errors.New("notify: short read in readEvents()")
+ }
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-syscall.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ mask := uint32(raw.Mask)
+ nameLen := uint32(raw.Len)
+ // If the event happened to the watched directory or the watched file, the kernel
+ // doesn't append the filename to the event, but we would like to always fill the
+ // the "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
+ w.mu.Lock()
+ name := w.paths[int(raw.Wd)]
+ w.mu.Unlock()
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if !event.ignoreLinux(mask) {
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += syscall.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel, such as events marked as ignored by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+ // Ignore anything the inotify API says to ignore
+ if mask&syscall.IN_IGNORED == syscall.IN_IGNORED {
+ return true
+ }
+
+ // If the event is not a DELETE or RENAME, the file must exist.
+ // Otherwise the event is ignored.
+ // *Note*: this was put in place because it was seen that a MODIFY
+ // event was sent after the DELETE. This ignores that MODIFY and
+ // assumes a DELETE will come or has come if the file doesn't exist.
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+ _, statErr := os.Lstat(e.Name)
+ return os.IsNotExist(statErr)
+ }
+ return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&syscall.IN_MODIFY == syscall.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller.go b/vendor/gopkg.in/fsnotify.v1/inotify_poller.go
new file mode 100644
index 0000000000..3b41784041
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/inotify_poller.go
@@ -0,0 +1,186 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "syscall"
+)
+
+type fdPoller struct {
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ epfd int // Epoll file descriptor
+ pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+ poller := new(fdPoller)
+ poller.fd = fd
+ poller.epfd = -1
+ poller.pipe[0] = -1
+ poller.pipe[1] = -1
+ return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+ var errno error
+ poller := emptyPoller(fd)
+ defer func() {
+ if errno != nil {
+ poller.close()
+ }
+ }()
+ poller.fd = fd
+
+ // Create epoll fd
+ poller.epfd, errno = syscall.EpollCreate(1)
+ if poller.epfd == -1 {
+ return nil, errno
+ }
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end.
+ errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register inotify fd with epoll
+ event := syscall.EpollEvent{
+ Fd: int32(poller.fd),
+ Events: syscall.EPOLLIN,
+ }
+ errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register pipe fd with epoll
+ event = syscall.EpollEvent{
+ Fd: int32(poller.pipe[0]),
+ Events: syscall.EPOLLIN,
+ }
+ errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+ // I don't know whether epoll_wait returns the number of events written
+ // to the buffer, or the total number of events ready.
+ // I decided to catch both by making the buffer one larger than the maximum.
+ events := make([]syscall.EpollEvent, 7)
+ for {
+ n, errno := syscall.EpollWait(poller.epfd, events, -1)
+ if n == -1 {
+ if errno == syscall.EINTR {
+ continue
+ }
+ return false, errno
+ }
+ if n == 0 {
+ // If there are no events, try again.
+ continue
+ }
+ if n > 6 {
+ // This should never happen. More events were returned than should be possible.
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
+ }
+ ready := events[:n]
+ epollhup := false
+ epollerr := false
+ epollin := false
+ for _, event := range ready {
+ if event.Fd == int32(poller.fd) {
+ if event.Events&syscall.EPOLLHUP != 0 {
+ // This should not happen, but if it does, treat it as a wakeup.
+ epollhup = true
+ }
+ if event.Events&syscall.EPOLLERR != 0 {
+ // If an error is waiting on the file descriptor, we should pretend
+ // something is ready to read, and let syscall.Read pick up the error.
+ epollerr = true
+ }
+ if event.Events&syscall.EPOLLIN != 0 {
+ // There is data to read.
+ epollin = true
+ }
+ }
+ if event.Fd == int32(poller.pipe[0]) {
+ if event.Events&syscall.EPOLLHUP != 0 {
+ // Write pipe descriptor was closed, by us. This means we're closing down the
+ // watcher, and we should wake up.
+ }
+ if event.Events&syscall.EPOLLERR != 0 {
+ // If an error is waiting on the pipe file descriptor.
+ // This is an absolute mystery, and should never ever happen.
+ return false, errors.New("Error on the pipe descriptor.")
+ }
+ if event.Events&syscall.EPOLLIN != 0 {
+ // This is a regular wakeup, so we have to clear the buffer.
+ err := poller.clearWake()
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ }
+
+ if epollhup || epollerr || epollin {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// wake writes to the write end of the pipe to wake up the poller.
+func (poller *fdPoller) wake() error {
+ buf := make([]byte, 1)
+ n, errno := syscall.Write(poller.pipe[1], buf)
+ if n == -1 {
+ if errno == syscall.EAGAIN {
+ // Buffer is full, poller will wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+ // You have to be woken up a LOT in order to get to 100!
+ buf := make([]byte, 100)
+ n, errno := syscall.Read(poller.pipe[0], buf)
+ if n == -1 {
+ if errno == syscall.EAGAIN {
+ // Buffer is empty, someone else cleared our wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+ if poller.pipe[1] != -1 {
+ syscall.Close(poller.pipe[1])
+ }
+ if poller.pipe[0] != -1 {
+ syscall.Close(poller.pipe[0])
+ }
+ if poller.epfd != -1 {
+ syscall.Close(poller.epfd)
+ }
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/kqueue.go b/vendor/gopkg.in/fsnotify.v1/kqueue.go
new file mode 100644
index 0000000000..265622d201
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/kqueue.go
@@ -0,0 +1,463 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "syscall"
+ "time"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ done chan bool // Channel for sending a "quit message" to the reader goroutine
+
+ kq int // File descriptor (as returned by the kqueue() syscall).
+
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Map of watched file descriptors (key: path).
+ externalWatches map[string]bool // Map of watches added by user of the library.
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan bool),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+ w.mu.Unlock()
+
+ w.mu.Lock()
+ ws := w.watches
+ w.mu.Unlock()
+
+ var err error
+ for name := range ws {
+ if e := w.Remove(name); e != nil && err == nil {
+ err = e
+ }
+ }
+
+ // Send "quit" message to the reader goroutine:
+ w.done <- true
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ return w.addWatch(name, noteAllEvents)
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = syscall.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ syscall.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+func (w *Watcher) addWatch(name string, flags uint32) error {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return err
+ }
+
+ // Don't watch sockets.
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
+ return nil
+ }
+
+ // Follow Symlinks
+ // Unfortunately, Linux can add bogus symlinks to watch list without
+ // issue, and Windows can't do symlinks period (AFAIK). To maintain
+ // consistency, we will act like everything is fine. There will simply
+ // be no file events for broken symlinks.
+ // Hence the returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return nil
+ }
+ }
+
+ watchfd, err = syscall.Open(name, openMode, 0700)
+ if watchfd == -1 {
+ return err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+ syscall.Close(watchfd)
+ return err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ w.watches[name] = watchfd
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+ // Watch the directory if it has not been watched before,
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+ watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ eventBuffer := make([]syscall.Kevent_t, 10)
+
+ for {
+ // See if there is a message on the "done" channel
+ select {
+ case <-w.done:
+ err := syscall.Close(w.kq)
+ if err != nil {
+ w.Errors <- err
+ }
+ close(w.Events)
+ close(w.Errors)
+ return
+ default:
+ }
+
+ // Get new events
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != syscall.EINTR {
+ w.Errors <- err
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for len(kevents) > 0 {
+ kevent := &kevents[0]
+ watchfd := int(kevent.Ident)
+ mask := uint32(kevent.Fflags)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+ event := newEvent(path.name, mask)
+
+ if path.isDir && !(event.Op&Remove == Remove) {
+ // Double-check that the directory still exists. This can happen when
+ // we do a rm -fr on a recursively watched folder: we receive a
+ // modification event first, but the folder has already been deleted,
+ // and the delete event only arrives later.
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+ // mark it as a delete event
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel
+ w.Events <- event
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ fileDir, _ := filepath.Split(event.Name)
+ fileDir = filepath.Clean(fileDir)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+ // Make sure the directory exists before we watch for changes. When we
+ // do a recursive watch and perform rm -fr, the parent directory might
+ // have gone missing; ignore the missing directory and let the
+ // upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ // FIXME: should this be for events on files or just isDir?
+ }
+ }
+ }
+
+ // Move to next event
+ kevents = kevents[1:]
+ }
+ }
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles watches all files in a directory to mimic inotify when adding a watch on the directory.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ if err := w.internalWatch(filePath, fileInfo); err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ w.Errors <- err
+ }
+
+ // Search for new files
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event
+ w.Events <- newCreateEvent(filePath)
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ if err := w.internalWatch(filePath, fileInfo); err != nil {
+ return
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error {
+ if fileInfo.IsDir() {
+ // mimic Linux providing delete events for subdirectories
+ // but preserve the flags used if currently watching subdirectory
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= syscall.NOTE_DELETE
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = syscall.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]syscall.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // register the events
+ success, err := syscall.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) {
+ n, err := syscall.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) syscall.Timespec {
+ return syscall.NsecToTimespec(d.Nanoseconds())
+}
diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go b/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go
new file mode 100644
index 0000000000..c57ccb427b
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "syscall"
+
+const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY
diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go b/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go
new file mode 100644
index 0000000000..174b2c331f
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "syscall"
+
+// note: this constant is not defined on BSD
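+// O_EVTONLY opens the file for event notifications only, so the descriptor
+// does not prevent the volume containing the watched file from being
+// unmounted.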
+const openMode = syscall.O_EVTONLY
diff --git a/vendor/gopkg.in/fsnotify.v1/windows.go b/vendor/gopkg.in/fsnotify.v1/windows.go
new file mode 100644
index 0000000000..811585227d
--- /dev/null
+++ b/vendor/gopkg.in/fsnotify.v1/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
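+
+// All mutation of the watch map happens on the readEvents goroutine: Add and
+// Remove enqueue an input message and then wake that goroutine with an empty
+// completion packet (see wakeupReader). This is why several helpers below are
+// annotated "Must run within the I/O thread".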
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sys_FS_ALL_EVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
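+// The sys_FS_* values below mirror the Linux inotify mask bits, which lets
+// this port reuse inotify-style event semantics on Windows.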
+const (
+ // Options for AddWatch
+ sys_FS_ONESHOT = 0x80000000
+ sys_FS_ONLYDIR = 0x1000000
+
+ // Events
+ sys_FS_ACCESS = 0x1
+ sys_FS_ALL_EVENTS = 0xfff
+ sys_FS_ATTRIB = 0x4
+ sys_FS_CLOSE = 0x18
+ sys_FS_CREATE = 0x100
+ sys_FS_DELETE = 0x200
+ sys_FS_DELETE_SELF = 0x400
+ sys_FS_MODIFY = 0x2
+ sys_FS_MOVE = 0xc0
+ sys_FS_MOVED_FROM = 0x40
+ sys_FS_MOVED_TO = 0x80
+ sys_FS_MOVE_SELF = 0x800
+
+ // Special events
+ sys_FS_IGNORED = 0x8000
+ sys_FS_Q_OVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF {
+ e.Op |= Remove
+ }
+ if mask&sys_FS_MODIFY == sys_FS_MODIFY {
+ e.Op |= Write
+ }
+ if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&sys_FS_ATTRIB == sys_FS_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
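+// provisional marks a watch entry whose initial ReadDirectoryChanges call
+// has not yet completed; IGNORED events are suppressed for such entries.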
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle syscall.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov syscall.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [4096]byte
+}
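+
+// Note that ov must be the first field of watch: readEvents recovers the
+// *watch from the *syscall.Overlapped pointer returned by
+// GetQueuedCompletionStatus via an unsafe cast, which relies on ov sitting
+// at offset zero.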
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if e != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
+ }
+ return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+ if e != nil {
+ return "", os.NewSyscallError("GetFileAttributes", e)
+ }
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname)
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func getIno(path string) (ino *inode, err error) {
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateFile", e)
+ }
+ var fi syscall.ByHandleFileInformation
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+ syscall.CloseHandle(h)
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
+ return nil
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+ syscall.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ syscall.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+ if err = w.startRead(watchEntry); err != nil {
+ return err
+ }
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+ if watch == nil {
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
+ delete(watch.names, name)
+ }
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CancelIo", e)
+ w.deleteWatch(watch)
+ }
+ mask := toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= toWindowsFlags(m)
+ }
+ if mask == 0 {
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if e != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
+ if watch.mask&sys_FS_ONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n, key uint32
+ ov *syscall.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+ watch := (*watch)(unsafe.Pointer(ov))
+
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+ var err error
+ if e := syscall.CloseHandle(w.port); e != nil {
+ err = os.NewSyscallError("CloseHandle", e)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch e {
+ case syscall.ERROR_MORE_DATA:
+ if watch == nil {
+ w.Errors <- errors.New("ERROR_MORE_DATA with unexpectedly nil lpOverlapped buffer")
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+ w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ fullname := watch.path + "\\" + name
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sys_FS_DELETE_SELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sys_FS_MODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sys_FS_MOVE_SELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sys_FS_ONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sys_FS_ONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = watch.path + "\\" + watch.rename
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Error!
+ if offset >= n {
+ w.Errors <- errors.New("Windows system assumed the buffer was larger than it is; events have likely been missed")
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.Errors <- err
+ }
+ }
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+ if mask == 0 {
+ return false
+ }
+ event := newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit:
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sys_FS_ACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sys_FS_MODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sys_FS_ATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sys_FS_CREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sys_FS_DELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sys_FS_MODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sys_FS_MOVED_FROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sys_FS_MOVED_TO
+ }
+ return 0
+}
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 0000000000..a68e67f01b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,188 @@
+
+Copyright (c) 2011-2014 - Canonical Inc.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 0000000000..8da58fbf6f
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 0000000000..d6c919e607
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,128 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+type T struct {
+ A string
+ B struct{C int; D []int ",flow"}
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 0000000000..95ec014e8c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+ "io"
+ "os"
+)
+
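+// yaml_insert_token inserts a token into the parser's token queue at offset
+// pos from the current head; a negative pos appends the token instead.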
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_file_read_handler
+ parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_file.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_file_write_handler
+ emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+ return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+ return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+ return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+ return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 0000000000..085cddc44b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,683 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML parser")
+ }
+
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+
+ yaml_parser_set_input_string(&p.parser, b)
+
+ p.skip()
+ if p.event.typ != yaml_STREAM_START_EVENT {
+ panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return &p
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+ if p.event.typ != yaml_NO_EVENT {
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ yaml_event_delete(&p.event)
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ switch p.event.typ {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+ }
+ panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.skip()
+ n.children = append(n.children, p.parse())
+ if p.event.typ != yaml_DOCUMENT_END_EVENT {
+ panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.skip()
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[string]bool
+ mapType reflect.Type
+ terrors []string
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+)
+
+func newDecoder() *decoder {
+ d := &decoder{mapType: defaultMapType}
+ d.aliases = make(map[string]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
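+
+// Editor's sketch (assumed usage, not part of upstream): prepare detects
+// custom unmarshalers through the addressable value, so a type such as
+//
+//     type Celsius float64
+//
+//     func (c *Celsius) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//         var s string
+//         if err := unmarshal(&s); err != nil {
+//             return err
+//         }
+//         f, err := strconv.ParseFloat(strings.TrimSuffix(s, "C"), 64)
+//         if err != nil {
+//             return err
+//         }
+//         *c = Celsius(f)
+//         return nil
+//     }
+//
+// takes over decoding of "temp: 21.5C" (assuming strconv and strings are
+// imported where Celsius is defined).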
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ an, ok := d.doc.anchors[n.value]
+ if !ok {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ if d.aliases[n.value] {
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n.value] = true
+ good = d.unmarshal(an, out)
+ delete(d.aliases, n.value)
+ return good
+}
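+
+// Editor's note (illustrative, not upstream): given
+//
+//     base: &defaults {retries: 3}
+//     prod: *defaults
+//
+// decoding "prod" resolves the alias through d.doc.anchors["defaults"], while
+// d.aliases rejects self-referential values such as "a: &a [*a]".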
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if s, ok := resolved.(string); ok && out.CanAddr() {
+ if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ err := u.UnmarshalText([]byte(s))
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ good = true
+ } else if resolved != nil {
+ out.SetString(n.value)
+ good = true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ good = true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ good = true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ good = true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ good = true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ good = true
+ case int64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case float64:
+ out.SetFloat(resolved)
+ good = true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ good = true
+ }
+ }
+ if !good {
+ d.terror(n, tag, out)
+ }
+ return good
+}
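+
+// Editor's note (illustrative, not upstream): the kind switch above lets one
+// scalar feed fields of different Go types, e.g.
+//
+//     type Server struct {
+//         Port    uint16        `yaml:"port"`    // "8080" -> 8080
+//         Timeout time.Duration `yaml:"timeout"` // "2s" -> 2 * time.Second
+//     }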
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ out.Set(out.Slice(0, j))
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
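+
+// Editor's example (illustrative, not upstream) of merge-key input:
+//
+//     defaults: &defaults
+//       adapter: postgres
+//       host: localhost
+//     development:
+//       database: dev
+//       <<: *defaults
+//
+// For a sequence of maps after "<<", the backwards iteration above means the
+// earliest map wins: later unmarshal calls overwrite entries, so the first
+// map in the list is applied last.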
+
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 0000000000..2befd553ed
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
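+
+// Editor's note (illustrative): the accumulation counts above let the emitter
+// peek ahead before choosing a style, e.g. yaml_emitter_check_empty_sequence
+// below needs to see whether SEQUENCE-START is immediately followed by
+// SEQUENCE-END so that an empty sequence can be emitted inline as [].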
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+ }
+ return false
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
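+
+// Editor's note (illustrative): a simple key is one the emitter can write as
+// "key: value" on a single line; multiline scalars, or keys whose combined
+// anchor, tag, and value exceed 128 bytes, fall back to the explicit
+// "? key" / ": value" form instead.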
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
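+
+// Editor's examples (illustrative) of the fallbacks above: "hello" stays
+// plain; "hello: world" contains an indicator and is demoted to single
+// quotes; a value with special characters, or a multiline value used as a
+// simple key, is demoted to double quotes, the only style that can escape
+// arbitrary characters.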
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? It couldn't be the end of the string, as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
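+// Editor's note (not upstream): break to a new line unless the cursor is
+// already at a fresh, sufficiently indented position, then pad with spaces
+// up to the current indent.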
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
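+
+// Editor's example (illustrative): bytes outside the URI-safe set above are
+// percent-encoded per octet, so a tag suffix containing "é" (0xC3 0xA9 in
+// UTF-8) is written out as "%C3%A9".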
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
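+
+// Editor's example (illustrative): in a double-quoted scalar, TAB is written
+// via its named escape as \t and U+2028 (LINE SEPARATOR) as \L, while any
+// character without a named escape falls back to \xXX, \uXXXX, or \UXXXXXXXX
+// depending on its value.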
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
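+
+// Editor's example (illustrative): for a literal scalar the hints yield
+//
+//     "text"     -> |-   (no trailing newline: strip)
+//     "text\n"   -> |    (one trailing newline: default clip)
+//     "text\n\n" -> |+   (extra trailing newlines: keep)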
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
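+
+// Illustrative note (not part of the upstream port): on parse, folding
+// turns the single line break between non-empty lines back into a space,
+// so the folded scalar
+//
+//	>
+//	  one
+//	  two
+//
+// round-trips the string "one two\n"; the extra put_break above is what
+// preserves a hard newline in the value as a blank line in the output.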
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 0000000000..84f8499551
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,306 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+}
+
+func newEncoder() (e *encoder) {
+ e = &encoder{}
+ e.must(yaml_emitter_initialize(&e.emitter))
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+ e.emit()
+ e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+ e.emit()
+ return e
+}
+
+func (e *encoder) finish() {
+ e.must(yaml_document_end_event_initialize(&e.event, true))
+ e.emit()
+ e.emitter.open_ended = false
+ e.must(yaml_stream_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+ e.must(false)
+ }
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ if m, ok := iface.(Marshaler); ok {
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ } else if m, ok := iface.(encoding.TextMarshaler); ok {
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
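+
+// For example (an illustrative sketch; Marshal is the package-level entry
+// point defined in yaml.go, which is not part of this file):
+//
+//	type T struct {
+//		Name  string `yaml:"name"`
+//		Ports []int  `yaml:"ports,flow"`
+//	}
+//	out, err := yaml.Marshal(T{Name: "web", Ports: []int{80, 443}})
+//	// err == nil; string(out) == "name: web\nports: [80, 443]\n"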
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ f()
+ e.must(yaml_mapping_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
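+
+// Illustrative examples (not in upstream):
+//
+//	isBase60Float("1:30")       // true: quoted on output
+//	isBase60Float("-2:40:10.5") // true
+//	isBase60Float("1.5")        // false: no ':'
+//	isBase60Float("a:b")        // false: no leading sign or digit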
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ rtag, rs := resolve("", s)
+ if rtag == yaml_BINARY_TAG {
+ if tag == "" || tag == yaml_STR_TAG {
+ tag = rtag
+ s = rs.(string)
+ } else if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ } else {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ }
+ if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else if strings.Contains(s, "\n") {
+ style = yaml_LITERAL_SCALAR_STYLE
+ } else {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // FIXME: Handle 64 bits here.
+ s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000000..0a7037ad1b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1096 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
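+// As an illustration (not part of the C port), parsing the document
+//
+//	a: [1, 2]
+//
+// produces the event sequence STREAM-START, DOCUMENT-START, MAPPING-START,
+// SCALAR("a"), SEQUENCE-START, SCALAR("1"), SCALAR("2"), SEQUENCE-END,
+// MAPPING-END, DOCUMENT-END and STREAM-END.
+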
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+ return false
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
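+
+// Illustrative note (not in upstream): with the defaults above, "!!str"
+// resolves to "tag:yaml.org,2002:str", and a document directive such as
+//
+//	%TAG !e! tag:example.com,2000:app/
+//	---
+//	!e!foo bar
+//
+// makes "!e!foo" resolve to "tag:example.com,2000:app/foo".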
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 0000000000..d5fb097277
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,391 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
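+
+// For example (illustrative): input starting with "\xff\xfeh\x00i\x00" is
+// detected as UTF-16LE and the two BOM bytes are skipped, while BOM-less
+// input such as "hi" falls through to the UTF-8 default.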
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However, a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
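+ //
+ // Worked example (illustrative, not in upstream): for U+1F600,
+ // U' = 0x1F600 - 0x10000 = 0x0F600, so W1 = 0xD800 + (U'>>10) =
+ // 0xD83D and W2 = 0xDC00 + (U'&0x3FF) = 0xDE00; the code below
+ // reverses exactly this computation.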
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ }
+ buffer_len += width
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 0000000000..93a8632743
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,203 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
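+
+// For example (illustrative):
+//
+//	shortTag("tag:yaml.org,2002:str") // "!!str"
+//	longTag("!!int")                  // "tag:yaml.org,2002:int"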
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+ return true
+ }
+ return false
+}
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, -int(intv)
+ } else {
+ return yaml_INT_TAG, -intv
+ }
+ }
+ }
+ // XXX Handle timestamps here.
+
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ if tag == yaml_BINARY_TAG {
+ return yaml_BINARY_TAG, in
+ }
+ if utf8.ValidString(in) {
+ return yaml_STR_TAG, in
+ }
+ return yaml_BINARY_TAG, encodeBase64(in)
+}
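+
+// Editorial sketch of the behavior above: resolve("", "yes") yields
+// (yaml_BOOL_TAG, true), resolve("", "10") yields (yaml_INT_TAG, 10),
+// resolve("", "hello") yields (yaml_STR_TAG, "hello"), and input that is not
+// valid UTF-8 comes back as yaml_BINARY_TAG with its content base64-encoded
+// by encodeBase64 below.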
+
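+// Editorial example for the function below: encoding 100 input bytes yields
+// 136 base64 characters, which the loop emits as one 70-character line and
+// one 66-character line, each terminated by '\n'.
+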
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000000..fe93b190c2
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, by contrast,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// There are actually only two aspects of Scanning that might be called
+// "clever"; the rest is quite straightforward. These are "block collection
+// start" and "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes an indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// STREAM-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
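+
+// A minimal usage sketch (editorial; assumes a parser set up by the
+// yaml_parser_* initialization in this package). Callers drain tokens by
+// calling yaml_parser_scan in a loop until STREAM-END arrives or the call
+// reports failure:
+//
+//     var tok yaml_token_t
+//     for yaml_parser_scan(&parser, &tok) {
+//         if tok.typ == yaml_STREAM_END_TOKEN {
+//             break
+//         }
+//         // ... handle tok ...
+//     }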
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ // Pass the caller-supplied problem through (the original dropped it).
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank character except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we could not determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // A simple key is required only when it is the first token in the current
+ // line. Therefore it is always allowed. But we add a check anyway.
+ if required && !parser.simple_key_allowed {
+ panic("should not happen")
+ }
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
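+
+// Editorial sketch of how roll and unroll cooperate: scanning "a:\n  b: c"
+// pushes -1 and then 0 onto the indents stack (emitting BLOCK-MAPPING-START
+// at columns 0 and 2), and reaching the end of the stream unrolls back to -1,
+// emitting the matching pair of BLOCK-END tokens.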
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A flow scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found uknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
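+// Scan an ALIAS or ANCHOR token.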
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ // Check if the length of the anchor is greater than 0 and it is followed
+ // by a whitespace character or one of the indicators:
+ //
+ //     '?', ':', ',', ']', '}', '%', '@', '`'.
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+// Scan a TAG token.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of the URI.
+ // Comparing the whole handle avoids an out-of-range read when it is just "!".
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag URI.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the tag is non-empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
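+ // (1024 is a sentinel: it is replaced by the real UTF-8 sequence width once the first octet is read.)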
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
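As a point of reference, the validation above can be expressed as a small standalone sketch. decodeURIEscapes below is a hypothetical helper written for illustration only (it is not part of the vendored package); it applies the same leading-width and trailing-octet checks to a string of %XX escapes:

package main

import (
	"fmt"
	"strconv"
)

// decodeURIEscapes decodes a run of %XX escapes into raw bytes, checking
// that they form complete UTF-8 sequences: the leading octet fixes the
// sequence width, and every continuation octet must match 10xxxxxx.
func decodeURIEscapes(in string) ([]byte, error) {
	var out []byte
	w := 0 // continuation octets still expected
	for i := 0; i < len(in); i += 3 {
		if in[i] != '%' || i+3 > len(in) {
			return nil, fmt.Errorf("did not find URI escaped octet")
		}
		n, err := strconv.ParseUint(in[i+1:i+3], 16, 8)
		if err != nil {
			return nil, err
		}
		octet := byte(n)
		if w == 0 {
			switch { // leading octet: determine the width
			case octet&0x80 == 0x00:
				w = 1
			case octet&0xE0 == 0xC0:
				w = 2
			case octet&0xF0 == 0xE0:
				w = 3
			case octet&0xF8 == 0xF0:
				w = 4
			default:
				return nil, fmt.Errorf("incorrect leading UTF-8 octet")
			}
		} else if octet&0xC0 != 0x80 {
			return nil, fmt.Errorf("incorrect trailing UTF-8 octet")
		}
		out = append(out, octet)
		w--
	}
	if w != 0 {
		return nil, fmt.Errorf("truncated UTF-8 sequence")
	}
	return out, nil
}

func main() {
	b, err := decodeURIEscapes("%C3%A9")
	fmt.Printf("%q %v\n", b, err) // "é" <nil>
}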
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an intendation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
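The indicators parsed above ('|' vs '>' for literal vs folded style, '+'/'-' for chomping) are easiest to see through the package's public API. A small usage sketch against the vendored gopkg.in/yaml.v2 (the struct shape and keys are illustrative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v struct {
		Keep string `yaml:"keep"`
		Clip string `yaml:"clip"`
		Fold string `yaml:"fold"`
	}
	doc := "keep: |+\n  a\n\nclip: |\n  a\n\nfold: >\n  a\n  b\n"
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	// '+' keeps trailing breaks, plain '|' clips to one, '>' folds lines.
	fmt.Printf("%q %q %q\n", v.Keep, v.Clip, v.Fold)
	// "a\n\n" "a\n" "a b\n"
}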
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an intendation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
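The escape handling above is visible directly through Unmarshal: double-quoted scalars support \uXXXX and single-letter escapes, while single-quoted scalars only escape the quote itself by doubling it. A sketch (struct shape is illustrative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v struct {
		D string `yaml:"d"`
		S string `yaml:"s"`
	}
	doc := "d: \"caf\\u00e9\\tok\"\ns: 'it''s'\n"
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q\n", v.D, v.S) // "café\tok" "it's"
}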
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+ if parser.flow_level > 0 &&
+ parser.buffer[parser.buffer_pos] == ':' &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found unexpected ':'")
+ return false
+ }
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for a tab character that abuses indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check the indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
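Plain scalars fold line breaks just like the folded style, which is what the leading_break/trailing_breaks bookkeeping above implements. A quick check (key name is illustrative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v struct {
		Msg string `yaml:"msg"`
	}
	doc := "msg: plain scalars\n  fold single\n  line breaks\n"
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.Msg) // "plain scalars fold single line breaks"
}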
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 0000000000..5958822f9c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it was one.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
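This comparator gives map keys a natural ordering when the encoder sorts them: embedded digit runs compare numerically rather than lexically. Observable via Marshal:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"item10": "c",
		"item1":  "a",
		"item2":  "b",
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item1: a
	// item2: b
	// item10: c   (a plain string sort would give item1, item10, item2)
}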
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 0000000000..190362f25d
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ // If the output encoding is UTF-8, we don't need to recode the buffer.
+ if emitter.encoding == yaml_UTF8_ENCODING {
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+ }
+
+ // Recode the buffer into the raw buffer.
+ var low, high int
+ if emitter.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ pos := 0
+ for pos < emitter.buffer_pos {
+ // See the "reader.c" code for more details on UTF-8 encoding. Note
+ // that we assume that the buffer contains a valid UTF-8 sequence.
+
+ // Read the next UTF-8 character.
+ octet := emitter.buffer[pos]
+
+ var w int
+ var value rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, value = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, value = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, value = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, value = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = emitter.buffer[pos+k]
+ value = (value << 6) + (rune(octet) & 0x3F)
+ }
+ pos += w
+
+ // Write the character.
+ if value < 0x10000 {
+ var b [2]byte
+ b[high] = byte(value >> 8)
+ b[low] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+ } else {
+ // Write the character using a surrogate pair (check "reader.c").
+ var b [4]byte
+ value -= 0x10000
+ b[high] = byte(0xD8 + (value >> 18))
+ b[low] = byte((value >> 10) & 0xFF)
+ b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
+ b[low+2] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+ }
+ }
+
+ // Write the raw buffer.
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ emitter.raw_buffer = emitter.raw_buffer[:0]
+ return true
+}
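The surrogate-pair branch above handles code points beyond U+FFFF. For reference, here is a compact sketch of the standard UTF-16 surrogate arithmetic, written independently of the emitter for illustration (encodeUTF16LE is not part of the vendored package):

package main

import "fmt"

// encodeUTF16LE applies the standard surrogate-pair arithmetic: subtract
// 0x10000, put the top ten bits in a 0xD800-based high surrogate and the
// low ten bits in a 0xDC00-based low surrogate, low byte first (LE).
func encodeUTF16LE(value rune) []byte {
	if value < 0x10000 {
		return []byte{byte(value), byte(value >> 8)}
	}
	value -= 0x10000
	hi := 0xD800 + (value >> 10)
	lo := 0xDC00 + (value & 0x3FF)
	return []byte{byte(hi), byte(hi >> 8), byte(lo), byte(lo >> 8)}
}

func main() {
	fmt.Printf("% X\n", encodeUTF16LE('\U0001F600')) // 3D D8 00 DE
}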
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 0000000000..d133edf9d3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,346 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
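The partial-decoding behavior documented above means a *TypeError does not abort the rest of the document. A short sketch (field names are illustrative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v struct {
		A int    `yaml:"a"`
		B string `yaml:"b"`
	}
	err := yaml.Unmarshal([]byte("a: not-a-number\nb: ok\n"), &v)
	// err is a *yaml.TypeError describing the failed field 'a',
	// but 'b' has still been decoded.
	fmt.Printf("%v / %q\n", err, v.B)
}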
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int "a,omitempty"
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshal("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
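The flow and inline tag options described above compose as one would expect. A usage sketch with illustrative types:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Point struct {
	X int `yaml:"x"`
	Y int `yaml:"y"`
}

type Shape struct {
	Name  string `yaml:"name"`
	Sizes []int  `yaml:"sizes,flow"`
	Point `yaml:",inline"`
}

func main() {
	out, err := yaml.Marshal(Shape{Name: "p", Sizes: []int{1, 2}, Point: Point{X: 3, Y: 4}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: p
	// sizes: [1, 2]
	// x: 3
	// y: 4
}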
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 0000000000..d60a6b6b00
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,716 @@
+package yaml
+
+import (
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_file io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write size bytes of the
+// buffer to the output.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_emitter_set_output().
+// [in] buffer The buffer with bytes to be written.
+// [in] size The size of the buffer.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_file io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // Is the output in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // Was the last character whitespace?
+ indention bool // Was the last character an indentation character (' ', '-', '?', ':')?
+ open_ended bool // Is an explicit document end required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // Was the stream already opened?
+ closed bool // Was the stream already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // Has the node been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000000..8110ce3c37
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
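+ // (Each 16-bit unit of UTF-16 input re-encodes to at most three bytes of
+ // UTF-8, so a factor of three is a comfortable upper bound.)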
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
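+ // (Re-encoding UTF-8 as UTF-16 at most doubles the byte count; the extra
+ // two bytes presumably leave room for a partially written character.)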
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
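+// The check operates on raw UTF-8 and, depending on the leading byte, may
+// inspect up to two continuation bytes past position i.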
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
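+// The index parameter is ignored: a byte order mark is only recognized at
+// the very start of the buffer.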
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
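+// Check if the characters at the specified position form a CR LF sequence.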
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
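+
+// The three predicates below inline the checks of the helpers they combine;
+// the commented-out return statements record the equivalence. As with width
+// below, the inlining is presumably deliberate so the compiler can inline
+// these hot-path checks.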
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
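+ // A continuation byte (0b10xxxxxx) or an invalid leading byte.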
+ return 0
+}