diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json deleted file mode 100644 index efae49efff..0000000000 --- a/Godeps/Godeps.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "ImportPath": "github.com/prometheus/prometheus", - "GoVersion": "go1.4.2", - "Deps": [ - { - "ImportPath": "bitbucket.org/ww/goautoneg", - "Comment": "null-5", - "Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675" - }, - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.8.5-1-g11538ee", - "Rev": "11538ee6888f72d4ab44a1aeba06b9bc4cb134a1" - }, - { - "ImportPath": "github.com/beorn7/perks/quantile", - "Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d" - }, - { - "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "0f7a9caded1fb3c9cc5a9b4bcf2ff633cc8ae644" - }, - { - "ImportPath": "github.com/golang/snappy", - "Rev": "0c7f8a7704bfec561913f4df52c832f094ef56f0" - }, - { - "ImportPath": "github.com/hashicorp/consul/api", - "Comment": "v0.5.2-180-ga6317f2", - "Rev": "a6317f2fb2ba9d5ae695f7fa703cfb30a1c59af1" - }, - { - "ImportPath": "github.com/julienschmidt/httprouter", - "Comment": "v1.1-3-g6aacfd5", - "Rev": "6aacfd5ab513e34f7e64ea9627ab9670371b34e7" - }, - { - "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", - "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" - }, - { - "ImportPath": "github.com/miekg/dns", - "Rev": "e59f851c912767b1db587dcabee6e6652e495c75" - }, - { - "ImportPath": "github.com/prometheus/client_golang/extraction", - "Rev": "3a499bf7fc46bc58337ce612d0cbb29c550b8118" - }, - { - "ImportPath": "github.com/prometheus/client_golang/model", - "Rev": "3a499bf7fc46bc58337ce612d0cbb29c550b8118" - }, - { - "ImportPath": "github.com/prometheus/client_golang/prometheus", - "Rev": "3a499bf7fc46bc58337ce612d0cbb29c550b8118" - }, - { - "ImportPath": "github.com/prometheus/client_golang/text", - "Rev": "3a499bf7fc46bc58337ce612d0cbb29c550b8118" - }, - { - "ImportPath": "github.com/prometheus/common/expfmt", - "Rev": "7cd9f43750daf997c60f33f46680780067410f28" - }, - { - "ImportPath": "github.com/prometheus/common/model", - "Rev": "7cd9f43750daf997c60f33f46680780067410f28" - }, - { - "ImportPath": "github.com/prometheus/client_model/go", - "Comment": "model-0.0.2-12-gfa8ad6f", - "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" - }, - { - "ImportPath": "github.com/prometheus/log", - "Rev": "439e5db48fbb50ebbaf2c816030473a62f505f55" - }, - { - "ImportPath": "github.com/prometheus/procfs", - "Rev": "c91d8eefde16bd047416409eb56353ea84a186e4" - }, - { - "ImportPath": "github.com/samuel/go-zookeeper/zk", - "Rev": "5bb5cfc093ad18a28148c578f8632cfdb4d802e4" - }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb", - "Rev": "183614d6b32571e867df4cf086f5480ceefbdfac" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "b71143c25f0aad5f54981684b715686d34c56d25" - }, - { - "ImportPath": "gopkg.in/fsnotify.v1", - "Comment": "v1.2.0", - "Rev": "96c060f6a6b7e0d6f75fddd10efeaca3e5d1bcb0" - }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40" - } - ] -} diff --git a/Godeps/Readme b/Godeps/Readme deleted file mode 100644 index 4cdaa53d56..0000000000 --- a/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. 
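For orientation: `Godeps.json` above is godep's machine-generated manifest, mapping each vendored import path to the exact upstream revision the build is pinned to, with `Comment` recording the nearest version tag when one exists. A minimal sketch of how such a manifest can be decoded (the struct names are illustrative, not part of godep's API):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Godeps mirrors the manifest layout shown above; the field names match
// the JSON keys exactly ("ImportPath", "GoVersion", "Deps", ...).
type Godeps struct {
	ImportPath string
	GoVersion  string
	Deps       []Dependency
}

// Dependency is one pinned package: an import path plus the VCS revision.
type Dependency struct {
	ImportPath string
	Comment    string // nearest version tag, if any (may be empty)
	Rev        string // exact revision the build is pinned to
}

func main() {
	f, err := os.Open("Godeps/Godeps.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	var g Godeps
	if err := json.NewDecoder(f).Decode(&g); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, d := range g.Deps {
		fmt.Printf("%-60s %s\n", d.ImportPath, d.Rev)
	}
}
```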
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore deleted file mode 100644 index f037d684ef..0000000000 --- a/Godeps/_workspace/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/pkg -/bin diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/Makefile b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/Makefile deleted file mode 100644 index e33ee17303..0000000000 --- a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG=bitbucket.org/ww/goautoneg -GOFILES=autoneg.go - -include $(GOROOT)/src/Make.pkg - -format: - gofmt -w *.go - -docs: - gomake clean - godoc ${TARG} > README.txt diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/README.txt b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d58..0000000000 --- a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. 
- -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg.go b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 648b38cb65..0000000000 --- a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. 
-func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg_test.go b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg_test.go deleted file mode 100644 index 41d328f1d5..0000000000 --- a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package goautoneg - -import ( - "testing" -) - -var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5" - -func TestParseAccept(t *testing.T) { - alternatives := []string{"text/html", "image/png"} - content_type := Negotiate(chrome, alternatives) - if content_type != "image/png" { - t.Errorf("got %s expected image/png", content_type) - } - - alternatives = []string{"text/html", "text/plain", "text/n3"} - content_type = Negotiate(chrome, alternatives) - if content_type != "text/html" { - t.Errorf("got %s expected text/html", content_type) - } - - alternatives = []string{"text/n3", "text/plain"} - content_type = Negotiate(chrome, alternatives) - if content_type != "text/plain" { - t.Errorf("got %s expected text/plain", content_type) - } - - alternatives = []string{"text/n3", "application/rdf+xml"} - content_type = Negotiate(chrome, alternatives) - if content_type != "text/n3" { - t.Errorf("got %s expected text/n3", content_type) - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a005..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index 2d8c086617..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.2 - - 1.3 - - 1.4 - - tip -install: - - go get -t ./... 
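To make the deleted goautoneg code above concrete: `Negotiate` picks the best server-supported media type for a client's `Accept` header, returning the empty string when nothing matches. A minimal usage sketch under that API (the handler and the offered types are hypothetical):

```go
package main

import (
	"fmt"
	"net/http"

	"bitbucket.org/ww/goautoneg"
)

// offered lists the media types this hypothetical handler can produce,
// in order of server preference.
var offered = []string{"application/json", "text/plain"}

func handler(w http.ResponseWriter, r *http.Request) {
	// Negotiate returns "" when no offered type satisfies the Accept header.
	ct := goautoneg.Negotiate(r.Header.Get("Accept"), offered)
	if ct == "" {
		http.Error(w, "no acceptable representation", http.StatusNotAcceptable)
		return
	}
	w.Header().Set("Content-Type", ct)
	fmt.Fprintln(w, "hello")
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe(":8080", nil)
}
```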
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index ab18440107..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,37 +0,0 @@ -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f3..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index bd9ffb6eda..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,356 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` - - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP.
| -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `WithFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -func init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&logrus.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this.
- log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). - - ```go - logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) - ``` - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 699ea035cc..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,254 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. 
- Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) 
-} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go deleted file mode 100644 index 98717df490..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEntryPanicln(t *testing.T) { - errBoom := fmt.Errorf("boom time") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicln("kaboom") -} - -func TestEntryPanicf(t *testing.T) { - errBoom := fmt.Errorf("boom again") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom true", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicf("kaboom %v", true) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go deleted file mode 100644 index a1623ec003..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.JSONFormatter) - log.Formatter = new(logrus.TextFormatter) // default - log.Level = logrus.DebugLevel -} - -func main() { - defer func() { - err := recover() - if err != nil { - log.WithFields(logrus.Fields{ - "omg": true, - "err": err, - "number": 100, - }).Fatal("The ice breaks!") - } - }() - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "number": 8, - }).Debug("Started observing beach") - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "temperature": -4, - }).Debug("Temperature changes") - - log.WithFields(logrus.Fields{ - "animal": "orca", - "size": 9009, - }).Panic("It's over 9000!") -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go deleted file mode 100644 index cb5759a35c..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.TextFormatter) // default - log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development")) -} - -func main() { - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased 
tremendously!") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index a67e1b802d..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,188 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. 
-func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index 104d689f18..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,48 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. 
-func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] - } - - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] - } - - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go deleted file mode 100644 index c6d290c77f..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package logrus - -import ( - "fmt" - "testing" - "time" -) - -// smallFields is a small size data set for benchmarking -var smallFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -// largeFields is a large size data set for benchmarking -var largeFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", - "five": "six", - "seven": "eight", - "nine": "ten", - "eleven": "twelve", - "thirteen": "fourteen", - "fifteen": "sixteen", - "seventeen": "eighteen", - "nineteen": "twenty", - "a": "b", - "c": "d", - "e": "f", - "g": "h", - "i": "j", - "k": "l", - "m": "n", - "o": "p", - "q": "r", - "s": "t", - "u": "v", - "w": "x", - "y": "z", - "this": "will", - "make": "thirty", - "entries": "yeah", -} - -var errorFields = Fields{ - "foo": fmt.Errorf("bar"), - "baz": fmt.Errorf("qux"), -} - -func BenchmarkErrorTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) -} - -func BenchmarkSmallTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkLargeTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) -} - -func BenchmarkSmallColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) -} - -func BenchmarkLargeColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) -} - -func BenchmarkSmallJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, smallFields) -} - -func BenchmarkLargeJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, largeFields) -} - -func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { - entry := &Entry{ - Time: time.Time{}, - Level: InfoLevel, - Message: "message", - Data: fields, - } - var d []byte - var err error - for i := 0; i < b.N; i++ { - d, err = formatter.Format(entry) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(d))) - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go deleted file mode 100644 index 8ea93ddf20..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go +++ /dev/null @@ -1,56 +0,0 @@ -package logstash - -import ( - "encoding/json" - "fmt" - - "github.com/Sirupsen/logrus" -) - -// Formatter generates json in logstash format. -// Logstash site: http://logstash.net/ -type LogstashFormatter struct { - Type string // if not empty use for logstash type field. - - // TimestampFormat sets the format used for timestamps. 
- TimestampFormat string -} - -func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { - entry.Data["@version"] = 1 - - if f.TimestampFormat == "" { - f.TimestampFormat = logrus.DefaultTimestampFormat - } - - entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat) - - // set message field - v, ok := entry.Data["message"] - if ok { - entry.Data["fields.message"] = v - } - entry.Data["message"] = entry.Message - - // set level field - v, ok = entry.Data["level"] - if ok { - entry.Data["fields.level"] = v - } - entry.Data["level"] = entry.Level.String() - - // set type field - if f.Type != "" { - v, ok = entry.Data["type"] - if ok { - entry.Data["fields.type"] = v - } - entry.Data["type"] = f.Type - } - - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go deleted file mode 100644 index d8814a0eae..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package logstash - -import ( - "bytes" - "encoding/json" - "github.com/Sirupsen/logrus" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestLogstashFormatter(t *testing.T) { - assert := assert.New(t) - - lf := LogstashFormatter{Type: "abc"} - - fields := logrus.Fields{ - "message": "def", - "level": "ijk", - "type": "lmn", - "one": 1, - "pi": 3.14, - "bool": true, - } - - entry := logrus.WithFields(fields) - entry.Message = "msg" - entry.Level = logrus.InfoLevel - - b, _ := lf.Format(entry) - - var data map[string]interface{} - dec := json.NewDecoder(bytes.NewReader(b)) - dec.UseNumber() - dec.Decode(&data) - - // base fields - assert.Equal(json.Number("1"), data["@version"]) - assert.NotEmpty(data["@timestamp"]) - assert.Equal("abc", data["type"]) - assert.Equal("msg", data["message"]) - assert.Equal("info", data["level"]) - - // substituted fields - assert.Equal("def", data["fields.message"]) - assert.Equal("ijk", data["fields.level"]) - assert.Equal("lmn", data["fields.type"]) - - // formats - assert.Equal(json.Number("1"), data["one"]) - assert.Equal(json.Number("3.14"), data["pi"]) - assert.Equal(true, data["bool"]) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go deleted file mode 100644 index 13f34cb6f8..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package logrus - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type TestHook struct { - Fired bool -} - -func (hook *TestHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *TestHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookFires(t *testing.T) { - hook := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - assert.Equal(t, hook.Fired, false) - - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -type ModifyHook struct { -} - -func (hook *ModifyHook) Fire(entry *Entry) error { - entry.Data["wow"] = "whale" - return nil -} - -func (hook *ModifyHook) Levels() []Level { - return []Level{ - 
DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookCanModifyEntry(t *testing.T) { - hook := new(ModifyHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - }) -} - -func TestCanFireMultipleHooks(t *testing.T) { - hook1 := new(ModifyHook) - hook2 := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook1) - log.Hooks.Add(hook2) - - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - assert.Equal(t, hook2.Fired, true) - }) -} - -type ErrorHook struct { - Fired bool -} - -func (hook *ErrorHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *ErrorHook) Levels() []Level { - return []Level{ - ErrorLevel, - } -} - -func TestErrorHookShouldntFireOnInfo(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, false) - }) -} - -func TestErrorHookShouldFireOnError(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Error("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc39..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go deleted file mode 100644 index b0502c335a..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go +++ /dev/null @@ -1,54 +0,0 @@ -package airbrake - -import ( - "errors" - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/tobi/airbrake-go" -) - -// AirbrakeHook to send exceptions to an exception-tracking service compatible -// with the Airbrake API. 
-type airbrakeHook struct { - APIKey string - Endpoint string - Environment string -} - -func NewHook(endpoint, apiKey, env string) *airbrakeHook { - return &airbrakeHook{ - APIKey: apiKey, - Endpoint: endpoint, - Environment: env, - } -} - -func (hook *airbrakeHook) Fire(entry *logrus.Entry) error { - airbrake.ApiKey = hook.APIKey - airbrake.Endpoint = hook.Endpoint - airbrake.Environment = hook.Environment - - var notifyErr error - err, ok := entry.Data["error"].(error) - if ok { - notifyErr = err - } else { - notifyErr = errors.New(entry.Message) - } - - airErr := airbrake.Notify(notifyErr) - if airErr != nil { - return fmt.Errorf("Failed to send error to Airbrake: %s", airErr) - } - - return nil -} - -func (hook *airbrakeHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.ErrorLevel, - logrus.FatalLevel, - logrus.PanicLevel, - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go deleted file mode 100644 index 058a91e343..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package airbrake - -import ( - "encoding/xml" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Sirupsen/logrus" -) - -type notice struct { - Error NoticeError `xml:"error"` -} -type NoticeError struct { - Class string `xml:"class"` - Message string `xml:"message"` -} - -type customErr struct { - msg string -} - -func (e *customErr) Error() string { - return e.msg -} - -const ( - testAPIKey = "abcxyz" - testEnv = "development" - expectedClass = "*airbrake.customErr" - expectedMsg = "foo" - unintendedMsg = "Airbrake will not see this string" -) - -var ( - noticeError = make(chan NoticeError, 1) -) - -// TestLogEntryMessageReceived checks if invoking Logrus' log.Error -// method causes an XML payload containing the log entry message is received -// by a HTTP server emulating an Airbrake-compatible endpoint. -func TestLogEntryMessageReceived(t *testing.T) { - log := logrus.New() - ts := startAirbrakeServer(t) - defer ts.Close() - - hook := NewHook(ts.URL, testAPIKey, "production") - log.Hooks.Add(hook) - - log.Error(expectedMsg) - - select { - case received := <-noticeError: - if received.Message != expectedMsg { - t.Errorf("Unexpected message received: %s", received.Message) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Airbrake API") - } -} - -// TestLogEntryMessageReceived confirms that, when passing an error type using -// logrus.Fields, a HTTP server emulating an Airbrake endpoint receives the -// error message returned by the Error() method on the error interface -// rather than the logrus.Entry.Message string. 
-func TestLogEntryWithErrorReceived(t *testing.T) { - log := logrus.New() - ts := startAirbrakeServer(t) - defer ts.Close() - - hook := NewHook(ts.URL, testAPIKey, "production") - log.Hooks.Add(hook) - - log.WithFields(logrus.Fields{ - "error": &customErr{expectedMsg}, - }).Error(unintendedMsg) - - select { - case received := <-noticeError: - if received.Message != expectedMsg { - t.Errorf("Unexpected message received: %s", received.Message) - } - if received.Class != expectedClass { - t.Errorf("Unexpected error class: %s", received.Class) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Airbrake API") - } -} - -// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a -// non-error type using logrus.Fields, a HTTP server emulating an Airbrake -// endpoint receives the logrus.Entry.Message string. -// -// Only error types are supported when setting the 'error' field using -// logrus.WithFields(). -func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) { - log := logrus.New() - ts := startAirbrakeServer(t) - defer ts.Close() - - hook := NewHook(ts.URL, testAPIKey, "production") - log.Hooks.Add(hook) - - log.WithFields(logrus.Fields{ - "error": expectedMsg, - }).Error(unintendedMsg) - - select { - case received := <-noticeError: - if received.Message != unintendedMsg { - t.Errorf("Unexpected message received: %s", received.Message) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Airbrake API") - } -} - -func startAirbrakeServer(t *testing.T) *httptest.Server { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var notice notice - if err := xml.NewDecoder(r.Body).Decode(¬ice); err != nil { - t.Error(err) - } - r.Body.Close() - - noticeError <- notice.Error - })) - - return ts -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go deleted file mode 100644 index d20a0f54ab..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go +++ /dev/null @@ -1,68 +0,0 @@ -package logrus_bugsnag - -import ( - "errors" - - "github.com/Sirupsen/logrus" - "github.com/bugsnag/bugsnag-go" -) - -type bugsnagHook struct{} - -// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before -// bugsnag.Configure. Bugsnag must be configured before the hook. -var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook") - -// ErrBugsnagSendFailed indicates that the hook failed to submit an error to -// bugsnag. The error was successfully generated, but `bugsnag.Notify()` -// failed. -type ErrBugsnagSendFailed struct { - err error -} - -func (e ErrBugsnagSendFailed) Error() string { - return "failed to send error to Bugsnag: " + e.err.Error() -} - -// NewBugsnagHook initializes a logrus hook which sends exceptions to an -// exception-tracking service compatible with the Bugsnag API. Before using -// this hook, you must call bugsnag.Configure(). The returned object should be -// registered with a log via `AddHook()` -// -// Entries that trigger an Error, Fatal or Panic should now include an "error" -// field to send to Bugsnag. -func NewBugsnagHook() (*bugsnagHook, error) { - if bugsnag.Config.APIKey == "" { - return nil, ErrBugsnagUnconfigured - } - return &bugsnagHook{}, nil -} - -// Fire forwards an error to Bugsnag. 
Given a logrus.Entry, it extracts the -// "error" field (or the Message if the error isn't present) and sends it off. -func (hook *bugsnagHook) Fire(entry *logrus.Entry) error { - var notifyErr error - err, ok := entry.Data["error"].(error) - if ok { - notifyErr = err - } else { - notifyErr = errors.New(entry.Message) - } - - bugsnagErr := bugsnag.Notify(notifyErr) - if bugsnagErr != nil { - return ErrBugsnagSendFailed{bugsnagErr} - } - - return nil -} - -// Levels enumerates the log levels on which the error should be forwarded to -// bugsnag: everything at or above the "Error" level. -func (hook *bugsnagHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.ErrorLevel, - logrus.FatalLevel, - logrus.PanicLevel, - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go deleted file mode 100644 index e9ea298d89..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package logrus_bugsnag - -import ( - "encoding/json" - "errors" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Sirupsen/logrus" - "github.com/bugsnag/bugsnag-go" -) - -type notice struct { - Events []struct { - Exceptions []struct { - Message string `json:"message"` - } `json:"exceptions"` - } `json:"events"` -} - -func TestNoticeReceived(t *testing.T) { - msg := make(chan string, 1) - expectedMsg := "foo" - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var notice notice - data, _ := ioutil.ReadAll(r.Body) - if err := json.Unmarshal(data, ¬ice); err != nil { - t.Error(err) - } - _ = r.Body.Close() - - msg <- notice.Events[0].Exceptions[0].Message - })) - defer ts.Close() - - hook := &bugsnagHook{} - - bugsnag.Configure(bugsnag.Configuration{ - Endpoint: ts.URL, - ReleaseStage: "production", - APIKey: "12345678901234567890123456789012", - Synchronous: true, - }) - - log := logrus.New() - log.Hooks.Add(hook) - - log.WithFields(logrus.Fields{ - "error": errors.New(expectedMsg), - }).Error("Bugsnag will not see this string") - - select { - case received := <-msg: - if received != expectedMsg { - t.Errorf("Unexpected message received: %s", received) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Bugsnag API") - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md deleted file mode 100644 index ae61e9229a..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Papertrail Hook for Logrus :walrus: - -[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts). - -In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. 
This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible. - -## Usage - -You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`. - -For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs. - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/papertrail" -) - -func main() { - log := logrus.New() - hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME) - - if err == nil { - log.Hooks.Add(hook) - } -} -``` diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go deleted file mode 100644 index c0f10c1bda..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go +++ /dev/null @@ -1,55 +0,0 @@ -package logrus_papertrail - -import ( - "fmt" - "net" - "os" - "time" - - "github.com/Sirupsen/logrus" -) - -const ( - format = "Jan 2 15:04:05" -) - -// PapertrailHook to send logs to a logging service compatible with the Papertrail API. -type PapertrailHook struct { - Host string - Port int - AppName string - UDPConn net.Conn -} - -// NewPapertrailHook creates a hook to be added to an instance of logger. -func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) { - conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port)) - return &PapertrailHook{host, port, appName, conn}, err -} - -// Fire is called when a log event is fired. -func (hook *PapertrailHook) Fire(entry *logrus.Entry) error { - date := time.Now().Format(format) - msg, _ := entry.String() - payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg) - - bytesWritten, err := hook.UDPConn.Write([]byte(payload)) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err) - return err - } - - return nil -} - -// Levels returns the available logging levels. 
-func (hook *PapertrailHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go deleted file mode 100644 index 96318d0030..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package logrus_papertrail - -import ( - "fmt" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/stvp/go-udp-testing" -) - -func TestWritingToUDP(t *testing.T) { - port := 16661 - udp.SetAddr(fmt.Sprintf(":%d", port)) - - hook, err := NewPapertrailHook("localhost", port, "test") - if err != nil { - t.Errorf("Unable to connect to local UDP server.") - } - - log := logrus.New() - log.Hooks.Add(hook) - - udp.ShouldReceive(t, "foo", func() { - log.Info("foo") - }) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md deleted file mode 100644 index 8b1f9a16f3..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# Sentry Hook for Logrus :walrus: - -[Sentry](https://getsentry.com) provides both self-hosted and hosted -solutions for exception tracking. -Both client and server are -[open source](https://github.com/getsentry/sentry). - -## Usage - -Every sentry application defined on the server gets a different -[DSN](https://www.getsentry.com/docs/). In the example below replace -`YOUR_DSN` with the one created for your application. - -```go -import ( - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/sentry" -) - -func main() { - log := logrus.New() - hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - }) - - if err == nil { - log.Hooks.Add(hook) - } -} -``` - -If you wish to initialize a SentryHook with tags, you can use the `NewWithTagsSentryHook` constructor to provide default tags: - -```go -tags := map[string]string{ - "site": "example.com", -} -levels := []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, -} -hook, err := logrus_sentry.NewWithTagsSentryHook(YOUR_DSN, tags, levels) - -``` - - -## Special fields - -Some logrus fields have a special meaning in this hook, -these are `server_name`, `logger` and `http_request`. -When logs are sent to sentry these fields are treated differently. -- `server_name` (also known as hostname) is the name of the server which -is logging the event (hostname.example.com) -- `logger` is the part of the application which is logging the event. -In go this usually means setting it to the name of the package. -- `http_request` is the in-coming request(*http.Request). The detailed request data are sent to Sentry. - -## Timeout - -`Timeout` is the time the sentry hook will wait for a response -from the sentry server. - -If this time elapses with no response from -the server an error will be returned. - -If `Timeout` is set to 0 the SentryHook will not wait for a reply -and will assume a correct delivery. - -The SentryHook has a default timeout of `100 milliseconds` when created -with a call to `NewSentryHook`. 
This can be changed by assigning a value to the `Timeout` field: - -```go -hook, _ := logrus_sentry.NewSentryHook(...) -hook.Timeout = 20*time.Second -``` diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go deleted file mode 100644 index 4d184b2fc9..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go +++ /dev/null @@ -1,131 +0,0 @@ -package logrus_sentry - -import ( - "fmt" - "net/http" - "time" - - "github.com/Sirupsen/logrus" - "github.com/getsentry/raven-go" -) - -var ( - severityMap = map[logrus.Level]raven.Severity{ - logrus.DebugLevel: raven.DEBUG, - logrus.InfoLevel: raven.INFO, - logrus.WarnLevel: raven.WARNING, - logrus.ErrorLevel: raven.ERROR, - logrus.FatalLevel: raven.FATAL, - logrus.PanicLevel: raven.FATAL, - } -) - -func getAndDel(d logrus.Fields, key string) (string, bool) { - var ( - ok bool - v interface{} - val string - ) - if v, ok = d[key]; !ok { - return "", false - } - - if val, ok = v.(string); !ok { - return "", false - } - delete(d, key) - return val, true -} - -func getAndDelRequest(d logrus.Fields, key string) (*http.Request, bool) { - var ( - ok bool - v interface{} - req *http.Request - ) - if v, ok = d[key]; !ok { - return nil, false - } - if req, ok = v.(*http.Request); !ok || req == nil { - return nil, false - } - delete(d, key) - return req, true -} - -// SentryHook delivers logs to a sentry server. -type SentryHook struct { - // Timeout sets the time to wait for a delivery error from the sentry server. - // If this is set to zero the server will not wait for any response and will - // consider the message correctly sent - Timeout time.Duration - - client *raven.Client - levels []logrus.Level -} - -// NewSentryHook creates a hook to be added to an instance of logger -// and initializes the raven client. -// This method sets the timeout to 100 milliseconds. -func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) { - client, err := raven.New(DSN) - if err != nil { - return nil, err - } - return &SentryHook{100 * time.Millisecond, client, levels}, nil -} - -// NewWithTagsSentryHook creates a hook with tags to be added to an instance -// of logger and initializes the raven client. This method sets the timeout to -// 100 milliseconds. 
-func NewWithTagsSentryHook(DSN string, tags map[string]string, levels []logrus.Level) (*SentryHook, error) { - client, err := raven.NewWithTags(DSN, tags) - if err != nil { - return nil, err - } - return &SentryHook{100 * time.Millisecond, client, levels}, nil -} - -// Called when an event should be sent to sentry -// Special fields that sentry uses to give more information to the server -// are extracted from entry.Data (if they are found) -// These fields are: logger, server_name and http_request -func (hook *SentryHook) Fire(entry *logrus.Entry) error { - packet := &raven.Packet{ - Message: entry.Message, - Timestamp: raven.Timestamp(entry.Time), - Level: severityMap[entry.Level], - Platform: "go", - } - - d := entry.Data - - if logger, ok := getAndDel(d, "logger"); ok { - packet.Logger = logger - } - if serverName, ok := getAndDel(d, "server_name"); ok { - packet.ServerName = serverName - } - if req, ok := getAndDelRequest(d, "http_request"); ok { - packet.Interfaces = append(packet.Interfaces, raven.NewHttp(req)) - } - packet.Extra = map[string]interface{}(d) - - _, errCh := hook.client.Capture(packet, nil) - timeout := hook.Timeout - if timeout != 0 { - timeoutCh := time.After(timeout) - select { - case err := <-errCh: - return err - case <-timeoutCh: - return fmt.Errorf("no response from sentry server in %s", timeout) - } - } - return nil -} - -// Levels returns the available logging levels. -func (hook *SentryHook) Levels() []logrus.Level { - return hook.levels -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go deleted file mode 100644 index 5f59f699cb..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package logrus_sentry - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/getsentry/raven-go" -) - -const ( - message = "error message" - server_name = "testserver.internal" - logger_name = "test.logger" -) - -func getTestLogger() *logrus.Logger { - l := logrus.New() - l.Out = ioutil.Discard - return l -} - -func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) { - pch := make(chan *raven.Packet, 1) - s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - d := json.NewDecoder(req.Body) - p := &raven.Packet{} - err := d.Decode(p) - if err != nil { - t.Fatal(err.Error()) - } - - pch <- p - })) - defer s.Close() - - fragments := strings.SplitN(s.URL, "://", 2) - dsn := fmt.Sprintf( - "%s://public:secret@%s/sentry/project-id", - fragments[0], - fragments[1], - ) - tf(dsn, pch) -} - -func TestSpecialFields(t *testing.T) { - WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { - logger := getTestLogger() - - hook, err := NewSentryHook(dsn, []logrus.Level{ - logrus.ErrorLevel, - }) - - if err != nil { - t.Fatal(err.Error()) - } - logger.Hooks.Add(hook) - - req, _ := http.NewRequest("GET", "url", nil) - logger.WithFields(logrus.Fields{ - "server_name": server_name, - "logger": logger_name, - "http_request": req, - }).Error(message) - - packet := <-pch - if packet.Logger != logger_name { - t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger) - } - - if packet.ServerName != server_name { - t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName) - } 
-	})
-}
-
-func TestSentryHandler(t *testing.T) {
-	WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
-		logger := getTestLogger()
-		hook, err := NewSentryHook(dsn, []logrus.Level{
-			logrus.ErrorLevel,
-		})
-		if err != nil {
-			t.Fatal(err.Error())
-		}
-		logger.Hooks.Add(hook)
-
-		logger.Error(message)
-		packet := <-pch
-		if packet.Message != message {
-			t.Errorf("message should have been %s, was %s", message, packet.Message)
-		}
-	})
-}
-
-func TestSentryTags(t *testing.T) {
-	WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
-		logger := getTestLogger()
-		tags := map[string]string{
-			"site": "test",
-		}
-		levels := []logrus.Level{
-			logrus.ErrorLevel,
-		}
-
-		hook, err := NewWithTagsSentryHook(dsn, tags, levels)
-		if err != nil {
-			t.Fatal(err.Error())
-		}
-
-		logger.Hooks.Add(hook)
-
-		logger.Error(message)
-		packet := <-pch
-		expected := raven.Tags{
-			raven.Tag{
-				Key:   "site",
-				Value: "test",
-			},
-		}
-		if !reflect.DeepEqual(packet.Tags, expected) {
-			t.Errorf("tags should have been %+v, were %+v", expected, packet.Tags)
-		}
-	})
-}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
deleted file mode 100644
index 4dbb8e7290..0000000000
--- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Syslog Hooks for Logrus :walrus:
-
-## Usage
-
-```go
-import (
-	"log/syslog"
-	"github.com/Sirupsen/logrus"
-	logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
-)
-
-func main() {
-	log := logrus.New()
-	hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
-
-	if err == nil {
-		log.Hooks.Add(hook)
-	}
-}
-```
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
deleted file mode 100644
index b6fa374628..0000000000
--- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package logrus_syslog
-
-import (
-	"fmt"
-	"github.com/Sirupsen/logrus"
-	"log/syslog"
-	"os"
-)
-
-// SyslogHook to send logs via syslog.
-type SyslogHook struct {
-	Writer        *syslog.Writer
-	SyslogNetwork string
-	SyslogRaddr   string
-}
-
-// NewSyslogHook creates a hook to be added to an instance of logger.
This is called with -// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` -// `if err == nil { log.Hooks.Add(hook) }` -func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { - w, err := syslog.Dial(network, raddr, priority, tag) - return &SyslogHook{w, network, raddr}, err -} - -func (hook *SyslogHook) Fire(entry *logrus.Entry) error { - line, err := entry.String() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) - return err - } - - switch entry.Level { - case logrus.PanicLevel: - return hook.Writer.Crit(line) - case logrus.FatalLevel: - return hook.Writer.Crit(line) - case logrus.ErrorLevel: - return hook.Writer.Err(line) - case logrus.WarnLevel: - return hook.Writer.Warning(line) - case logrus.InfoLevel: - return hook.Writer.Info(line) - case logrus.DebugLevel: - return hook.Writer.Debug(line) - default: - return nil - } -} - -func (hook *SyslogHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go deleted file mode 100644 index 42762dc10d..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package logrus_syslog - -import ( - "github.com/Sirupsen/logrus" - "log/syslog" - "testing" -) - -func TestLocalhostAddAndPrint(t *testing.T) { - log := logrus.New() - hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err != nil { - t.Errorf("Unable to connect to local syslog.") - } - - log.Hooks.Add(hook) - - for _, level := range hook.Levels() { - if len(log.Hooks[level]) != 1 { - t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) - } - } - - log.Info("Congratulations!") -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5cf4..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
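	// As an illustrative sketch of how a caller would set this (any layout
	// accepted by time.Time.Format works; when left empty,
	// DefaultTimestampFormat is used in Format below):
	//
	//	log := logrus.New()
	//	log.Formatter = &logrus.JSONFormatter{
	//		TimestampFormat: time.RFC3339Nano,
	//	}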
- TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go deleted file mode 100644 index 1d70873254..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package logrus - -import ( - "encoding/json" - "errors" - - "testing" -) - -func TestErrorNotLost(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["error"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["omg"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestFieldClashWithTime(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("time", "right now!")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.time"] != "right now!" 
{
		t.Fatal("fields.time not set to original time field")
	}

	if entry["time"] != "0001-01-01T00:00:00Z" {
		t.Fatal("time field not set to current time, was: ", entry["time"])
	}
}

func TestFieldClashWithMsg(t *testing.T) {
	formatter := &JSONFormatter{}

	b, err := formatter.Format(WithField("msg", "something"))
	if err != nil {
		t.Fatal("Unable to format entry: ", err)
	}

	entry := make(map[string]interface{})
	err = json.Unmarshal(b, &entry)
	if err != nil {
		t.Fatal("Unable to unmarshal formatted entry: ", err)
	}

	if entry["fields.msg"] != "something" {
		t.Fatal("fields.msg not set to original msg field")
	}
}

func TestFieldClashWithLevel(t *testing.T) {
	formatter := &JSONFormatter{}

	b, err := formatter.Format(WithField("level", "something"))
	if err != nil {
		t.Fatal("Unable to format entry: ", err)
	}

	entry := make(map[string]interface{})
	err = json.Unmarshal(b, &entry)
	if err != nil {
		t.Fatal("Unable to unmarshal formatted entry: ", err)
	}

	if entry["fields.level"] != "something" {
		t.Fatal("fields.level not set to original level field")
	}
}

func TestJSONEntryEndsWithNewline(t *testing.T) {
	formatter := &JSONFormatter{}

	b, err := formatter.Format(WithField("level", "something"))
	if err != nil {
		t.Fatal("Unable to format entry: ", err)
	}

	if b[len(b)-1] != '\n' {
		t.Fatal("Expected JSON log entry to end with a newline")
	}
}
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
deleted file mode 100644
index e4974bfbe7..0000000000
--- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package logrus
-
-import (
-	"io"
-	"os"
-	"sync"
-)
-
-type Logger struct {
-	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
-	// file, or leave it default which is `os.Stderr`. You can also set this to
-	// something more adventurous, such as logging to Kafka.
-	Out io.Writer
-	// Hooks for the logger instance. These allow firing events based on logging
-	// levels and log entries. For example, to send errors to an error tracking
-	// service, log to StatsD or dump the core on fatal errors.
-	Hooks LevelHooks
-	// All log entries pass through the formatter before logged to Out. The
-	// included formatters are `TextFormatter` and `JSONFormatter` for which
-	// TextFormatter is the default. In development (when a TTY is attached) it
-	// logs with colors, but to a file it wouldn't. You can easily implement your
-	// own that implements the `Formatter` interface, see the `README` or included
-	// formatters for examples.
-	Formatter Formatter
-	// The logging level the logger should log at. This is typically (and defaults
-	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
-	// logged. `logrus.Debug` is useful in development and debugging.
-	Level Level
-	// Used to sync writing to the log.
-	mu sync.Mutex
-}
-
-// Creates a new logger. Configuration should be set by changing `Formatter`,
-// `Out` and `Hooks` directly on the default logger instance. You can also just
-// instantiate your own:
-//
-//    var log = &Logger{
-//      Out: os.Stderr,
-//      Formatter: new(JSONFormatter),
-//      Hooks: make(LevelHooks),
-//      Level: logrus.DebugLevel,
-//    }
-//
-// It's recommended to make this a global instance called `log`.
-func New() *Logger {
-	return &Logger{
-		Out:       os.Stderr,
-		Formatter: new(TextFormatter),
-		Hooks:     make(LevelHooks),
-		Level:     InfoLevel,
-	}
-}
-
-// Adds a field to the log entry; note that it doesn't log until you call
-// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
-// If you want multiple fields, use `WithFields`.
-func (logger *Logger) WithField(key string, value interface{}) *Entry {
-	return NewEntry(logger).WithField(key, value)
-}
-
-// Adds a struct of fields to the log entry. All it does is call `WithField` for
-// each `Field`.
-func (logger *Logger) WithFields(fields Fields) *Entry {
-	return NewEntry(logger).WithFields(fields)
-}
-
-func (logger *Logger) Debugf(format string, args ...interface{}) {
-	if logger.Level >= DebugLevel {
-		NewEntry(logger).Debugf(format, args...)
-	}
-}
-
-func (logger *Logger) Infof(format string, args ...interface{}) {
-	if logger.Level >= InfoLevel {
-		NewEntry(logger).Infof(format, args...)
-	}
-}
-
-func (logger *Logger) Printf(format string, args ...interface{}) {
-	NewEntry(logger).Printf(format, args...)
-}
-
-func (logger *Logger) Warnf(format string, args ...interface{}) {
-	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warnf(format, args...)
-	}
-}
-
-func (logger *Logger) Warningf(format string, args ...interface{}) {
-	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warnf(format, args...)
-	}
-}
-
-func (logger *Logger) Errorf(format string, args ...interface{}) {
-	if logger.Level >= ErrorLevel {
-		NewEntry(logger).Errorf(format, args...)
-	}
-}
-
-func (logger *Logger) Fatalf(format string, args ...interface{}) {
-	if logger.Level >= FatalLevel {
-		NewEntry(logger).Fatalf(format, args...)
-	}
-	os.Exit(1)
-}
-
-func (logger *Logger) Panicf(format string, args ...interface{}) {
-	if logger.Level >= PanicLevel {
-		NewEntry(logger).Panicf(format, args...)
-	}
-}
-
-func (logger *Logger) Debug(args ...interface{}) {
-	if logger.Level >= DebugLevel {
-		NewEntry(logger).Debug(args...)
-	}
-}
-
-func (logger *Logger) Info(args ...interface{}) {
-	if logger.Level >= InfoLevel {
-		NewEntry(logger).Info(args...)
-	}
-}
-
-func (logger *Logger) Print(args ...interface{}) {
-	NewEntry(logger).Info(args...)
-}
-
-func (logger *Logger) Warn(args ...interface{}) {
-	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warn(args...)
-	}
-}
-
-func (logger *Logger) Warning(args ...interface{}) {
-	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warn(args...)
-	}
-}
-
-func (logger *Logger) Error(args ...interface{}) {
-	if logger.Level >= ErrorLevel {
-		NewEntry(logger).Error(args...)
-	}
-}
-
-func (logger *Logger) Fatal(args ...interface{}) {
-	if logger.Level >= FatalLevel {
-		NewEntry(logger).Fatal(args...)
-	}
-	os.Exit(1)
-}
-
-func (logger *Logger) Panic(args ...interface{}) {
-	if logger.Level >= PanicLevel {
-		NewEntry(logger).Panic(args...)
-	}
-}
-
-func (logger *Logger) Debugln(args ...interface{}) {
-	if logger.Level >= DebugLevel {
-		NewEntry(logger).Debugln(args...)
-	}
-}
-
-func (logger *Logger) Infoln(args ...interface{}) {
-	if logger.Level >= InfoLevel {
-		NewEntry(logger).Infoln(args...)
-	}
-}
-
-func (logger *Logger) Println(args ...interface{}) {
-	NewEntry(logger).Println(args...)
-}
-
-func (logger *Logger) Warnln(args ...interface{}) {
-	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warnln(args...)
-	}
-}
-
-func (logger *Logger) Warningln(args ...interface{}) {
-	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warnln(args...)
- } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e90e..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
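// As a sketch (the NewServer name is hypothetical, for illustration only),
// a library that takes a StdLogger works with both:
//
//	func NewServer(l StdLogger) { l.Println("starting") }
//
//	NewServer(log.New(os.Stderr, "", log.LstdFlags)) // stdlib *log.Logger
//	NewServer(logrus.New())                          // *logrus.Logger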
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go deleted file mode 100644 index efaacea236..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "strconv" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - log(logger) - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assertions(fields) -} - -func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { - var buffer bytes.Buffer - - logger := New() - logger.Out = &buffer - logger.Formatter = &TextFormatter{ - DisableColors: true, - } - - log(logger) - - fields := make(map[string]string) - for _, kv := range strings.Split(buffer.String(), " ") { - if !strings.Contains(kv, "=") { - continue - } - kvArr := strings.Split(kv, "=") - key := strings.TrimSpace(kvArr[0]) - val := kvArr[1] - if kvArr[1][0] == '"' { - var err error - val, err = strconv.Unquote(val) - assert.NoError(t, err) - } - fields[key] = val - } - assertions(fields) -} - -func TestPrint(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestInfo(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestWarn(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Warn("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "warning") - }) -} - -func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test test") - }) -} - -func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test 10") - }) -} - -func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", "test") 
- }, func(fields Fields) { - assert.Equal(t, fields["msg"], "testtest") - }) -} - -func TestWithFieldsShouldAllowAssignments(t *testing.T) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - localLog := logger.WithFields(Fields{ - "key1": "value1", - }) - - localLog.WithField("key2", "value2").Info("test") - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assert.Equal(t, "value2", fields["key2"]) - assert.Equal(t, "value1", fields["key1"]) - - buffer = bytes.Buffer{} - fields = Fields{} - localLog.Info("test") - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - _, ok := fields["key2"] - assert.Equal(t, false, ok) - assert.Equal(t, "value1", fields["key1"]) -} - -func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - }) -} - -func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["fields.msg"], "hello") - }) -} - -func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("time", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["fields.time"], "hello") - }) -} - -func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("level", 1).Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only - }) -} - -func TestDefaultFieldsAreNotPrefixed(t *testing.T) { - LogAndAssertText(t, func(log *Logger) { - ll := log.WithField("herp", "derp") - ll.Info("hello") - ll.Info("bye") - }, func(fields map[string]string) { - for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { - if _, ok := fields[fieldName]; ok { - t.Fatalf("should not have prefixed %q: %v", fieldName, fields) - } - } - }) -} - -func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { - - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - llog := logger.WithField("context", "eating raw fish") - - llog.Info("looks delicious") - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded first message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "looks delicious") - assert.Equal(t, fields["context"], "eating raw fish") - - buffer.Reset() - - llog.Warn("omg it is!") - - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded second message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "omg it is!") - assert.Equal(t, fields["context"], "eating raw fish") - assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") - -} - -func TestConvertLevelToString(t *testing.T) { - assert.Equal(t, "debug", DebugLevel.String()) - assert.Equal(t, "info", InfoLevel.String()) - assert.Equal(t, "warning", WarnLevel.String()) - assert.Equal(t, "error", ErrorLevel.String()) - assert.Equal(t, "fatal", 
FatalLevel.String()) - assert.Equal(t, "panic", PanicLevel.String()) -} - -func TestParseLevel(t *testing.T) { - l, err := ParseLevel("panic") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("fatal") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("error") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("warn") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("warning") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("info") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("debug") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("invalid") - assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) -} - -func TestGetSetLevelRace(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - if i%2 == 0 { - SetLevel(InfoLevel) - } else { - GetLevel() - } - }(i) - - } - wg.Wait() -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 71f8d67a55..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40db6..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 4bb5376028..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f7e3..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 17cc298484..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,159 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. 
- DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys, timestampFormat) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - f.appendKeyValue(b, "msg", entry.Message) - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return false - } - } - return true -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - - b.WriteString(key) - b.WriteByte('=') - - switch value := value.(type) { - case string: - if needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%q", value) - } - case error: - errmsg := value.Error() - if needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%q", value) - } - default: - fmt.Fprint(b, value) - } - - b.WriteByte(' ') -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go deleted file mode 100644 index e25a44f67b..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package logrus - -import ( - "bytes" - "errors" - "testing" - "time" -) - -func TestQuoting(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - checkQuoting := func(q bool, value interface{}) { - b, _ := tf.Format(WithField("test", value)) - idx := bytes.Index(b, ([]byte)("test=")) - cont := bytes.Contains(b[idx+5:], []byte{'"'}) - if cont != q { - if q { - t.Errorf("quoting expected for: %#v", value) - } else { - t.Errorf("quoting not expected for: %#v", value) - } - } - } - - checkQuoting(false, "abcd") - checkQuoting(false, "v1.0") - checkQuoting(false, "1234567890") - checkQuoting(true, "/foobar") - checkQuoting(true, "x y") - checkQuoting(true, "x,y") - checkQuoting(false, errors.New("invalid")) - checkQuoting(true, errors.New("invalid argument")) -} 
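// A minimal illustrative sketch in the style of the tests above: with colors
// disabled, Format emits key=value pairs led by the time/level/msg trio,
// quoting only values that need it.
func TestKeyValueLayoutSketch(t *testing.T) {
	tf := &TextFormatter{DisableColors: true}

	b, err := tf.Format(WithField("path", "/foobar"))
	if err != nil {
		t.Fatal("Unable to format entry: ", err)
	}

	if !bytes.HasPrefix(b, []byte("time=")) {
		t.Errorf("expected output to begin with time=, got: %s", b)
	}
	// '/' falls outside the unquoted character set, so the value is quoted.
	if !bytes.Contains(b, []byte(`path="/foobar"`)) {
		t.Errorf("expected quoted field value, got: %s", b)
	}
}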
- -func TestTimestampFormat(t *testing.T) { - checkTimeStr := func(format string) { - customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} - customStr, _ := customFormatter.Format(WithField("test", "test")) - timeStart := bytes.Index(customStr, ([]byte)("time=")) - timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5 : timeEnd-1] - if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { - timeStr = timeStr[1 : len(timeStr)-1] - } - if format == "" { - format = time.RFC3339 - } - _, e := time.Parse(format, (string)(timeStr)) - if e != nil { - t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) - } - } - - checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") - checkTimeStr("Mon Jan _2 15:04:05 2006") - checkTimeStr("") -} - -// TODO add tests for sorting etc., this requires a parser for the text -// formatter output. diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c753..0000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/README.md b/Godeps/_workspace/src/github.com/beorn7/perks/README.md deleted file mode 100644 index fc05777701..0000000000 --- a/Godeps/_workspace/src/github.com/beorn7/perks/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Perks for Go (golang.org) - -Perks contains the Go package quantile that computes approximate quantiles over -an unbounded data stream within low memory and CPU bounds. - -For more information and examples, see: -http://godoc.org/github.com/bmizerany/perks - -A very special thank you and shout out to Graham Cormode (Rutgers University), -Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and -Divesh Srivastava (AT&T Labs–Research) for their research and publication of -[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) - -Thank you, also: -* Armon Dadgar (@armon) -* Andrew Gerrand (@nf) -* Brad Fitzpatrick (@bradfitz) -* Keith Rarick (@kr) - -FAQ: - -Q: Why not move the quantile package into the project root? -A: I want to add more packages to perks later. - -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/bench_test.go b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/bench_test.go deleted file mode 100644 index 0bd0e4e775..0000000000 --- a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/bench_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package quantile - -import ( - "testing" -) - -func BenchmarkInsertTargeted(b *testing.B) { - b.ReportAllocs() - - s := NewTargeted(Targets) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) { - s := NewTargeted(TargetsSmallEpsilon) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkInsertBiased(b *testing.B) { - s := NewLowBiased(0.01) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) { - s := NewLowBiased(0.0001) - b.ResetTimer() - for i := float64(0); i < float64(b.N); i++ { - s.Insert(i) - } -} - -func BenchmarkQuery(b *testing.B) { - s := NewTargeted(Targets) - for i := float64(0); i < 1e6; i++ { - s.Insert(i) - } - b.ResetTimer() - n := float64(b.N) - for i := float64(0); i < n; i++ { - s.Query(i / n) - } -} - -func BenchmarkQuerySmallEpsilon(b *testing.B) { - s := NewTargeted(TargetsSmallEpsilon) - for i := float64(0); i < 1e6; i++ { - s.Insert(i) - } - b.ResetTimer() - n := float64(b.N) - for i := float64(0); i < n; i++ { - s.Query(i / n) - } -} diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/example_test.go b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/example_test.go deleted file mode 100644 index ab3293aaf2..0000000000 --- a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/example_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build go1.1 - -package quantile_test - -import ( - "bufio" - "fmt" - "log" - "os" - "strconv" - "time" - - "github.com/beorn7/perks/quantile" -) - -func Example_simple() { - ch := make(chan float64) - go sendFloats(ch) - - // Compute the 50th, 90th, and 99th percentile. - q := quantile.NewTargeted(map[float64]float64{ - 0.50: 0.005, - 0.90: 0.001, - 0.99: 0.0001, - }) - for v := range ch { - q.Insert(v) - } - - fmt.Println("perc50:", q.Query(0.50)) - fmt.Println("perc90:", q.Query(0.90)) - fmt.Println("perc99:", q.Query(0.99)) - fmt.Println("count:", q.Count()) - // Output: - // perc50: 5 - // perc90: 16 - // perc99: 223 - // count: 2388 -} - -func Example_mergeMultipleStreams() { - // Scenario: - // We have multiple database shards. On each shard, there is a process - // collecting query response times from the database logs and inserting - // them into a Stream (created via NewTargeted(0.90)), much like the - // Simple example. These processes expose a network interface for us to - // ask them to serialize and send us the results of their - // Stream.Samples so we may Merge and Query them. 
-	//
-	// NOTES:
-	// * These sample sets are small, allowing us to get them
-	// across the network much faster than sending the entire list of data
-	// points.
-	//
-	// * For this to work correctly, we must supply the same quantiles
-	// a priori that the process collecting the samples supplied to
-	// NewTargeted, even if we do not plan to query them all here.
-	ch := make(chan quantile.Samples)
-	getDBQuerySamples(ch)
-	q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
-	for samples := range ch {
-		q.Merge(samples)
-	}
-	fmt.Println("perc90:", q.Query(0.90))
-}
-
-func Example_window() {
-	// Scenario: We want the 90th, 95th, and 99th percentiles for each
-	// minute.
-
-	ch := make(chan float64)
-	go sendStreamValues(ch)
-
-	tick := time.NewTicker(1 * time.Minute)
-	q := quantile.NewTargeted(map[float64]float64{
-		0.90: 0.001,
-		0.95: 0.0005,
-		0.99: 0.0001,
-	})
-	for {
-		select {
-		case t := <-tick.C:
-			flushToDB(t, q.Samples())
-			q.Reset()
-		case v := <-ch:
-			q.Insert(v)
-		}
-	}
-}
-
-func sendStreamValues(ch chan float64) {
-	// Use your imagination
-}
-
-func flushToDB(t time.Time, samples quantile.Samples) {
-	// Use your imagination
-}
-
-// This is a stub for the above example. In reality this would hit the remote
-// servers via http or something like it.
-func getDBQuerySamples(ch chan quantile.Samples) {}
-
-func sendFloats(ch chan<- float64) {
-	f, err := os.Open("exampledata.txt")
-	if err != nil {
-		log.Fatal(err)
-	}
-	sc := bufio.NewScanner(f)
-	for sc.Scan() {
-		b := sc.Bytes()
-		v, err := strconv.ParseFloat(string(b), 64)
-		if err != nil {
-			log.Fatal(err)
-		}
-		ch <- v
-	}
-	if sc.Err() != nil {
-		log.Fatal(sc.Err())
-	}
-	close(ch)
-}
diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/exampledata.txt b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/exampledata.txt
deleted file mode 100644
index 1602287d7c..0000000000
--- a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/exampledata.txt
+++ /dev/null
@@ -1,2388 +0,0 @@
[2,388 integer sample values elided; raw test data read by sendFloats above]
diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream.go b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream.go
deleted file mode 100644
index 587b1fc5ba..0000000000
--- a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
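-//
-// A minimal sketch of merging two targeted streams (the target map is an
-// arbitrary example; note the correctness caveat documented on Merge):
-//
-//	q1 := NewTargeted(map[float64]float64{0.90: 0.001})
-//	q2 := NewTargeted(map[float64]float64{0.90: 0.001})
-//	// ... q1.Insert and q2.Insert on separate data ...
-//	q1.Merge(q2.Samples())
-//	fmt.Println("perc90:", q1.Query(0.90))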
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
-	"math"
-	"sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
-	Value float64 `json:",string"`
-	Width float64 `json:",string"`
-	Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int           { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
-	ƒ := func(s *stream, r float64) float64 {
-		return 2 * epsilon * r
-	}
-	return newStream(ƒ)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.99, 0.9, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream {
-	ƒ := func(s *stream, r float64) float64 {
-		return 2 * epsilon * (s.n - r)
-	}
-	return newStream(ƒ)
-}
-
-// NewTargeted returns an initialized Stream concerned with a particular set of
-// quantile values that are supplied a priori. Knowing these a priori reduces
-// space and computation time. The targets map maps the desired quantiles to
-// their absolute errors, i.e. the true quantile of a value returned by a query
-// is guaranteed to be within (Quantile±Epsilon).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewTargeted(targets map[float64]float64) *Stream {
-	ƒ := func(s *stream, r float64) float64 {
-		var m = math.MaxFloat64
-		var f float64
-		for quantile, epsilon := range targets {
-			if quantile*s.n <= r {
-				f = (2 * epsilon * r) / quantile
-			} else {
-				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
-			}
-			if f < m {
-				m = f
-			}
-		}
-		return m
-	}
-	return newStream(ƒ)
-}
-
-// Stream computes quantiles for a stream of float64s. It is not thread-safe by
-// design. Take care when using across multiple goroutines.
-type Stream struct {
-	*stream
-	b      Samples
-	sorted bool
-}
-
-func newStream(ƒ invariant) *Stream {
-	x := &stream{ƒ: ƒ}
-	return &Stream{x, make(Samples, 0, 500), true}
-}
-
-// Insert inserts v into the stream.
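-// A minimal usage sketch (the targets and values are arbitrary examples):
-//
-//	s := NewTargeted(map[float64]float64{0.50: 0.005, 0.99: 0.0001})
-//	for _, v := range []float64{1.2, 0.4, 7.5} {
-//		s.Insert(v)
-//	}
-//	fmt.Println(s.Query(0.99))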
-func (s *Stream) Insert(v float64) {
-	s.insert(Sample{Value: v, Width: 1})
-}
-
-func (s *Stream) insert(sample Sample) {
-	s.b = append(s.b, sample)
-	s.sorted = false
-	if len(s.b) == cap(s.b) {
-		s.flush()
-	}
-}
-
-// Query returns the computed qth percentile value. If s was created with
-// NewTargeted, and q is not in the set of quantiles provided a priori, Query
-// will return an unspecified result.
-func (s *Stream) Query(q float64) float64 {
-	if !s.flushed() {
-		// Fast path when there hasn't been enough data for a flush;
-		// this also yields better accuracy for small sets of data.
-		l := len(s.b)
-		if l == 0 {
-			return 0
-		}
-		i := int(float64(l) * q)
-		if i > 0 {
-			i -= 1
-		}
-		s.maybeSort()
-		return s.b[i].Value
-	}
-	s.flush()
-	return s.stream.query(q)
-}
-
-// Merge merges samples into the underlying stream's samples. This is handy
-// when merging multiple streams from separate threads, database shards, etc.
-//
-// ATTENTION: This method is broken and does not yield correct results. The
-// underlying algorithm is not capable of merging streams correctly.
-func (s *Stream) Merge(samples Samples) {
-	sort.Sort(samples)
-	s.stream.merge(samples)
-}
-
-// Reset reinitializes and clears the list, reusing the samples buffer memory.
-func (s *Stream) Reset() {
-	s.stream.reset()
-	s.b = s.b[:0]
-}
-
-// Samples returns stream samples held by s.
-func (s *Stream) Samples() Samples {
-	if !s.flushed() {
-		return s.b
-	}
-	s.flush()
-	return s.stream.samples()
-}
-
-// Count returns the total number of samples observed in the stream
-// since initialization.
-func (s *Stream) Count() int {
-	return len(s.b) + s.stream.count()
-}
-
-func (s *Stream) flush() {
-	s.maybeSort()
-	s.stream.merge(s.b)
-	s.b = s.b[:0]
-}
-
-func (s *Stream) maybeSort() {
-	if !s.sorted {
-		s.sorted = true
-		sort.Sort(s.b)
-	}
-}
-
-func (s *Stream) flushed() bool {
-	return len(s.stream.l) > 0
-}
-
-type stream struct {
-	n float64
-	l []Sample
-	ƒ invariant
-}
-
-func (s *stream) reset() {
-	s.l = s.l[:0]
-	s.n = 0
-}
-
-func (s *stream) insert(v float64) {
-	s.merge(Samples{{v, 1, 0}})
-}
-
-func (s *stream) merge(samples Samples) {
-	// TODO(beorn7): This tries to merge not only individual samples, but
-	// whole summaries. The paper doesn't mention merging summaries at
-	// all. Unit tests show that the merging is inaccurate. Find out how
-	// to do merges properly.
-	var r float64
-	i := 0
-	for _, sample := range samples {
-		for ; i < len(s.l); i++ {
-			c := s.l[i]
-			if c.Value > sample.Value {
-				// Insert at position i.
-				s.l = append(s.l, Sample{})
-				copy(s.l[i+1:], s.l[i:])
-				s.l[i] = Sample{
-					sample.Value,
-					sample.Width,
-					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
-					// TODO(beorn7): How to calculate delta correctly?
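-					// The delta assigned above follows the paper's
-					// insertion rule: a newly inserted sample may
-					// carry a delta of at most floor(ƒ(s, r)) - 1,
-					// where r is the rank weight accumulated before
-					// this position.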
- } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream_test.go b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream_test.go deleted file mode 100644 index 4dba05449c..0000000000 --- a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package quantile - -import ( - "math" - "math/rand" - "sort" - "testing" -) - -var ( - Targets = map[float64]float64{ - 0.01: 0.001, - 0.10: 0.01, - 0.50: 0.05, - 0.90: 0.01, - 0.99: 0.001, - } - TargetsSmallEpsilon = map[float64]float64{ - 0.01: 0.0001, - 0.10: 0.001, - 0.50: 0.005, - 0.90: 0.001, - 0.99: 0.0001, - } - LowQuantiles = []float64{0.01, 0.1, 0.5} - HighQuantiles = []float64{0.99, 0.9, 0.5} -) - -const RelativeEpsilon = 0.01 - -func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) { - sort.Float64s(a) - for quantile, epsilon := range Targets { - n := float64(len(a)) - k := int(quantile * n) - lower := int((quantile - epsilon) * n) - if lower < 1 { - lower = 1 - } - upper := int(math.Ceil((quantile + epsilon) * n)) - if upper > len(a) { - upper = len(a) - } - w, min, max := a[k-1], a[lower-1], a[upper-1] - if g := s.Query(quantile); g < min || g > max { - t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g) - } - } -} - -func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { - sort.Float64s(a) - for _, qu := range LowQuantiles { - n := float64(len(a)) - k := int(qu * n) - - lowerRank := int((1 - RelativeEpsilon) * qu * n) - upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n)) - w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] - if g := s.Query(qu); g < min || g > max { - t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) - } - } -} - -func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { - sort.Float64s(a) - for _, qu := range HighQuantiles { - n := float64(len(a)) - k := int(qu * n) - - lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n) - upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n)) - w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] - if g := s.Query(qu); g < min || g > max { - t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) - } - } -} - -func populateStream(s *Stream) []float64 { - a := make([]float64, 0, 1e5+100) - for i := 0; i < cap(a); i++ { - v := rand.NormFloat64() - // Add 5% asymmetric outliers. 
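- // (every 20th value is mapped to v*v+1, which is always >= 1, so only
- // the right-hand tail of the otherwise normal data is stretched)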
- if i%20 == 0 { - v = v*v + 1 - } - s.Insert(v) - a = append(a, v) - } - return a -} - -func TestTargetedQuery(t *testing.T) { - rand.Seed(42) - s := NewTargeted(Targets) - a := populateStream(s) - verifyPercsWithAbsoluteEpsilon(t, a, s) -} - -func TestLowBiasedQuery(t *testing.T) { - rand.Seed(42) - s := NewLowBiased(RelativeEpsilon) - a := populateStream(s) - verifyLowPercsWithRelativeEpsilon(t, a, s) -} - -func TestHighBiasedQuery(t *testing.T) { - rand.Seed(42) - s := NewHighBiased(RelativeEpsilon) - a := populateStream(s) - verifyHighPercsWithRelativeEpsilon(t, a, s) -} - -// BrokenTestTargetedMerge is broken, see Merge doc comment. -func BrokenTestTargetedMerge(t *testing.T) { - rand.Seed(42) - s1 := NewTargeted(Targets) - s2 := NewTargeted(Targets) - a := populateStream(s1) - a = append(a, populateStream(s2)...) - s1.Merge(s2.Samples()) - verifyPercsWithAbsoluteEpsilon(t, a, s1) -} - -// BrokenTestLowBiasedMerge is broken, see Merge doc comment. -func BrokenTestLowBiasedMerge(t *testing.T) { - rand.Seed(42) - s1 := NewLowBiased(RelativeEpsilon) - s2 := NewLowBiased(RelativeEpsilon) - a := populateStream(s1) - a = append(a, populateStream(s2)...) - s1.Merge(s2.Samples()) - verifyLowPercsWithRelativeEpsilon(t, a, s2) -} - -// BrokenTestHighBiasedMerge is broken, see Merge doc comment. -func BrokenTestHighBiasedMerge(t *testing.T) { - rand.Seed(42) - s1 := NewHighBiased(RelativeEpsilon) - s2 := NewHighBiased(RelativeEpsilon) - a := populateStream(s1) - a = append(a, populateStream(s2)...) - s1.Merge(s2.Samples()) - verifyHighPercsWithRelativeEpsilon(t, a, s2) -} - -func TestUncompressed(t *testing.T) { - q := NewTargeted(Targets) - for i := 100; i > 0; i-- { - q.Insert(float64(i)) - } - if g := q.Count(); g != 100 { - t.Errorf("want count 100, got %d", g) - } - // Before compression, Query should have 100% accuracy. - for quantile := range Targets { - w := quantile * 100 - if g := q.Query(quantile); g != w { - t.Errorf("want %f, got %f", w, g) - } - } -} - -func TestUncompressedSamples(t *testing.T) { - q := NewTargeted(map[float64]float64{0.99: 0.001}) - for i := 1; i <= 100; i++ { - q.Insert(float64(i)) - } - if g := q.Samples().Len(); g != 100 { - t.Errorf("want count 100, got %d", g) - } -} - -func TestUncompressedOne(t *testing.T) { - q := NewTargeted(map[float64]float64{0.99: 0.01}) - q.Insert(3.14) - if g := q.Query(0.90); g != 3.14 { - t.Error("want PI, got", g) - } -} - -func TestDefaults(t *testing.T) { - if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 { - t.Errorf("want 0, got %f", g) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE b/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b1921ef..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index f1f06564a1..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto - make diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go deleted file mode 100644 index 5a9b6a47f6..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go +++ /dev/null @@ -1,2083 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "runtime/debug" - "strings" - "testing" - "time" - - . "github.com/golang/protobuf/proto" - . "github.com/golang/protobuf/proto/testdata" -) - -var globalO *Buffer - -func old() *Buffer { - if globalO == nil { - globalO = NewBuffer(nil) - } - globalO.Reset() - return globalO -} - -func equalbytes(b1, b2 []byte, t *testing.T) { - if len(b1) != len(b2) { - t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) - return - } - for i := 0; i < len(b1); i++ { - if b1[i] != b2[i] { - t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) - } - } -} - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) 
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = []byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} - -func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { - data := b.Bytes() - ld := len(data) - ls := len(s) / 2 - - fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) - - // find the interesting spot - n - n := ls - if ld < ls { - n = ld - } - j := 0 - for i := 0; i < n; i++ { - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - if data[i] == bs { - continue - } - n = i - break - } - l := n - 10 - if l < 0 { - l = 0 - } - h := n + 10 - - // find the interesting spot - n - fmt.Printf("is[%d]:", l) - for i := l; i < h; i++ { - if i >= ld { - fmt.Printf(" --") - continue - } - fmt.Printf(" %.2x", data[i]) - } - fmt.Printf("\n") - - fmt.Printf("sb[%d]:", l) - for i := l; i < h; i++ { - if i >= ls { - fmt.Printf(" --") - continue - } - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - fmt.Printf(" %.2x", bs) - } - fmt.Printf("\n") - - t.Fail() - - // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) - // Print the output in a partially-decoded format; can - // be helpful when updating the test. It produces the output - // that is pasted, with minor edits, into the argument to verify(). 
- // data := b.Bytes() - // nesting := 0 - // for b.Len() > 0 { - // start := len(data) - b.Len() - // var u uint64 - // u, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // wire := u & 0x7 - // tag := u >> 3 - // switch wire { - // case WireVarint: - // v, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed32: - // v, err := DecodeFixed32(b) - // if err != nil { - // fmt.Printf("decode error on fixed32:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed64: - // v, err := DecodeFixed64(b) - // if err != nil { - // fmt.Printf("decode error on fixed64:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireBytes: - // nb, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // after_tag := len(data) - b.Len() - // str := make([]byte, nb) - // _, err = b.Read(str) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", - // data[start:after_tag], str, tag, wire) - // case WireStartGroup: - // nesting++ - // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // case WireEndGroup: - // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // nesting-- - // default: - // fmt.Printf("unrecognized wire type %d\n", wire) - // return - // } - // } -} - -func hex(c uint8) uint8 { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return 10 + c - 'a' - } - if 'A' <= c && c <= 'F' { - return 10 + c - 'A' - } - return 0 -} - -func equal(b []byte, s string, t *testing.T) bool { - if 2*len(b) != len(s) { - // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) - fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) - return false - } - for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { - x := hex(s[j])*16 + hex(s[j+1]) - if b[i] != x { - // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) - fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) - return false - } - } - return true -} - -func overify(t *testing.T, pb *GoTest, expected string) { - o := old() - err := o.Marshal(pb) - if err != nil { - fmt.Printf("overify marshal-1 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 1", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. 
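- // (o still holds the bytes produced by the Marshal call above, so
- // Unmarshal decodes exactly what was just encoded)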
- pbd := new(GoTest) - err = o.Unmarshal(pbd) - if err != nil { - t.Fatalf("overify unmarshal err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - o.Reset() - err = o.Marshal(pbd) - if err != nil { - t.Errorf("overify marshal-2 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 2", o.Bytes()) - t.Fatalf("string = %s", expected) - } -} - -// Simple tests for numeric encode/decode primitives (varint, etc.) -func TestNumericPrimitives(t *testing.T) { - for i := uint64(0); i < 1e6; i += 111 { - o := old() - if o.EncodeVarint(i) != nil { - t.Error("EncodeVarint") - break - } - x, e := o.DecodeVarint() - if e != nil { - t.Fatal("DecodeVarint") - } - if x != i { - t.Fatal("varint decode fail:", i, x) - } - - o = old() - if o.EncodeFixed32(i) != nil { - t.Fatal("encFixed32") - } - x, e = o.DecodeFixed32() - if e != nil { - t.Fatal("decFixed32") - } - if x != i { - t.Fatal("fixed32 decode fail:", i, x) - } - - o = old() - if o.EncodeFixed64(i*1234567) != nil { - t.Error("encFixed64") - break - } - x, e = o.DecodeFixed64() - if e != nil { - t.Error("decFixed64") - break - } - if x != i*1234567 { - t.Error("fixed64 decode fail:", i*1234567, x) - break - } - - o = old() - i32 := int32(i - 12345) - if o.EncodeZigzag32(uint64(i32)) != nil { - t.Fatal("EncodeZigzag32") - } - x, e = o.DecodeZigzag32() - if e != nil { - t.Fatal("DecodeZigzag32") - } - if x != uint64(uint32(i32)) { - t.Fatal("zigzag32 decode fail:", i32, x) - } - - o = old() - i64 := int64(i - 12345) - if o.EncodeZigzag64(uint64(i64)) != nil { - t.Fatal("EncodeZigzag64") - } - x, e = o.DecodeZigzag64() - if e != nil { - t.Fatal("DecodeZigzag64") - } - if x != uint64(i64) { - t.Fatal("zigzag64 decode fail:", i64, x) - } - } -} - -// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. -type fakeMarshaler struct { - b []byte - err error -} - -func (f fakeMarshaler) Marshal() ([]byte, error) { - return f.b, f.err -} - -func (f fakeMarshaler) String() string { - return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) -} - -func (f fakeMarshaler) ProtoMessage() {} - -func (f fakeMarshaler) Reset() {} - -// Simple tests for proto messages that implement the Marshaler interface. -func TestMarshalerEncoding(t *testing.T) { - tests := []struct { - name string - m Message - want []byte - wantErr error - }{ - { - name: "Marshaler that fails", - m: fakeMarshaler{ - err: errors.New("some marshal err"), - b: []byte{5, 6, 7}, - }, - // Since there's an error, nothing should be written to buffer. 
- want: nil, - wantErr: errors.New("some marshal err"), - }, - { - name: "Marshaler that succeeds", - m: fakeMarshaler{ - b: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - wantErr: nil, - }, - } - for _, test := range tests { - b := NewBuffer(nil) - err := b.Marshal(test.m) - if !reflect.DeepEqual(test.wantErr, err) { - t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) - } - if !reflect.DeepEqual(test.want, b.Bytes()) { - t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) - } - } -} - -// Simple tests for bytes -func TestBytesPrimitives(t *testing.T) { - o := old() - bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} - if o.EncodeRawBytes(bytes) != nil { - t.Error("EncodeRawBytes") - } - decb, e := o.DecodeRawBytes(false) - if e != nil { - t.Error("DecodeRawBytes") - } - equalbytes(bytes, decb, t) -} - -// Simple tests for strings -func TestStringPrimitives(t *testing.T) { - o := old() - s := "now is the time" - if o.EncodeStringBytes(s) != nil { - t.Error("enc_string") - } - decs, e := o.DecodeStringBytes() - if e != nil { - t.Error("dec_string") - } - if s != decs { - t.Error("string encode/decode fail:", s, decs) - } -} - -// Do we catch the "required bit not set" case? -func TestRequiredBit(t *testing.T) { - o := old() - pb := new(GoTest) - err := o.Marshal(pb) - if err == nil { - t.Error("did not catch missing required fields") - } else if strings.Index(err.Error(), "Kind") < 0 { - t.Error("wrong error type:", err) - } -} - -// Check that all fields are nil. -// Clearly silly, and a residue from a more interesting test with an earlier, -// different initialization property, but it once caught a compiler bug so -// it lives. -func checkInitialized(pb *GoTest, t *testing.T) { - if pb.F_BoolDefaulted != nil { - t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) - } - if pb.F_Int32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) - } - if pb.F_Int64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) - } - if pb.F_Fixed32Defaulted != nil { - t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) - } - if pb.F_Fixed64Defaulted != nil { - t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) - } - if pb.F_Uint32Defaulted != nil { - t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) - } - if pb.F_Uint64Defaulted != nil { - t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) - } - if pb.F_FloatDefaulted != nil { - t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) - } - if pb.F_DoubleDefaulted != nil { - t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) - } - if pb.F_StringDefaulted != nil { - t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) - } - if pb.F_BytesDefaulted != nil { - t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) - } - if pb.F_Sint32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) - } - if pb.F_Sint64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) - } -} - -// Does Reset() reset? 
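-// (that is, Reset must return every defaulted field to nil so that the
-// generated defaults apply again, which is what checkInitialized verifies)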
-func TestReset(t *testing.T) { - pb := initGoTest(true) - // muck with some values - pb.F_BoolDefaulted = Bool(false) - pb.F_Int32Defaulted = Int32(237) - pb.F_Int64Defaulted = Int64(12346) - pb.F_Fixed32Defaulted = Uint32(32000) - pb.F_Fixed64Defaulted = Uint64(666) - pb.F_Uint32Defaulted = Uint32(323232) - pb.F_Uint64Defaulted = nil - pb.F_FloatDefaulted = nil - pb.F_DoubleDefaulted = Float64(0) - pb.F_StringDefaulted = String("gotcha") - pb.F_BytesDefaulted = []byte("asdfasdf") - pb.F_Sint32Defaulted = Int32(123) - pb.F_Sint64Defaulted = Int64(789) - pb.Reset() - checkInitialized(pb, t) -} - -// All required fields set, no defaults provided. -func TestEncodeDecode1(t *testing.T) { - pb := initGoTest(false) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 0x20 - "714000000000000000"+ // field 14, encoding 1, value 0x40 - "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 - "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" - "b304"+ // field 70, encoding 3, start group - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // field 70, encoding 4, end group - "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f") // field 103, encoding 0, 0x7f zigzag64 -} - -// All required fields set, defaults provided. 
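-// The hex strings handed to overify are the exact wire bytes: each field
-// starts with a varint key of the form (field_number << 3) | wire_type,
-// so "0807" is field 1 with wire type 0 (varint) carrying the value 7.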
-func TestEncodeDecode2(t *testing.T) { - pb := initGoTest(true) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All default fields set to their default value by hand -func TestEncodeDecode3(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolDefaulted = Bool(true) - pb.F_Int32Defaulted = Int32(32) - pb.F_Int64Defaulted = Int64(64) - pb.F_Fixed32Defaulted = Uint32(320) - pb.F_Fixed64Defaulted = Uint64(640) - pb.F_Uint32Defaulted = Uint32(3200) - pb.F_Uint64Defaulted = Uint64(6400) - pb.F_FloatDefaulted = Float32(314159) - pb.F_DoubleDefaulted = Float64(271828) - pb.F_StringDefaulted = String("hello, \"world!\"\n") - pb.F_BytesDefaulted = []byte("Bignose") - pb.F_Sint32Defaulted = Int32(-32) - pb.F_Sint64Defaulted = Int64(-64) - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all non-defaulted optional fields have values. -func TestEncodeDecode4(t *testing.T) { - pb := initGoTest(true) - pb.Table = String("hello") - pb.Param = Int32(7) - pb.OptionalField = initGoTestField() - pb.F_BoolOptional = Bool(true) - pb.F_Int32Optional = Int32(32) - pb.F_Int64Optional = Int64(64) - pb.F_Fixed32Optional = Uint32(3232) - pb.F_Fixed64Optional = Uint64(6464) - pb.F_Uint32Optional = Uint32(323232) - pb.F_Uint64Optional = Uint64(646464) - pb.F_FloatOptional = Float32(32.) - pb.F_DoubleOptional = Float64(64.) - pb.F_StringOptional = String("hello") - pb.F_BytesOptional = []byte("Bignose") - pb.F_Sint32Optional = Int32(-32) - pb.F_Sint64Optional = Int64(-64) - pb.Optionalgroup = initGoTest_OptionalGroup() - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" - "1807"+ // field 3, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "f00101"+ // field 30, encoding 0, value 1 - "f80120"+ // field 31, encoding 0, value 32 - "800240"+ // field 32, encoding 0, value 64 - "8d02a00c0000"+ // field 33, encoding 5, value 3232 - "91024019000000000000"+ // field 34, encoding 1, value 6464 - "9802a0dd13"+ // field 35, encoding 0, value 323232 - "a002c0ba27"+ // field 36, encoding 0, value 646464 - "ad0200000042"+ // field 37, encoding 5, value 32.0 - "b1020000000000005040"+ // field 38, encoding 1, value 64.0 - "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "d305"+ // start group field 90 level 1 - 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" - "d405"+ // end group field 90 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" - "f0123f"+ // field 302, encoding 0, value 63 - "f8127f"+ // field 303, encoding 0, value 127 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestEncodeDecode5(t *testing.T) { - pb := initGoTest(true) - pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} - pb.F_BoolRepeated = []bool{false, true} - pb.F_Int32Repeated = []int32{32, 33} - pb.F_Int64Repeated = []int64{64, 65} - pb.F_Fixed32Repeated = []uint32{3232, 3333} - pb.F_Fixed64Repeated = []uint64{6464, 6565} - pb.F_Uint32Repeated = []uint32{323232, 333333} - pb.F_Uint64Repeated = []uint64{646464, 656565} - pb.F_FloatRepeated = []float32{32., 33.} - pb.F_DoubleRepeated = []float64{64., 65.} - pb.F_StringRepeated = []string{"hello", "sailor"} - pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} - pb.F_Sint32Repeated = []int32{32, -32} - pb.F_Sint64Repeated = []int64{64, -64} - pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "a00100"+ // field 20, encoding 0, value 0 - "a00101"+ // field 20, encoding 0, value 1 - "a80120"+ // field 21, encoding 0, value 32 - "a80121"+ // field 21, encoding 0, value 33 - "b00140"+ // field 22, encoding 0, value 64 - "b00141"+ // field 22, encoding 0, value 65 - "bd01a00c0000"+ // field 23, encoding 5, value 3232 - "bd01050d0000"+ // field 23, encoding 5, value 3333 - "c1014019000000000000"+ // field 24, encoding 1, value 6464 - "c101a519000000000000"+ // field 24, encoding 1, value 6565 - "c801a0dd13"+ // field 25, encoding 0, value 323232 - "c80195ac14"+ // field 25, encoding 0, value 333333 - "d001c0ba27"+ // field 26, encoding 0, value 646464 - "d001b58928"+ // field 26, encoding 0, value 656565 - "dd0100000042"+ // field 27, encoding 5, value 32.0 - "dd0100000442"+ // field 27, encoding 5, value 33.0 - "e1010000000000005040"+ // field 28, encoding 1, value 64.0 - "e1010000000000405040"+ // field 28, encoding 1, value 65.0 - "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" - "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 
42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ca0c03"+"626967"+ // field 201, encoding 2, string "big" - "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" - "d00c40"+ // field 202, encoding 0, value 32 - "d00c3f"+ // field 202, encoding 0, value -32 - "d80c8001"+ // field 203, encoding 0, value 64 - "d80c7f"+ // field 203, encoding 0, value -64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, all packed repeated fields given two values. -func TestEncodeDecode6(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolRepeatedPacked = []bool{false, true} - pb.F_Int32RepeatedPacked = []int32{32, 33} - pb.F_Int64RepeatedPacked = []int64{64, 65} - pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} - pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} - pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} - pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} - pb.F_FloatRepeatedPacked = []float32{32., 33.} - pb.F_DoubleRepeatedPacked = []float64{64., 65.} - pb.F_Sint32RepeatedPacked = []int32{32, -32} - pb.F_Sint64RepeatedPacked = []int64{64, -64} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 - "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 - "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 - "aa0308"+ // field 53, encoding 2, 8 bytes - "a00c0000050d0000"+ // value 3232, value 3333 - "b20310"+ // field 54, encoding 2, 16 bytes - "4019000000000000a519000000000000"+ // value 6464, value 6565 - "ba0306"+ // field 55, encoding 2, 6 bytes - "a0dd1395ac14"+ // value 323232, value 333333 - "c20306"+ // field 56, encoding 2, 6 bytes - "c0ba27b58928"+ // value 646464, value 656565 - "ca0308"+ // field 57, 
encoding 2, 8 bytes
-		"0000004200000442"+ // value 32.0, value 33.0
-		"d20310"+ // field 58, encoding 2, 16 bytes
-		"00000000000050400000000000405040"+ // value 64.0, value 65.0
-		"b304"+ // start group field 70 level 1
-		"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
-		"b404"+ // end group field 70 level 1
-		"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
-		"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
-		"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
-		"b21f02"+ // field 502, encoding 2, 2 bytes
-		"403f"+ // value 32, value -32
-		"ba1f03"+ // field 503, encoding 2, 3 bytes
-		"80017f") // value 64, value -64
-}
-
-// Test that we can encode empty bytes fields.
-func TestEncodeDecodeBytes1(t *testing.T) {
-	pb := initGoTest(false)
-
-	// Create our bytes
-	pb.F_BytesRequired = []byte{}
-	pb.F_BytesRepeated = [][]byte{{}}
-	pb.F_BytesOptional = []byte{}
-
-	d, err := Marshal(pb)
-	if err != nil {
-		t.Error(err)
-	}
-
-	pbd := new(GoTest)
-	if err := Unmarshal(d, pbd); err != nil {
-		t.Error(err)
-	}
-
-	if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {
-		t.Error("required empty bytes field is incorrect")
-	}
-	if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil {
-		t.Error("repeated empty bytes field is incorrect")
-	}
-	if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {
-		t.Error("optional empty bytes field is incorrect")
-	}
-}
-
-// Test that we encode nil-valued fields of a repeated bytes field correctly.
-// Since entries in a repeated field cannot be nil, nil must mean empty value.
-func TestEncodeDecodeBytes2(t *testing.T) {
-	pb := initGoTest(false)
-
-	// Create our bytes
-	pb.F_BytesRepeated = [][]byte{nil}
-
-	d, err := Marshal(pb)
-	if err != nil {
-		t.Error(err)
-	}
-
-	pbd := new(GoTest)
-	if err := Unmarshal(d, pbd); err != nil {
-		t.Error(err)
-	}
-
-	if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
-		t.Error("Unexpected value for repeated bytes field")
-	}
-}
-
-// Test that unrecognized fields are skipped while unmarshaling and
-// preserved in XXX_unrecognized.
-func TestSkippingUnrecognizedFields(t *testing.T) {
-	o := old()
-	pb := initGoTestField()
-
-	// Marshal it normally.
-	o.Marshal(pb)
-
-	// Now create a GoSkipTest record.
-	skip := &GoSkipTest{
-		SkipInt32:   Int32(32),
-		SkipFixed32: Uint32(3232),
-		SkipFixed64: Uint64(6464),
-		SkipString:  String("skipper"),
-		Skipgroup: &GoSkipTest_SkipGroup{
-			GroupInt32:  Int32(75),
-			GroupString: String("wxyz"),
-		},
-	}
-
-	// Marshal it into same buffer.
-	o.Marshal(skip)
-
-	pbd := new(GoTestField)
-	o.Unmarshal(pbd)
-
-	// The XXX_unrecognized field should be a marshaling of GoSkipTest.
-	skipd := new(GoSkipTest)
-
-	o.SetBuf(pbd.XXX_unrecognized)
-	o.Unmarshal(skipd)
-
-	if *skipd.SkipInt32 != *skip.SkipInt32 {
-		t.Error("skip int32", skipd.SkipInt32)
-	}
-	if *skipd.SkipFixed32 != *skip.SkipFixed32 {
-		t.Error("skip fixed32", skipd.SkipFixed32)
-	}
-	if *skipd.SkipFixed64 != *skip.SkipFixed64 {
-		t.Error("skip fixed64", skipd.SkipFixed64)
-	}
-	if *skipd.SkipString != *skip.SkipString {
-		t.Error("skip string", *skipd.SkipString)
-	}
-	if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
-		t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
-	}
-	if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
-		t.Error("skip group string", *skipd.Skipgroup.GroupString)
-	}
-}
-
-// Check that unrecognized fields of a submessage are preserved.
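-// (An illustrative sketch of the mechanism under test, using this file's
-// helpers: any field the decoder does not recognize is re-encoded verbatim
-// into the message's XXX_unrecognized byte slice, so it survives a
-// decode/re-encode round trip:
-//
-//	o := old()
-//	o.Marshal(known) // fields the target type understands
-//	o.Marshal(extra) // fields it does not, same buffer
-//	o.Unmarshal(dst) // dst.XXX_unrecognized now holds extra's encoding
-//
-// The test below checks that the same preservation happens for a field
-// inside a submessage.)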
-func TestSubmessageUnrecognizedFields(t *testing.T) { - nm := &NewMessage{ - Nested: &NewMessage_Nested{ - Name: String("Nigel"), - FoodGroup: String("carbs"), - }, - } - b, err := Marshal(nm) - if err != nil { - t.Fatalf("Marshal of NewMessage: %v", err) - } - - // Unmarshal into an OldMessage. - om := new(OldMessage) - if err := Unmarshal(b, om); err != nil { - t.Fatalf("Unmarshal to OldMessage: %v", err) - } - exp := &OldMessage{ - Nested: &OldMessage_Nested{ - Name: String("Nigel"), - // normal protocol buffer users should not do this - XXX_unrecognized: []byte("\x12\x05carbs"), - }, - } - if !Equal(om, exp) { - t.Errorf("om = %v, want %v", om, exp) - } - - // Clone the OldMessage. - om = Clone(om).(*OldMessage) - if !Equal(om, exp) { - t.Errorf("Clone(om) = %v, want %v", om, exp) - } - - // Marshal the OldMessage, then unmarshal it into an empty NewMessage. - if b, err = Marshal(om); err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - t.Logf("Marshal(%v) -> %q", om, b) - nm2 := new(NewMessage) - if err := Unmarshal(b, nm2); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - if !Equal(nm, nm2) { - t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) - } -} - -// Check that an int32 field can be upgraded to an int64 field. -func TestNegativeInt32(t *testing.T) { - om := &OldMessage{ - Num: Int32(-1), - } - b, err := Marshal(om) - if err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - - // Check the size. It should be 11 bytes; - // 1 for the field/wire type, and 10 for the negative number. - if len(b) != 11 { - t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) - } - - // Unmarshal into a NewMessage. - nm := new(NewMessage) - if err := Unmarshal(b, nm); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - want := &NewMessage{ - Num: Int64(-1), - } - if !Equal(nm, want) { - t.Errorf("nm = %v, want %v", nm, want) - } -} - -// Check that we can grow an array (repeated field) to have many elements. -// This test doesn't depend only on our encoding; for variety, it makes sure -// we create, encode, and decode the correct contents explicitly. It's therefore -// a bit messier. -// This test also uses (and hence tests) the Marshal/Unmarshal functions -// instead of the methods. -func TestBigRepeated(t *testing.T) { - pb := initGoTest(true) - - // Create the arrays - const N = 50 // Internally the library starts much smaller. - pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) - pb.F_Sint64Repeated = make([]int64, N) - pb.F_Sint32Repeated = make([]int32, N) - pb.F_BytesRepeated = make([][]byte, N) - pb.F_StringRepeated = make([]string, N) - pb.F_DoubleRepeated = make([]float64, N) - pb.F_FloatRepeated = make([]float32, N) - pb.F_Uint64Repeated = make([]uint64, N) - pb.F_Uint32Repeated = make([]uint32, N) - pb.F_Fixed64Repeated = make([]uint64, N) - pb.F_Fixed32Repeated = make([]uint32, N) - pb.F_Int64Repeated = make([]int64, N) - pb.F_Int32Repeated = make([]int32, N) - pb.F_BoolRepeated = make([]bool, N) - pb.RepeatedField = make([]*GoTestField, N) - - // Fill in the arrays with checkable values. 
-	igtf := initGoTestField()
-	igtrg := initGoTest_RepeatedGroup()
-	for i := 0; i < N; i++ {
-		pb.Repeatedgroup[i] = igtrg
-		pb.F_Sint64Repeated[i] = int64(i)
-		pb.F_Sint32Repeated[i] = int32(i)
-		s := fmt.Sprint(i)
-		pb.F_BytesRepeated[i] = []byte(s)
-		pb.F_StringRepeated[i] = s
-		pb.F_DoubleRepeated[i] = float64(i)
-		pb.F_FloatRepeated[i] = float32(i)
-		pb.F_Uint64Repeated[i] = uint64(i)
-		pb.F_Uint32Repeated[i] = uint32(i)
-		pb.F_Fixed64Repeated[i] = uint64(i)
-		pb.F_Fixed32Repeated[i] = uint32(i)
-		pb.F_Int64Repeated[i] = int64(i)
-		pb.F_Int32Repeated[i] = int32(i)
-		pb.F_BoolRepeated[i] = i%2 == 0
-		pb.RepeatedField[i] = igtf
-	}
-
-	// Marshal.
-	buf, _ := Marshal(pb)
-
-	// Now test Unmarshal by recreating the original buffer.
-	pbd := new(GoTest)
-	Unmarshal(buf, pbd)
-
-	// Check the checkable values
-	for i := uint64(0); i < N; i++ {
-		if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
-			t.Error("pbd.Repeatedgroup bad")
-		}
-		var x uint64
-		x = uint64(pbd.F_Sint64Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Sint64Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Sint32Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Sint32Repeated bad", x, i)
-		}
-		s := fmt.Sprint(i)
-		equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
-		if pbd.F_StringRepeated[i] != s {
-			t.Error("pbd.F_StringRepeated bad", pbd.F_StringRepeated[i], i)
-		}
-		x = uint64(pbd.F_DoubleRepeated[i])
-		if x != i {
-			t.Error("pbd.F_DoubleRepeated bad", x, i)
-		}
-		x = uint64(pbd.F_FloatRepeated[i])
-		if x != i {
-			t.Error("pbd.F_FloatRepeated bad", x, i)
-		}
-		x = pbd.F_Uint64Repeated[i]
-		if x != i {
-			t.Error("pbd.F_Uint64Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Uint32Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Uint32Repeated bad", x, i)
-		}
-		x = pbd.F_Fixed64Repeated[i]
-		if x != i {
-			t.Error("pbd.F_Fixed64Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Fixed32Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Fixed32Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Int64Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Int64Repeated bad", x, i)
-		}
-		x = uint64(pbd.F_Int32Repeated[i])
-		if x != i {
-			t.Error("pbd.F_Int32Repeated bad", x, i)
-		}
-		if pbd.F_BoolRepeated[i] != (i%2 == 0) {
-			t.Error("pbd.F_BoolRepeated bad", pbd.F_BoolRepeated[i], i)
-		}
-		if pbd.RepeatedField[i] == nil { // TODO: more checking?
-			t.Error("pbd.RepeatedField bad")
-		}
-	}
-}
-
-// Verify we give a useful message when decoding to the wrong structure type.
-func TestTypeMismatch(t *testing.T) {
-	pb1 := initGoTest(true)
-
-	// Marshal
-	o := old()
-	o.Marshal(pb1)
-
-	// Now Unmarshal it to the wrong type.
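-	// (Illustrative note: GoTest's field 1 is a varint ("0807" in the dumps
-	// above), while GoTestField's field 1 is the length-delimited string
-	// Label, so the decoder sees wire type 0 where it expects wire type 2
-	// and should report "bad wiretype".)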
-	pb2 := initGoTestField()
-	err := o.Unmarshal(pb2)
-	if err == nil {
-		t.Error("expected error, got no error")
-	} else if !strings.Contains(err.Error(), "bad wiretype") {
-		t.Error("expected bad wiretype error, got", err)
-	}
-}
-
-func encodeDecode(t *testing.T, in, out Message, msg string) {
-	buf, err := Marshal(in)
-	if err != nil {
-		t.Fatalf("failed marshaling %v: %v", msg, err)
-	}
-	if err := Unmarshal(buf, out); err != nil {
-		t.Fatalf("failed unmarshaling %v: %v", msg, err)
-	}
-}
-
-func TestPackedNonPackedDecoderSwitching(t *testing.T) {
-	np, p := new(NonPackedTest), new(PackedTest)
-
-	// non-packed -> packed
-	np.A = []int32{0, 1, 1, 2, 3, 5}
-	encodeDecode(t, np, p, "non-packed -> packed")
-	if !reflect.DeepEqual(np.A, p.B) {
-		t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
-	}
-
-	// packed -> non-packed
-	np.Reset()
-	p.B = []int32{3, 1, 4, 1, 5, 9}
-	encodeDecode(t, p, np, "packed -> non-packed")
-	if !reflect.DeepEqual(p.B, np.A) {
-		t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
-	}
-}
-
-func TestProto1RepeatedGroup(t *testing.T) {
-	pb := &MessageList{
-		Message: []*MessageList_Message{
-			{
-				Name:  String("blah"),
-				Count: Int32(7),
-			},
-			// NOTE: pb.Message[1] is nil.
-			nil,
-		},
-	}
-
-	o := old()
-	err := o.Marshal(pb)
-	if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
-		t.Fatalf("unexpected or no error when marshaling: %v", err)
-	}
-}
-
-// Test that enums work. Checks for a bug introduced by making enums
-// named types instead of int32: newInt32FromUint64 would crash with
-// a type mismatch in reflect.PtrTo.
-func TestEnum(t *testing.T) {
-	pb := new(GoEnum)
-	pb.Foo = FOO_FOO1.Enum()
-	o := old()
-	if err := o.Marshal(pb); err != nil {
-		t.Fatal("error encoding enum:", err)
-	}
-	pb1 := new(GoEnum)
-	if err := o.Unmarshal(pb1); err != nil {
-		t.Fatal("error decoding enum:", err)
-	}
-	if *pb1.Foo != FOO_FOO1 {
-		t.Error("expected", FOO_FOO1, "but got", *pb1.Foo)
-	}
-}
-
-// Enum types have String methods. Check that enum fields can be printed.
-// We don't care what the value actually is, just as long as it doesn't crash.
-func TestPrintingNilEnumFields(t *testing.T) {
-	pb := new(GoEnum)
-	_ = fmt.Sprintf("%+v", pb)
-}
-
-// Verify that absent required fields cause Marshal/Unmarshal to return errors.
-func TestRequiredFieldEnforcement(t *testing.T) {
-	pb := new(GoTestField)
-	_, err := Marshal(pb)
-	if err == nil {
-		t.Error("marshal: expected error, got nil")
-	} else if !strings.Contains(err.Error(), "Label") {
-		t.Errorf("marshal: bad error type: %v", err)
-	}
-
-	// A slightly sneaky, yet valid, proto. It encodes the same required field twice,
-	// so simply counting the required fields is insufficient.
-	// field 1, encoding 2, value "hi"
-	buf := []byte("\x0A\x02hi\x0A\x02hi")
-	err = Unmarshal(buf, pb)
-	if err == nil {
-		t.Error("unmarshal: expected error, got nil")
-	} else if !strings.Contains(err.Error(), "{Unknown}") {
-		t.Errorf("unmarshal: bad error type: %v", err)
-	}
-}
-
-func TestTypedNilMarshal(t *testing.T) {
-	// A typed nil should return ErrNil and not crash.
-	_, err := Marshal((*GoEnum)(nil))
-	if err != ErrNil {
-		t.Errorf("Marshal: got err %v, want ErrNil", err)
-	}
-}
-
-// A type that implements the Marshaler interface, but is not nillable.
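-// (For reference, a sketch of the Marshaler interface exercised below, as
-// implied by the `var _ Marshaler` assertion further down; a type that
-// implements it encodes itself instead of going through the
-// reflection-based encoder:
-//
-//	type Marshaler interface {
-//		Marshal() ([]byte, error)
-//	}
-// )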
-type nonNillableInt uint64 - -func (nni nonNillableInt) Marshal() ([]byte, error) { - return EncodeVarint(uint64(nni)), nil -} - -type NNIMessage struct { - nni nonNillableInt -} - -func (*NNIMessage) Reset() {} -func (*NNIMessage) String() string { return "" } -func (*NNIMessage) ProtoMessage() {} - -// A type that implements the Marshaler interface and is nillable. -type nillableMessage struct { - x uint64 -} - -func (nm *nillableMessage) Marshal() ([]byte, error) { - return EncodeVarint(nm.x), nil -} - -type NMMessage struct { - nm *nillableMessage -} - -func (*NMMessage) Reset() {} -func (*NMMessage) String() string { return "" } -func (*NMMessage) ProtoMessage() {} - -// Verify a type that uses the Marshaler interface, but has a nil pointer. -func TestNilMarshaler(t *testing.T) { - // Try a struct with a Marshaler field that is nil. - // It should be directly marshable. - nmm := new(NMMessage) - if _, err := Marshal(nmm); err != nil { - t.Error("unexpected error marshaling nmm: ", err) - } - - // Try a struct with a Marshaler field that is not nillable. - nnim := new(NNIMessage) - nnim.nni = 7 - var _ Marshaler = nnim.nni // verify it is truly a Marshaler - if _, err := Marshal(nnim); err != nil { - t.Error("unexpected error marshaling nnim: ", err) - } -} - -func TestAllSetDefaults(t *testing.T) { - // Exercise SetDefaults with all scalar field types. - m := &Defaults{ - // NaN != NaN, so override that here. - F_Nan: Float32(1.7), - } - expected := &Defaults{ - F_Bool: Bool(true), - F_Int32: Int32(32), - F_Int64: Int64(64), - F_Fixed32: Uint32(320), - F_Fixed64: Uint64(640), - F_Uint32: Uint32(3200), - F_Uint64: Uint64(6400), - F_Float: Float32(314159), - F_Double: Float64(271828), - F_String: String(`hello, "world!"` + "\n"), - F_Bytes: []byte("Bignose"), - F_Sint32: Int32(-32), - F_Sint64: Int64(-64), - F_Enum: Defaults_GREEN.Enum(), - F_Pinf: Float32(float32(math.Inf(1))), - F_Ninf: Float32(float32(math.Inf(-1))), - F_Nan: Float32(1.7), - StrZero: String(""), - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithSetField(t *testing.T) { - // Check that a set value is not overridden. 
- m := &Defaults{ - F_Int32: Int32(12), - } - SetDefaults(m) - if v := m.GetF_Int32(); v != 12 { - t.Errorf("m.FInt32 = %v, want 12", v) - } -} - -func TestSetDefaultsWithSubMessage(t *testing.T) { - m := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - }, - } - expected := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - Port: Int32(4000), - }, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { - m := &MyMessage{ - RepInner: []*InnerMessage{{}}, - } - expected := &MyMessage{ - RepInner: []*InnerMessage{{ - Port: Int32(4000), - }}, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { - m := &MyMessage{ - Pet: []string{"turtle", "wombat"}, - } - expected := Clone(m) - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestMaximumTagNumber(t *testing.T) { - m := &MaxTag{ - LastField: String("natural goat essence"), - } - buf, err := Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal failed: %v", err) - } - m2 := new(MaxTag) - if err := Unmarshal(buf, m2); err != nil { - t.Fatalf("proto.Unmarshal failed: %v", err) - } - if got, want := m2.GetLastField(), *m.LastField; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func TestJSON(t *testing.T) { - m := &MyMessage{ - Count: Int32(4), - Pet: []string{"bunny", "kitty"}, - Inner: &InnerMessage{ - Host: String("cauchy"), - }, - Bikeshed: MyMessage_GREEN.Enum(), - } - const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` - - b, err := json.Marshal(m) - if err != nil { - t.Fatalf("json.Marshal failed: %v", err) - } - s := string(b) - if s != expected { - t.Errorf("got %s\nwant %s", s, expected) - } - - received := new(MyMessage) - if err := json.Unmarshal(b, received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } - - // Test unmarshalling of JSON with symbolic enum name. - const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` - received.Reset() - if err := json.Unmarshal([]byte(old), received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } -} - -func TestBadWireType(t *testing.T) { - b := []byte{7<<3 | 6} // field 7, wire type 6 - pb := new(OtherMessage) - if err := Unmarshal(b, pb); err == nil { - t.Errorf("Unmarshal did not fail") - } else if !strings.Contains(err.Error(), "unknown wire type") { - t.Errorf("wrong error: %v", err) - } -} - -func TestBytesWithInvalidLength(t *testing.T) { - // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. - b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} - Unmarshal(b, new(MyMessage)) -} - -func TestLengthOverflow(t *testing.T) { - // Overflowing a length should not panic. - b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} - Unmarshal(b, new(MyMessage)) -} - -func TestVarintOverflow(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. 
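-	// (Illustrative note on these hand-built inputs: a key byte is
-	// fieldNumber<<3 | wireType, so 2<<3|WireBytes above is 0x12, announcing
-	// field 2 as length-delimited; here, the ten 0x80 continuation bytes
-	// after 3<<3|WireBytes spell a varint length wider than 64 bits.)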
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestUnmarshalFuzz(t *testing.T) { - const N = 1000 - seed := time.Now().UnixNano() - t.Logf("RNG seed is %d", seed) - rng := rand.New(rand.NewSource(seed)) - buf := make([]byte, 20) - for i := 0; i < N; i++ { - for j := range buf { - buf[j] = byte(rng.Intn(256)) - } - fuzzUnmarshal(t, buf) - } -} - -func TestMergeMessages(t *testing.T) { - pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} - data, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - pb1 := new(MessageList) - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("first Unmarshal: %v", err) - } - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("second Unmarshal: %v", err) - } - if len(pb1.Message) != 1 { - t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) - } - - pb2 := new(MessageList) - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("first UnmarshalMerge: %v", err) - } - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("second UnmarshalMerge: %v", err) - } - if len(pb2.Message) != 2 { - t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) - } -} - -func TestExtensionMarshalOrder(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { - t.Fatalf("SetExtension: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. - var orig []byte - for i := 0; i < 100; i++ { - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if i == 0 { - orig = b - continue - } - if !bytes.Equal(b, orig) { - t.Errorf("Bytes differ on attempt #%d", i) - } - } -} - -// Many extensions, because small maps might not iterate differently on each iteration. -var exts = []*ExtensionDesc{ - E_X201, - E_X202, - E_X203, - E_X204, - E_X205, - E_X206, - E_X207, - E_X208, - E_X209, - E_X210, - E_X211, - E_X212, - E_X213, - E_X214, - E_X215, - E_X216, - E_X217, - E_X218, - E_X219, - E_X220, - E_X221, - E_X222, - E_X223, - E_X224, - E_X225, - E_X226, - E_X227, - E_X228, - E_X229, - E_X230, - E_X231, - E_X232, - E_X233, - E_X234, - E_X235, - E_X236, - E_X237, - E_X238, - E_X239, - E_X240, - E_X241, - E_X242, - E_X243, - E_X244, - E_X245, - E_X246, - E_X247, - E_X248, - E_X249, - E_X250, -} - -func TestMessageSetMarshalOrder(t *testing.T) { - m := &MyMessageSet{} - for _, x := range exts { - if err := SetExtension(m, x, &Empty{}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - } - - buf, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
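-	// (Why determinism needs care, illustratively: Go randomizes map
-	// iteration order, so ranging over the extension map directly could
-	// emit a different byte order on every run; a deterministic encoder
-	// must fix an order first, e.g.
-	//
-	//	ids := make([]int32, 0, len(m))
-	//	for id := range m {
-	//		ids = append(ids, id)
-	//	}
-	//	// sort ids ascending, then marshal in that order
-	// )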
- for i := 0; i < 10; i++ { - b1, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(b1, buf) { - t.Errorf("Bytes differ on re-Marshal #%d", i) - } - - m2 := &MyMessageSet{} - if err := Unmarshal(buf, m2); err != nil { - t.Errorf("Unmarshal: %v", err) - } - b2, err := Marshal(m2) - if err != nil { - t.Errorf("re-Marshal: %v", err) - } - if !bytes.Equal(b2, buf) { - t.Errorf("Bytes differ on round-trip #%d", i) - } - } -} - -func TestUnmarshalMergesMessages(t *testing.T) { - // If a nested message occurs twice in the input, - // the fields should be merged when decoding. - a := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("polhode"), - Port: Int32(1234), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &OtherMessage{ - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Connected: Bool(true), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &OtherMessage{ - Key: Int64(123), - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Port: Int32(1234), - Connected: Bool(true), - }, - } - got := new(OtherMessage) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestEncodingSizes(t *testing.T) { - tests := []struct { - m Message - n int - }{ - {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, - {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, - {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, - {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, - } - for _, test := range tests { - b, err := Marshal(test.m) - if err != nil { - t.Errorf("Marshal(%v): %v", test.m, err) - continue - } - if len(b) != test.n { - t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) - } - } -} - -func TestRequiredNotSetError(t *testing.T) { - pb := initGoTest(false) - pb.RequiredField.Label = nil - pb.F_Int32Required = nil - pb.F_Int64Required = nil - - expected := "0807" + // field 1, encoding 0, value 7 - "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) - "5001" + // field 10, encoding 0, value 1 - "6d20000000" + // field 13, encoding 5, value 0x20 - "714000000000000000" + // field 14, encoding 1, value 0x40 - "78a019" + // field 15, encoding 0, value 0xca0 = 3232 - "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45" + // field 17, encoding 5, value 3232.0 - "9101000000000040b940" + // field 18, encoding 1, value 6464.0 - "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" - "b304" + // field 70, encoding 3, start group - "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" - "b404" + // field 70, encoding 4, end group - "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" - "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" // field 103, encoding 0, 0x7f zigzag64 - - o := old() - bytes, err := Marshal(pb) - if _, ok := err.(*RequiredNotSetError); !ok { - fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("expected = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-1 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 1", bytes) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by 
recreating the original buffer. - pbd := new(GoTest) - err = Unmarshal(bytes, pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { - t.Errorf("unmarshal wrong err msg: %v", err) - } - bytes, err = Marshal(pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-2 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 2", bytes) - t.Fatalf("string = %s", expected) - } -} - -func fuzzUnmarshal(t *testing.T, data []byte) { - defer func() { - if e := recover(); e != nil { - t.Errorf("These bytes caused a panic: %+v", data) - t.Logf("Stack:\n%s", debug.Stack()) - t.FailNow() - } - }() - - pb := new(MyMessage) - Unmarshal(data, pb) -} - -func TestMapFieldMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // b should be the concatenation of these three byte sequences in some order. - parts := []string{ - "\n\a\b\x01\x12\x03Rob", - "\n\a\b\x04\x12\x03Ian", - "\n\b\b\x08\x12\x04Dave", - } - ok := false - for i := range parts { - for j := range parts { - if j == i { - continue - } - for k := range parts { - if k == i || k == j { - continue - } - try := parts[i] + parts[j] + parts[k] - if bytes.Equal(b, []byte(try)) { - ok = true - break - } - } - } - } - if !ok { - t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) - } - t.Logf("FYI b: %q", b) - - (new(Buffer)).DebugPrint("Dump of b", b) -} - -func TestMapFieldRoundTrips(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - MsgMapping: map[int64]*FloatingPoint{ - 0x7001: &FloatingPoint{F: Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{ - false: []byte("that's not right!"), - true: []byte("aye, 'tis true!"), - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("FYI b: %q", b) - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - for _, pair := range [][2]interface{}{ - {m.NameMapping, m2.NameMapping}, - {m.MsgMapping, m2.MsgMapping}, - {m.ByteMapping, m2.ByteMapping}, - } { - if !reflect.DeepEqual(pair[0], pair[1]) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) - } - } -} - -func TestMapFieldWithNil(t *testing.T) { - m := &MessageWithMap{ - MsgMapping: map[int64]*FloatingPoint{ - 1: nil, - }, - } - b, err := Marshal(m) - if err == nil { - t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b) - } -} - -// Benchmarks - -func testMsg() *GoTest { - pb := initGoTest(true) - const N = 1000 // Internally the library starts much smaller. 
- pb.F_Int32Repeated = make([]int32, N) - pb.F_DoubleRepeated = make([]float64, N) - for i := 0; i < N; i++ { - pb.F_Int32Repeated[i] = int32(i) - pb.F_DoubleRepeated[i] = float64(i) - } - return pb -} - -func bytesMsg() *GoTest { - pb := initGoTest(true) - buf := make([]byte, 4000) - for i := range buf { - buf[i] = byte(i) - } - pb.F_BytesDefaulted = buf - return pb -} - -func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { - d, _ := marshal(pb) - b.SetBytes(int64(len(d))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - marshal(pb) - } -} - -func benchmarkBufferMarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - p.Reset() - err := p.Marshal(pb0) - return p.Bytes(), err - }) -} - -func benchmarkSize(b *testing.B, pb Message) { - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - Size(pb) - return nil, nil - }) -} - -func newOf(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - return reflect.New(in.Type().Elem()).Interface().(Message) -} - -func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { - d, _ := Marshal(pb) - b.SetBytes(int64(len(d))) - pbd := newOf(pb) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - unmarshal(d, pbd) - } -} - -func benchmarkBufferUnmarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { - p.SetBuf(d) - return p.Unmarshal(pb0) - }) -} - -// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} - -func BenchmarkMarshal(b *testing.B) { - benchmarkMarshal(b, testMsg(), Marshal) -} - -func BenchmarkBufferMarshal(b *testing.B) { - benchmarkBufferMarshal(b, testMsg()) -} - -func BenchmarkSize(b *testing.B) { - benchmarkSize(b, testMsg()) -} - -func BenchmarkUnmarshal(b *testing.B) { - benchmarkUnmarshal(b, testMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshal(b *testing.B) { - benchmarkBufferUnmarshal(b, testMsg()) -} - -func BenchmarkMarshalBytes(b *testing.B) { - benchmarkMarshal(b, bytesMsg(), Marshal) -} - -func BenchmarkBufferMarshalBytes(b *testing.B) { - benchmarkBufferMarshal(b, bytesMsg()) -} - -func BenchmarkSizeBytes(b *testing.B) { - benchmarkSize(b, bytesMsg()) -} - -func BenchmarkUnmarshalBytes(b *testing.B) { - benchmarkUnmarshal(b, bytesMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshalBytes(b *testing.B) { - benchmarkBufferUnmarshal(b, bytesMsg()) -} - -func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { - b.StopTimer() - pb := initGoTestField() - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - pbd := new(GoTestField) - p := NewBuffer(nil) - p.Marshal(pb) - p.Marshal(skip) - p2 := NewBuffer(nil) - - b.StartTimer() - for i := 0; i < b.N; i++ { - p2.SetBuf(p.Bytes()) - p2.Unmarshal(pbd) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 915a68b8ec..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,212 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: MessageSet and RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extendableProto); ok { - emOut := out.Addr().Interface().(extendableProto) - mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
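-// (Illustration of the viaPtr rule in the function below: proto2 scalars
-// are reached through a pointer, so an explicitly set zero such as
-// Int32(0) still gets copied; proto3 scalars are stored by value, so a
-// zero there is indistinguishable from "unset" and is skipped, e.g.
-// merging a proto3 message with an empty Name leaves dst's Name alone.)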
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go deleted file mode 100644 index a1c697bc84..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go +++ /dev/null @@ -1,245 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -var cloneTestMessage = &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, -} - -func init() { - ext := &pb.Ext{ - Data: proto.String("extension"), - } - if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { - panic("SetExtension: " + err.Error()) - } -} - -func TestClone(t *testing.T) { - m := proto.Clone(cloneTestMessage).(*pb.MyMessage) - if !proto.Equal(m, cloneTestMessage) { - t.Errorf("Clone(%v) = %v", cloneTestMessage, m) - } - - // Verify it was a deep copy. - *m.Inner.Port++ - if proto.Equal(m, cloneTestMessage) { - t.Error("Mutating clone changed the original") - } - // Byte fields and repeated fields should be copied. 
- if &m.Pet[0] == &cloneTestMessage.Pet[0] { - t.Error("Pet: repeated field not copied") - } - if &m.Others[0] == &cloneTestMessage.Others[0] { - t.Error("Others: repeated field not copied") - } - if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { - t.Error("Others[0].Value: bytes field not copied") - } - if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { - t.Error("RepBytes: repeated field not copied") - } - if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { - t.Error("RepBytes[0]: bytes field not copied") - } -} - -func TestCloneNil(t *testing.T) { - var m *pb.MyMessage - if c := proto.Clone(m); !proto.Equal(m, c) { - t.Errorf("Clone(%v) = %v", m, c) - } -} - -var mergeTests = []struct { - src, dst, want proto.Message -}{ - { - src: &pb.MyMessage{ - Count: proto.Int32(42), - }, - dst: &pb.MyMessage{ - Name: proto.String("Dave"), - }, - want: &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - }, - }, - { - src: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - }, - Pet: []string{"horsey"}, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - }, - dst: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - { - // Explicitly test a src=nil field - Inner: nil, - }, - }, - }, - want: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty", "horsey"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - {}, - { - Value: []byte("some bytes"), - }, - }, - }, - }, - { - src: &pb.MyMessage{ - RepBytes: [][]byte{[]byte("wow")}, - }, - dst: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham")}, - }, - want: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, - }, - }, - // Check that a scalar bytes field replaces rather than appends. - { - src: &pb.OtherMessage{Value: []byte("foo")}, - dst: &pb.OtherMessage{Value: []byte("bar")}, - want: &pb.OtherMessage{Value: []byte("foo")}, - }, - { - src: &pb.MessageWithMap{ - NameMapping: map[int32]string{6: "Nigel"}, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - dst: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Bruce", // should be overwritten - 7: "Andrew", - }, - }, - want: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Nigel", - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - }, - // proto3 shouldn't merge zero values, - // in the same way that proto2 shouldn't merge nils. 
- { - src: &proto3pb.Message{ - Name: "Aaron", - Data: []byte(""), // zero value, but not nil - }, - dst: &proto3pb.Message{ - HeightInCm: 176, - Data: []byte("texas!"), - }, - want: &proto3pb.Message{ - Name: "Aaron", - HeightInCm: 176, - Data: []byte("texas!"), - }, - }, -} - -func TestMerge(t *testing.T) { - for _, m := range mergeTests { - got := proto.Clone(m.dst) - proto.Merge(got, m.src) - if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index bf71dcad10..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,827 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
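-//
-// As an illustrative example: 300 = 0b100101100 is written seven bits at
-// a time, least-significant group first, with the high bit of each byte
-// as a continuation flag:
-//
-//	0xAC 0x02 // 0x2C | 0x02<<7 = 44 + 256 = 300
-//
-// The sint32/sint64 types are zigzag-encoded first
-// (0, -1, 1, -2, ... -> 0, 1, 2, 3, ...), which the DecodeZigzag32/64
-// decoders below undo, so small negative numbers stay small on the wire.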
-func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. 
-// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. 
- if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.ExtensionMap()[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - e.ExtensionMap()[int32(tag)] = ext - } - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. 
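-// (Note, illustrative: rather than calling new(bool) for every decoded
-// field, the decoder carves pointers out of the pooled blocks sized above:
-//
-//	o.bools = make([]bool, boolPoolSize) // a block of 16
-//	// hand out &o.bools[0], then advance with o.bools = o.bools[1:]
-//
-// amortizing one allocation across many fields.)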
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - - y := *v - for i := 0; i < nb; i++ { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. 
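A packed repeated field, as handled by the packed decoders in this stretch of the file, is a single length-delimited payload of concatenated values. A sketch decoding the packed varints [3, 270] with the exported Buffer helpers (the byte values are worked out by hand):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 0x03 = payload length, then varint(3) = 0x03 and varint(270) = 0x8e 0x02.
	b := proto.NewBuffer([]byte{0x03, 0x03, 0x8e, 0x02})
	raw, err := b.DecodeRawBytes(false) // the length-delimited payload
	if err != nil {
		panic(err)
	}
	inner := proto.NewBuffer(raw)
	for {
		u, err := inner.DecodeVarint()
		if err != nil {
			break // payload exhausted
		}
		fmt.Println(u) // 3, then 270
	}
}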
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. 
- tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() || !valelem.IsValid() { - // We did not decode the key or the value in the map entry. - // Either way, it's an invalid map entry. - return fmt.Errorf("proto: bad map data: missing key/val") - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 72c780b915..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1293 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
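To make the varint format described above concrete, a sketch using the exported EncodeVarint helper defined earlier in this file, encoding the classic example value 300:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100: seven low-order bits per byte, least
	// significant group first, MSB set on every byte but the last.
	fmt.Printf("% x\n", proto.EncodeVarint(300)) // ac 02
}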
-func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? 
- if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - if err != nil { - return err - } - p.buf = append(p.buf, data...) - return nil - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Encode++ - } - - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Size++ - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil - } - - o.buf = append(o.buf, p.tagcode...) 
- return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) 
- return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - v := *structPointer_ExtMap(base, p.field) - if err := encodeExtensionMap(v); err != nil { - return err - } - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := *structPointer_ExtMap(base, p.field) - return sizeExtensionMap(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? 
- - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { - return err - } - return nil - } - - keys := v.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := v.MapIndex(key) - - // The only illegal map entry values are nil message pointers. - if val.Kind() == reflect.Ptr && val.IsNil() { - return errors.New("proto: map has nil element") - } - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. 
- // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." 
+ reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index d8673a3e97..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,256 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. -// TODO: MessageSet. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -func equalAny(v1, v2 reflect.Value) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2) { - return false - } - } - return true - case reflect.Ptr: - return equalAny(v1.Elem(), v2.Elem()) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i)) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// em1 and em2 are extension maps. -func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. 
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { - return false - } - } - - return true -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go deleted file mode 100644 index b322f65ab6..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - . "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -// Four identical base messages. -// The init function adds extensions to some of them. -var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} - -// Two messages with non-message extensions. 
-var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} -var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} - -func init() { - ext1 := &pb.Ext{Data: String("Kirk")} - ext2 := &pb.Ext{Data: String("Picard")} - - // messageWithExtension1a has ext1, but never marshals it. - if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1a failed: " + err.Error()) - } - - // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. - if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1b failed: " + err.Error()) - } - buf, err := Marshal(messageWithExtension1b) - if err != nil { - panic("Marshal of 1b failed: " + err.Error()) - } - messageWithExtension1b.Reset() - if err := Unmarshal(buf, messageWithExtension1b); err != nil { - panic("Unmarshal of 1b failed: " + err.Error()) - } - - // messageWithExtension2 has ext2. - if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { - panic("SetExtension on 2 failed: " + err.Error()) - } - - if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { - panic("SetExtension on Int32-1 failed: " + err.Error()) - } - if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { - panic("SetExtension on Int32-2 failed: " + err.Error()) - } -} - -var EqualTests = []struct { - desc string - a, b Message - exp bool -}{ - {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, - {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, - {"nil vs nil", nil, nil, true}, - {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, - {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, - {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, - - {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, - {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, - {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, - {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, - - {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, - {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, - {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, - {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, - {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, - {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, - {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, - - { - "nested, different", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, - false, - }, - { - "nested, equal", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - true, - }, - - {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, - {"bytes, empty", 
&pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, - {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, - { - "repeated bytes", - &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, - &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, - true, - }, - - {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, - {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, - {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, - - {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, - {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, - - { - "message with group", - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - true, - }, - - { - "map same", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - true, - }, - { - "map different entry", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, - false, - }, - { - "map different key only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, - false, - }, - { - "map different value only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, - false, - }, -} - -func TestEqual(t *testing.T) { - for _, tc := range EqualTests { - if res := Equal(tc.a, tc.b); res != tc.exp { - t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index e591ccef79..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,400 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base extendableProto, id int32, b []byte) { - base.ExtensionMap()[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - // Check the extended type. - if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. 
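The desc/value/enc lifecycle that the Extension comment above describes is easiest to see end to end. A minimal sketch, assuming the generated testdata package that the tests in this tree import (pb.MyMessage, pb.Ext, pb.E_Ext_More, and the generated GetData getter), with errors handled fatally for brevity:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "github.com/golang/protobuf/proto/testdata"
    )

    func main() {
        msg := &pb.MyMessage{Count: proto.Int32(4)}
        // SetExtension stores only desc and value; enc is filled in when
        // the message is marshaled.
        if err := proto.SetExtension(msg, pb.E_Ext_More, &pb.Ext{Data: proto.String("hi")}); err != nil {
            log.Fatal(err)
        }
        buf, err := proto.Marshal(msg)
        if err != nil {
            log.Fatal(err)
        }

        // After Unmarshal the extension exists only in its encoded form
        // (enc); the first GetExtension decodes it and caches desc/value.
        out := &pb.MyMessage{}
        if err := proto.Unmarshal(buf, out); err != nil {
            log.Fatal(err)
        }
        ext, err := proto.GetExtension(out, pb.E_Ext_More)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(ext.(*pb.Ext).GetData()) // "hi"
    }

Until the final GetExtension call, out carries the extension only as raw bytes; afterwards the encoded form is dropped, which is why the returned value is safe to mutate.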
-type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. -func encodeExtensionMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func sizeExtensionMap(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - _, ok := pb.ExtensionMap()[extension.Field] - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb extendableProto, extension *ExtensionDesc) { - // TODO: Check types, field numbers, etc.? - delete(pb.ExtensionMap(), extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. -func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { - if err := checkExtensionTypes(pb, extension); err != nil { - return nil, err - } - - emap := pb.ExtensionMap() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. 
- return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - rep := extension.repeated() - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if !rep || o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := pb.(extendableProto) - if !ok { - err = errors.New("proto: not an extendable proto") - return - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// SetExtension sets the specified extension of pb to the specified value. 
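HasExtension, ClearExtension and the ErrMissingExtension path compose as follows — a sketch under the same testdata assumptions as the previous example (pb.E_Ext_More declares no default value, so a cleared extension reads back as missing):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "github.com/golang/protobuf/proto/testdata"
    )

    func main() {
        msg := &pb.MyMessage{Count: proto.Int32(1)}
        fmt.Println(proto.HasExtension(msg, pb.E_Ext_More)) // false

        if err := proto.SetExtension(msg, pb.E_Ext_More, &pb.Ext{Data: proto.String("x")}); err != nil {
            fmt.Println("set:", err)
            return
        }
        fmt.Println(proto.HasExtension(msg, pb.E_Ext_More)) // true

        proto.ClearExtension(msg, pb.E_Ext_More)
        // No default is declared for this extension, so reading it back
        // yields ErrMissingExtension rather than a value.
        if _, err := proto.GetExtension(msg, pb.E_Ext_More); err == proto.ErrMissingExtension {
            fmt.Println("missing, as expected")
        }
    }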
-func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { - if err := checkExtensionTypes(pb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go deleted file mode 100644 index 72552767d8..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go +++ /dev/null @@ -1,292 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", ext1) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } -} - -func TestGetExtensionStability(t *testing.T) { - check := func(m *pb.MyMessage) bool { - ext1, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - ext2, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - return ext1 == ext2 - } - msg := &pb.MyMessage{Count: proto.Int32(4)} - ext0 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { - t.Fatalf("Could not set ext1: %s", ext0) - } - if !check(msg) { - t.Errorf("GetExtension() not stable before marshaling") - } - bb, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Marshal() failed: %s", err) - } - msg1 := &pb.MyMessage{} - err = proto.Unmarshal(bb, msg1) - if err != nil { - t.Fatalf("Unmarshal() failed: %s", err) - } - if !check(msg1) { - t.Errorf("GetExtension() not stable after unmarshaling") - } -} - -func TestGetExtensionDefaults(t *testing.T) { - var setFloat64 float64 = 1 - var setFloat32 float32 = 2 - var setInt32 int32 = 3 - var setInt64 int64 = 4 - var setUint32 uint32 = 5 - var setUint64 uint64 = 6 - var setBool = true - var setBool2 = false - var setString = "Goodnight string" - var setBytes = []byte("Goodnight bytes") - var setEnum = pb.DefaultsMessage_TWO - - type testcase struct { - ext *proto.ExtensionDesc // Extension we are testing. - want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). - def interface{} // Expected value of extension after ClearExtension(). 
- } - tests := []testcase{ - {pb.E_NoDefaultDouble, setFloat64, nil}, - {pb.E_NoDefaultFloat, setFloat32, nil}, - {pb.E_NoDefaultInt32, setInt32, nil}, - {pb.E_NoDefaultInt64, setInt64, nil}, - {pb.E_NoDefaultUint32, setUint32, nil}, - {pb.E_NoDefaultUint64, setUint64, nil}, - {pb.E_NoDefaultSint32, setInt32, nil}, - {pb.E_NoDefaultSint64, setInt64, nil}, - {pb.E_NoDefaultFixed32, setUint32, nil}, - {pb.E_NoDefaultFixed64, setUint64, nil}, - {pb.E_NoDefaultSfixed32, setInt32, nil}, - {pb.E_NoDefaultSfixed64, setInt64, nil}, - {pb.E_NoDefaultBool, setBool, nil}, - {pb.E_NoDefaultBool, setBool2, nil}, - {pb.E_NoDefaultString, setString, nil}, - {pb.E_NoDefaultBytes, setBytes, nil}, - {pb.E_NoDefaultEnum, setEnum, nil}, - {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, - {pb.E_DefaultFloat, setFloat32, float32(3.14)}, - {pb.E_DefaultInt32, setInt32, int32(42)}, - {pb.E_DefaultInt64, setInt64, int64(43)}, - {pb.E_DefaultUint32, setUint32, uint32(44)}, - {pb.E_DefaultUint64, setUint64, uint64(45)}, - {pb.E_DefaultSint32, setInt32, int32(46)}, - {pb.E_DefaultSint64, setInt64, int64(47)}, - {pb.E_DefaultFixed32, setUint32, uint32(48)}, - {pb.E_DefaultFixed64, setUint64, uint64(49)}, - {pb.E_DefaultSfixed32, setInt32, int32(50)}, - {pb.E_DefaultSfixed64, setInt64, int64(51)}, - {pb.E_DefaultBool, setBool, true}, - {pb.E_DefaultBool, setBool2, true}, - {pb.E_DefaultString, setString, "Hello, string"}, - {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, - {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, - } - - checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { - val, err := proto.GetExtension(msg, test.ext) - if err != nil { - if valWant != nil { - return fmt.Errorf("GetExtension(): %s", err) - } - if want := proto.ErrMissingExtension; err != want { - return fmt.Errorf("Unexpected error: got %v, want %v", err, want) - } - return nil - } - - // All proto2 extension values are either a pointer to a value or a slice of values. - ty := reflect.TypeOf(val) - tyWant := reflect.TypeOf(test.ext.ExtensionType) - if got, want := ty, tyWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) - } - tye := ty.Elem() - tyeWant := tyWant.Elem() - if got, want := tye, tyeWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) - } - - // Check the name of the type of the value. - // If it is an enum it will be type int32 with the name of the enum. - if got, want := tye.Name(), tye.Name(); got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) - } - - // Check that value is what we expect. - // If we have a pointer in val, get the value it points to. - valExp := val - if ty.Kind() == reflect.Ptr { - valExp = reflect.ValueOf(val).Elem().Interface() - } - if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { - return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) - } - - return nil - } - - setTo := func(test testcase) interface{} { - setTo := reflect.ValueOf(test.want) - if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { - setTo = reflect.New(typ).Elem() - setTo.Set(reflect.New(setTo.Type().Elem())) - setTo.Elem().Set(reflect.ValueOf(test.want)) - } - return setTo.Interface() - } - - for _, test := range tests { - msg := &pb.DefaultsMessage{} - name := test.ext.Name - - // Check the initial value. 
- if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - - // Set the per-type value and check value. - name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) - if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { - t.Errorf("%s: SetExtension(): %v", name, err) - continue - } - if err := checkVal(test, msg, test.want); err != nil { - t.Errorf("%s: %v", name, err) - continue - } - - // Set and check the value. - name += " (cleared)" - proto.ClearExtension(msg, test.ext) - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - } -} - -func TestExtensionsRoundTrip(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{ - Data: proto.String("hi"), - } - ext2 := &pb.Ext{ - Data: proto.String("there"), - } - exists := proto.HasExtension(msg, pb.E_Ext_More) - if exists { - t.Error("Extension More present unexpectedly") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Error(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { - t.Error(err) - } - e, err := proto.GetExtension(msg, pb.E_Ext_More) - if err != nil { - t.Error(err) - } - x, ok := e.(*pb.Ext) - if !ok { - t.Errorf("e has type %T, expected testdata.Ext", e) - } else if *x.Data != "there" { - t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) - } - proto.ClearExtension(msg, pb.E_Ext_More) - if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { - t.Errorf("got %v, expected ErrMissingExtension", e) - } - if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { - t.Error("expected bad extension error, got nil") - } - if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { - t.Error("expected extension err") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { - t.Error("expected some sort of type mismatch error, got nil") - } -} - -func TestNilExtension(t *testing.T) { - msg := &pb.MyMessage{ - Count: proto.Int32(1), - } - if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { - t.Fatal(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { - t.Error("expected SetExtension to fail due to a nil extension") - } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { - t.Errorf("expected error %v, got %v", want, err) - } - // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update - // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 0b28b08643..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,813 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - -package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. 
-type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // write point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
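Since the Buffer comment above notes that the global Marshal and Unmarshal create a temporary Buffer per call, reuse is the point of the type. A sketch of the pattern, assuming Buffer's Marshal method from this package's encode.go (not shown in this hunk) and the testdata OtherMessage used by the tests above:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "github.com/golang/protobuf/proto/testdata"
    )

    func main() {
        // One Buffer reused across encodes amortizes allocations compared
        // with package-level Marshal, which allocates a Buffer each call.
        buf := proto.NewBuffer(nil)
        for _, s := range []string{"foo", "bar"} {
            buf.Reset()
            if err := buf.Marshal(&pb.OtherMessage{Value: []byte(s)}); err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%q -> % x\n", s, buf.Bytes())
        }
    }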
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - break - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - if err != nil { - fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - if err != nil { - fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. 
-// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
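Tying SetDefaults back to the test.proto example in the package comment of lib.go above: the unset Type field, whose declared default is 77, gains a freshly allocated pointer, while fields that are already set are left alone. A sketch — the relative pb import path is the illustrative one from that comment, not a real package:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // illustrative path from the package comment above
    )

    func main() {
        t := &pb.Test{Label: proto.String("hello")} // Type is unset (nil)
        proto.SetDefaults(t)
        fmt.Println(*t.Type) // 77: the declared default was materialized
        // Label was already set, so SetDefaults left it untouched.
        fmt.Println(t.GetLabel()) // "hello"
    }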
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. 
-// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -type mapKeys []reflect.Value - -func (s mapKeys) Len() int { return len(s) } -func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s mapKeys) Less(i, j int) bool { - return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index 9d912bce19..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,287 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and MessageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. 
We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. -// -// When a proto1 proto has a field that looks like: -// optional message info = 3; -// the protocol compiler produces a field in the generated struct that looks like: -// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` -// The package is automatically inserted so there is no need for that proto file to -// import this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type MessageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure MessageSet is a Message. -var _ Message = (*MessageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *MessageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *MessageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *MessageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return ErrNoMessageTypeId - } - return nil // TODO: return error instead? -} - -func (ms *MessageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return ErrNoMessageTypeId - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *MessageSet) Reset() { *ms = MessageSet{} } -func (ms *MessageSet) String() string { return CompactTextString(ms) } -func (*MessageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { - if err := encodeExtensionMap(m); err != nil { - return nil, err - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
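For concreteness, the group framing described in the wire-format comment above can be built by hand for a single item with type_id 12345 and payload "hoo" (the same values the test file below uses). The local varint helper keeps the sketch self-contained; the package's exported EncodeVarint would produce the same bytes:

    package main

    import "fmt"

    // varint is a minimal unsigned varint encoder, equivalent in output to
    // proto.EncodeVarint.
    func varint(x uint64) []byte {
        var b []byte
        for x >= 0x80 {
            b = append(b, byte(x)|0x80)
            x >>= 7
        }
        return append(b, byte(x))
    }

    func main() {
        var b []byte
        b = append(b, 1<<3|3) // Item: field 1, wire type 3 (start group)
        b = append(b, 2<<3|0) // type_id: field 2, wire type 0 (varint)
        b = append(b, varint(12345)...)
        b = append(b, 3<<3|2) // message: field 3, wire type 2 (bytes)
        b = append(b, varint(uint64(len("hoo")))...)
        b = append(b, "hoo"...)
        b = append(b, 1<<3|4) // field 1, wire type 4 (end group)
        fmt.Printf("% x\n", b) // 0b 10 b9 60 1a 03 68 6f 6f 0c
    }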
-func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { - ms := new(MessageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go deleted file mode 100644 index 7c29bccf4b..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "testing" -) - -func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { - // Check that a repeated message set entry will be concatenated. - in := &MessageSet{ - Item: []*_MessageSet_Item{ - {TypeId: Int32(12345), Message: []byte("hoo")}, - {TypeId: Int32(12345), Message: []byte("hah")}, - }, - } - b, err := Marshal(in) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("Marshaled bytes: %q", b) - - m := make(map[int32]Extension) - if err := UnmarshalMessageSet(b, m); err != nil { - t.Fatalf("UnmarshalMessageSet: %v", err) - } - ext, ok := m[12345] - if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", m) - } - // Skip wire type/field number and length varints. - got := skipVarint(skipVarint(ext.enc)) - if want := []byte("hoohah"); !bytes.Equal(got, want) { - t.Errorf("Combined extension is %q, want %q", got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 749919d250..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,479 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. 
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice returns the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x.
-func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. 
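The word32_Set and word64_Set functions above share one allocation trick: rather than allocating every decoded value separately, they carve pointers out of a Buffer-owned block and reslice it forward. A self-contained sketch of that pattern follows; the pool type and poolSize constant are illustrative names, not this package's API.

    package main

    import "fmt"

    const poolSize = 16 // stands in for uint32PoolSize

    type pool struct{ int32s []int32 }

    // newInt32 hands out a pointer into a shared block, so poolSize values
    // cost one allocation instead of poolSize allocations.
    func (p *pool) newInt32(x int32) *int32 {
    	if len(p.int32s) == 0 {
    		p.int32s = make([]int32, poolSize)
    	}
    	p.int32s[0] = x
    	v := &p.int32s[0]
    	p.int32s = p.int32s[1:]
    	return v
    }

    func main() {
    	var p pool
    	a, b := p.newInt32(7), p.newInt32(8)
    	fmt.Println(*a, *b) // 7 8; both pointers share one backing array
    }
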
-func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index e9be0fe92e..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,266 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. 
-func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice returns the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v.
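All of the accessors above reduce to the same cast: add a byte offset to the struct's base address and reinterpret the result as a typed pointer. A standalone sketch of that technique, with a hypothetical struct:

    package main

    import (
    	"fmt"
    	"reflect"
    	"unsafe"
    )

    type example struct {
    	A int32
    	B string
    }

    func main() {
    	m := &example{A: 1}
    	f, _ := reflect.TypeOf(example{}).FieldByName("B")

    	// f.Offset plays the role of the field type above; the cast mirrors
    	// structPointer_StringVal.
    	p := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + f.Offset))
    	*p = "written through the computed field pointer"
    	fmt.Println(m.B)
    }
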
-func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index d74844ab2a..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,742 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - if p.OrigName != p.Name { - s += ",name=" + p.OrigName - } - if p.proto3 { - s += ",proto3" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style.
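The tagMap above is worth a closer look: tags below tagMapFastLimit index a slice directly, so the common small-tag lookup costs one bounds check instead of a map probe, with -1 marking absent entries. A standalone sketch of the same structure (tagIndex and fastLimit are illustrative names):

    package main

    import "fmt"

    const fastLimit = 1024

    type tagIndex struct {
    	fast []int       // fast[tag] = field index, -1 if absent
    	slow map[int]int // fallback for tags >= fastLimit (or <= 0)
    }

    func (t *tagIndex) put(tag, fi int) {
    	if tag > 0 && tag < fastLimit {
    		for len(t.fast) < tag+1 {
    			t.fast = append(t.fast, -1)
    		}
    		t.fast[tag] = fi
    		return
    	}
    	if t.slow == nil {
    		t.slow = make(map[int]int)
    	}
    	t.slow[tag] = fi
    }

    func (t *tagIndex) get(tag int) (int, bool) {
    	if tag > 0 && tag < fastLimit {
    		if tag >= len(t.fast) {
    			return 0, false
    		}
    		fi := t.fast[tag]
    		return fi, fi >= 0
    	}
    	fi, ok := t.slow[tag]
    	return fi, ok
    }

    func main() {
    	var t tagIndex
    	t.put(3, 0)
    	t.put(5000, 1)
    	fmt.Println(t.get(3))    // 0 true  (slice path)
    	fmt.Println(t.get(5000)) // 1 true  (map path)
    }
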
-func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
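Since Properties, Init, and Parse are exported, the tag grammar handled above can be exercised directly. A short usage sketch against a hypothetical struct, assuming this package's exported Properties API:

    package main

    import (
    	"fmt"
    	"reflect"

    	"github.com/golang/protobuf/proto"
    )

    type Example struct {
    	Name string `protobuf:"bytes,1,opt,name=name"`
    }

    func main() {
    	f, _ := reflect.TypeOf(Example{}).FieldByName("Name")

    	p := new(proto.Properties)
    	p.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)

    	// The parsed pieces of "bytes,1,opt,name=name":
    	fmt.Println(p.Wire, p.Tag, p.Optional, p.OrigName) // bytes 1 true name
    }
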
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_byte - p.dec = (*Buffer).dec_slice_byte - p.size = size_slice_byte - // This is a []byte, which is either a bytes field, - // or the value of a map field. In the latter case, - // we always encode an empty []byte, so we should not - // use the proto3 enc/size funcs. - // f == nil iff this is the key/value of a map field. - if p.proto3 && f != nil { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. 
- vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go deleted file mode 100644 index 37c7782092..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go +++ /dev/null @@ -1,122 +0,0 @@ -// Code generated by protoc-gen-go. -// source: proto3_proto/proto3.proto -// DO NOT EDIT! - -/* -Package proto3_proto is a generated protocol buffer package. 
- -It is generated from these files: - proto3_proto/proto3.proto - -It has these top-level messages: - Message - Nested - MessageWithMap -*/ -package proto3_proto - -import proto "github.com/golang/protobuf/proto" -import testdata "github.com/golang/protobuf/proto/testdata" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal - -type Message_Humour int32 - -const ( - Message_UNKNOWN Message_Humour = 0 - Message_PUNS Message_Humour = 1 - Message_SLAPSTICK Message_Humour = 2 - Message_BILL_BAILEY Message_Humour = 3 -) - -var Message_Humour_name = map[int32]string{ - 0: "UNKNOWN", - 1: "PUNS", - 2: "SLAPSTICK", - 3: "BILL_BAILEY", -} -var Message_Humour_value = map[string]int32{ - "UNKNOWN": 0, - "PUNS": 1, - "SLAPSTICK": 2, - "BILL_BAILEY": 3, -} - -func (x Message_Humour) String() string { - return proto.EnumName(Message_Humour_name, int32(x)) -} - -type Message struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` - HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` - TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` - Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` - Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` - Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` - Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` - Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} - -func (m *Message) GetNested() *Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *Message) GetTerrain() map[string]*Nested { - if m != nil { - return m.Terrain - } - return nil -} - -func (m *Message) GetProto2Field() *testdata.SubDefaults { - if m != nil { - return m.Proto2Field - } - return nil -} - -func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { - if m != nil { - return m.Proto2Value - } - return nil -} - -type Nested struct { - Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` -} - -func (m *Nested) Reset() { *m = Nested{} } -func (m *Nested) String() string { return proto.CompactTextString(m) } -func (*Nested) ProtoMessage() {} - -type MessageWithMap struct { - ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return 
 m.ByteMapping - } - return nil -} - -func init() { - proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto deleted file mode 100644 index e2311d9294..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto +++ /dev/null @@ -1,68 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -import "testdata/test.proto"; - -package proto3_proto; - -message Message { - enum Humour { - UNKNOWN = 0; - PUNS = 1; - SLAPSTICK = 2; - BILL_BAILEY = 3; - } - - string name = 1; - Humour hilarity = 2; - uint32 height_in_cm = 3; - bytes data = 4; - int64 result_count = 7; - bool true_scotsman = 8; - float score = 9; - - repeated uint64 key = 5; - Nested nested = 6; - - map<string, Nested> terrain = 10; - testdata.SubDefaults proto2_field = 11; - map<string, testdata.SubDefaults> proto2_value = 13; -} - -message Nested { - string bunny = 1; -} - -message MessageWithMap { - map<bool, bytes> byte_mapping = 1; -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go deleted file mode 100644 index 462f8055c3..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/proto3_proto" - tpb "github.com/golang/protobuf/proto/testdata" -) - -func TestProto3ZeroValues(t *testing.T) { - tests := []struct { - desc string - m proto.Message - }{ - {"zero message", &pb.Message{}}, - {"empty bytes field", &pb.Message{Data: []byte{}}}, - } - for _, test := range tests { - b, err := proto.Marshal(test.m) - if err != nil { - t.Errorf("%s: proto.Marshal: %v", test.desc, err) - continue - } - if len(b) > 0 { - t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) - } - } -} - -func TestRoundTripProto3(t *testing.T) { - m := &pb.Message{ - Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" - Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 - HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 - Data: []byte("roboto"), // (2 | 4<<3): 0x22 0x06 "roboto" - ResultCount: 47, // (0 | 7<<3): 0x38 0x2f - TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 - Score: 8.1, // (5 | 9<<3): 0x4d <8.1> - - Key: []uint64{1, 0xdeadbeef}, - Nested: &pb.Nested{ - Bunny: "Monty", - }, - } - t.Logf(" m: %v", m) - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal: %v", err) - } - t.Logf(" b: %q", b) - - m2 := new(pb.Message) - if err := proto.Unmarshal(b, m2); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - t.Logf("m2: %v", m2) - - if !proto.Equal(m, m2) { - t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) - } -} - -func TestProto3SetDefaults(t *testing.T) { - in := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: new(tpb.SubDefaults), - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": new(tpb.SubDefaults), - }, - } - - got := proto.Clone(in).(*pb.Message) - proto.SetDefaults(got) - - // There are no defaults in proto3. Everything should be the zero value, but - // we need to remember to set defaults for nested proto2 messages.
- want := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": &tpb.SubDefaults{N: proto.Int64(7)}, - }, - } - - if !proto.Equal(got, want) { - t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go deleted file mode 100644 index a2729c39a1..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "testing" -) - -// This is a separate file and package from size_test.go because that one uses -// generated messages and thus may not be in package proto without having a circular -// dependency, whereas this file tests unexported details of size.go. - -func TestVarintSize(t *testing.T) { - // Check the edge cases carefully. - testCases := []struct { - n uint64 - size int - }{ - {0, 1}, - {1, 1}, - {127, 1}, - {128, 2}, - {16383, 2}, - {16384, 3}, - {1<<63 - 1, 9}, - {1 << 63, 10}, - } - for _, tc := range testCases { - size := sizeVarint(tc.n) - if size != tc.size { - t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go deleted file mode 100644 index db5614fd1d..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. 
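The edge cases in TestVarintSize above pin down the sizing rule: a varint spends one byte per seven bits of payload, and zero still costs a byte, so the breakpoints sit at 128, 16384, and so on. A standalone sketch of one plausible implementation of that rule (not the package's unexported helper itself):

    package main

    import "fmt"

    // sizeVarint counts one byte per seven bits, matching the table in the
    // test above.
    func sizeVarint(x uint64) (n int) {
    	for {
    		n++
    		x >>= 7
    		if x == 0 {
    			return n
    		}
    	}
    }

    func main() {
    	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
    		fmt.Printf("sizeVarint(%d) = %d\n", x, sizeVarint(x)) // sizes: 1 1 1 2 2 3 9 10
    	}
    }
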
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "log" - "strings" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} - -// messageWithExtension2 is in equal_test.go. -var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} - -func init() { - if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - - // Force messageWithExtension3 to have the extension encoded. - Marshal(messageWithExtension3) - -} - -var SizeTests = []struct { - desc string - pb Message -}{ - {"empty", &pb.OtherMessage{}}, - // Basic types. - {"bool", &pb.Defaults{F_Bool: Bool(true)}}, - {"int32", &pb.Defaults{F_Int32: Int32(12)}}, - {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, - {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, - {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, - {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, - {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, - {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, - {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, - {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, - {"float", &pb.Defaults{F_Float: Float32(12.6)}}, - {"double", &pb.Defaults{F_Double: Float64(13.9)}}, - {"string", &pb.Defaults{F_String: String("niles")}}, - {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, - {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, - {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, - {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, - {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, - // Repeated. 
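The edge cases in TestVarintSize above sit exactly on the 7-bit group boundaries of the varint encoding. The sizeVarint under test is unexported in package proto; a hedged stand-in (an illustration, not the package's own code) that matches the tested table:

package main

import "fmt"

// varintSize counts one byte per started group of 7 significant bits:
// 0..127 take one byte, 128..16383 two, and a full 64-bit value ten.
func varintSize(n uint64) int {
	size := 1
	for n >= 1<<7 {
		n >>= 7
		size++
	}
	return size
}

func main() {
	fmt.Println(varintSize(127), varintSize(128)) // 1 2: the first boundary
	fmt.Println(varintSize(1 << 63))              // 10: the 64-bit worst case
}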
- {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, - {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, - {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, - {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, - {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, - {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ - // Need enough large numbers to verify that the header is counting the number of bytes - // for the field, not the number of elements. - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - }}}, - {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, - {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, - // Nested. - {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, - {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, - // Other things. - {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, - {"extension (unencoded)", messageWithExtension1}, - {"extension (encoded)", messageWithExtension3}, - // proto3 message - {"proto3 empty", &proto3pb.Message{}}, - {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, - {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, - {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, - {"proto3 float", &proto3pb.Message{Score: 12.6}}, - {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, - {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, - {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, - {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, - - {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, - {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, - {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, - {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, - - {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, - {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, - {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, -} - -func TestSize(t *testing.T) { - for _, tc := range SizeTests { - size := Size(tc.pb) - b, err := Marshal(tc.pb) - if err != nil { - t.Errorf("%v: Marshal failed: %v", tc.desc, err) - continue - } - if size != len(b) { - t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) - t.Logf("%v: bytes: %#v", tc.desc, b) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile deleted file mode 100644 index fc288628a7..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. 
All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -include ../../Make.protobuf - -all: regenerate - -regenerate: - rm -f test.pb.go - make test.pb.go - -# The following rules are just aids to development. Not needed for typical testing. - -diff: regenerate - git diff test.pb.go - -restore: - cp test.pb.go.golden test.pb.go - -preserve: - cp test.pb.go test.pb.go.golden diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go deleted file mode 100644 index 7172d0e969..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Verify that the compiler output for test.proto is unchanged. - -package testdata - -import ( - "crypto/sha1" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "testing" -) - -// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. -func sum(t *testing.T, name string) string { - data, err := ioutil.ReadFile(name) - if err != nil { - t.Fatal(err) - } - t.Logf("sum(%q): length is %d", name, len(data)) - hash := sha1.New() - _, err = hash.Write(data) - if err != nil { - t.Fatal(err) - } - return fmt.Sprintf("% x", hash.Sum(nil)) -} - -func run(t *testing.T, name string, args ...string) { - cmd := exec.Command(name, args...) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - t.Fatal(err) - } -} - -func TestGolden(t *testing.T) { - // Compute the original checksum. - goldenSum := sum(t, "test.pb.go") - // Run the proto compiler. - run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") - newFile := filepath.Join(os.TempDir(), "test.pb.go") - defer os.Remove(newFile) - // Compute the new checksum. - newSum := sum(t, newFile) - // Verify - if newSum != goldenSum { - run(t, "diff", "-u", "test.pb.go", newFile) - t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go deleted file mode 100644 index 13674a4491..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go +++ /dev/null @@ -1,2746 +0,0 @@ -// Code generated by protoc-gen-go. -// source: test.proto -// DO NOT EDIT! - -/* -Package testdata is a generated protocol buffer package. - -It is generated from these files: - test.proto - -It has these top-level messages: - GoEnum - GoTestField - GoTest - GoSkipTest - NonPackedTest - PackedTest - MaxTag - OldMessage - NewMessage - InnerMessage - OtherMessage - MyMessage - Ext - DefaultsMessage - MyMessageSet - Empty - MessageList - Strings - Defaults - SubDefaults - RepeatedEnum - MoreRepeated - GroupOld - GroupNew - FloatingPoint - MessageWithMap -*/ -package testdata - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -type FOO int32 - -const ( - FOO_FOO1 FOO = 1 -) - -var FOO_name = map[int32]string{ - 1: "FOO1", -} -var FOO_value = map[string]int32{ - "FOO1": 1, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} - -// An enum, for completeness. 
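Every enum in this generated file repeats the FOO pattern above: a typed int32, parallel name/value maps, and a pointer-returning Enum() so a constant can populate the pointer-typed fields proto2 uses for presence. A usage sketch, assuming the generated testdata package (GoEnum is defined further down in this file):

package main

import (
	"fmt"

	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	// Enum() copies the constant to the heap and returns *FOO, the type
	// the generated field expects; GetFoo dereferences it nil-safely.
	m := &pb.GoEnum{Foo: pb.FOO_FOO1.Enum()}
	fmt.Println(m.GetFoo()) // FOO1, via the generated name map and String()
}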
-type GoTest_KIND int32 - -const ( - GoTest_VOID GoTest_KIND = 0 - // Basic types - GoTest_BOOL GoTest_KIND = 1 - GoTest_BYTES GoTest_KIND = 2 - GoTest_FINGERPRINT GoTest_KIND = 3 - GoTest_FLOAT GoTest_KIND = 4 - GoTest_INT GoTest_KIND = 5 - GoTest_STRING GoTest_KIND = 6 - GoTest_TIME GoTest_KIND = 7 - // Groupings - GoTest_TUPLE GoTest_KIND = 8 - GoTest_ARRAY GoTest_KIND = 9 - GoTest_MAP GoTest_KIND = 10 - // Table types - GoTest_TABLE GoTest_KIND = 11 - // Functions - GoTest_FUNCTION GoTest_KIND = 12 -) - -var GoTest_KIND_name = map[int32]string{ - 0: "VOID", - 1: "BOOL", - 2: "BYTES", - 3: "FINGERPRINT", - 4: "FLOAT", - 5: "INT", - 6: "STRING", - 7: "TIME", - 8: "TUPLE", - 9: "ARRAY", - 10: "MAP", - 11: "TABLE", - 12: "FUNCTION", -} -var GoTest_KIND_value = map[string]int32{ - "VOID": 0, - "BOOL": 1, - "BYTES": 2, - "FINGERPRINT": 3, - "FLOAT": 4, - "INT": 5, - "STRING": 6, - "TIME": 7, - "TUPLE": 8, - "ARRAY": 9, - "MAP": 10, - "TABLE": 11, - "FUNCTION": 12, -} - -func (x GoTest_KIND) Enum() *GoTest_KIND { - p := new(GoTest_KIND) - *p = x - return p -} -func (x GoTest_KIND) String() string { - return proto.EnumName(GoTest_KIND_name, int32(x)) -} -func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") - if err != nil { - return err - } - *x = GoTest_KIND(value) - return nil -} - -type MyMessage_Color int32 - -const ( - MyMessage_RED MyMessage_Color = 0 - MyMessage_GREEN MyMessage_Color = 1 - MyMessage_BLUE MyMessage_Color = 2 -) - -var MyMessage_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var MyMessage_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x MyMessage_Color) Enum() *MyMessage_Color { - p := new(MyMessage_Color) - *p = x - return p -} -func (x MyMessage_Color) String() string { - return proto.EnumName(MyMessage_Color_name, int32(x)) -} -func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") - if err != nil { - return err - } - *x = MyMessage_Color(value) - return nil -} - -type DefaultsMessage_DefaultsEnum int32 - -const ( - DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 - DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 - DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 -) - -var DefaultsMessage_DefaultsEnum_name = map[int32]string{ - 0: "ZERO", - 1: "ONE", - 2: "TWO", -} -var DefaultsMessage_DefaultsEnum_value = map[string]int32{ - "ZERO": 0, - "ONE": 1, - "TWO": 2, -} - -func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { - p := new(DefaultsMessage_DefaultsEnum) - *p = x - return p -} -func (x DefaultsMessage_DefaultsEnum) String() string { - return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) -} -func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") - if err != nil { - return err - } - *x = DefaultsMessage_DefaultsEnum(value) - return nil -} - -type Defaults_Color int32 - -const ( - Defaults_RED Defaults_Color = 0 - Defaults_GREEN Defaults_Color = 1 - Defaults_BLUE Defaults_Color = 2 -) - -var Defaults_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Defaults_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Defaults_Color) Enum() *Defaults_Color { - p := new(Defaults_Color) - *p = x - 
return p -} -func (x Defaults_Color) String() string { - return proto.EnumName(Defaults_Color_name, int32(x)) -} -func (x *Defaults_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") - if err != nil { - return err - } - *x = Defaults_Color(value) - return nil -} - -type RepeatedEnum_Color int32 - -const ( - RepeatedEnum_RED RepeatedEnum_Color = 1 -) - -var RepeatedEnum_Color_name = map[int32]string{ - 1: "RED", -} -var RepeatedEnum_Color_value = map[string]int32{ - "RED": 1, -} - -func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { - p := new(RepeatedEnum_Color) - *p = x - return p -} -func (x RepeatedEnum_Color) String() string { - return proto.EnumName(RepeatedEnum_Color_name, int32(x)) -} -func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") - if err != nil { - return err - } - *x = RepeatedEnum_Color(value) - return nil -} - -type GoEnum struct { - Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} - -func (m *GoEnum) GetFoo() FOO { - if m != nil && m.Foo != nil { - return *m.Foo - } - return FOO_FOO1 -} - -type GoTestField struct { - Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} - -func (m *GoTestField) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *GoTestField) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -type GoTest struct { - // Some typical parameters - Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` - // Required, repeated and optional foreign fields. 
- RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` - // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` - // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` - // Optional fields of all basic types - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 
`protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` - // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` - // Packed repeated fields (no string or bytes). 
- F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} - -const Default_GoTest_F_BoolDefaulted bool = true -const Default_GoTest_F_Int32Defaulted int32 = 32 -const Default_GoTest_F_Int64Defaulted int64 = 64 -const Default_GoTest_F_Fixed32Defaulted uint32 = 320 -const Default_GoTest_F_Fixed64Defaulted uint64 = 640 -const Default_GoTest_F_Uint32Defaulted uint32 = 3200 -const Default_GoTest_F_Uint64Defaulted uint64 = 6400 -const Default_GoTest_F_FloatDefaulted float32 = 314159 -const Default_GoTest_F_DoubleDefaulted float64 = 271828 -const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" - -var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") - -const Default_GoTest_F_Sint32Defaulted int32 = -32 -const Default_GoTest_F_Sint64Defaulted int64 = -64 - -func (m *GoTest) GetKind() GoTest_KIND { - if m != nil && m.Kind != nil { - return *m.Kind - } - return GoTest_VOID -} - -func (m *GoTest) GetTable() string { - if m != nil && m.Table != nil { - return *m.Table - } - return "" -} - -func (m *GoTest) GetParam() int32 { - if m != nil && m.Param != nil { - return *m.Param - } - return 0 -} - -func (m *GoTest) GetRequiredField() *GoTestField { - if m != nil { - return m.RequiredField - } - return nil -} - -func (m *GoTest) GetRepeatedField() []*GoTestField { - if m != nil { - return m.RepeatedField - } - return nil -} - -func (m *GoTest) GetOptionalField() *GoTestField { - if m != nil { - return m.OptionalField - } - return nil -} - -func (m *GoTest) GetF_BoolRequired() bool { - if m != nil && 
m.F_BoolRequired != nil { - return *m.F_BoolRequired - } - return false -} - -func (m *GoTest) GetF_Int32Required() int32 { - if m != nil && m.F_Int32Required != nil { - return *m.F_Int32Required - } - return 0 -} - -func (m *GoTest) GetF_Int64Required() int64 { - if m != nil && m.F_Int64Required != nil { - return *m.F_Int64Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Required() uint32 { - if m != nil && m.F_Fixed32Required != nil { - return *m.F_Fixed32Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Required() uint64 { - if m != nil && m.F_Fixed64Required != nil { - return *m.F_Fixed64Required - } - return 0 -} - -func (m *GoTest) GetF_Uint32Required() uint32 { - if m != nil && m.F_Uint32Required != nil { - return *m.F_Uint32Required - } - return 0 -} - -func (m *GoTest) GetF_Uint64Required() uint64 { - if m != nil && m.F_Uint64Required != nil { - return *m.F_Uint64Required - } - return 0 -} - -func (m *GoTest) GetF_FloatRequired() float32 { - if m != nil && m.F_FloatRequired != nil { - return *m.F_FloatRequired - } - return 0 -} - -func (m *GoTest) GetF_DoubleRequired() float64 { - if m != nil && m.F_DoubleRequired != nil { - return *m.F_DoubleRequired - } - return 0 -} - -func (m *GoTest) GetF_StringRequired() string { - if m != nil && m.F_StringRequired != nil { - return *m.F_StringRequired - } - return "" -} - -func (m *GoTest) GetF_BytesRequired() []byte { - if m != nil { - return m.F_BytesRequired - } - return nil -} - -func (m *GoTest) GetF_Sint32Required() int32 { - if m != nil && m.F_Sint32Required != nil { - return *m.F_Sint32Required - } - return 0 -} - -func (m *GoTest) GetF_Sint64Required() int64 { - if m != nil && m.F_Sint64Required != nil { - return *m.F_Sint64Required - } - return 0 -} - -func (m *GoTest) GetF_BoolRepeated() []bool { - if m != nil { - return m.F_BoolRepeated - } - return nil -} - -func (m *GoTest) GetF_Int32Repeated() []int32 { - if m != nil { - return m.F_Int32Repeated - } - return nil -} - -func (m *GoTest) GetF_Int64Repeated() []int64 { - if m != nil { - return m.F_Int64Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed32Repeated() []uint32 { - if m != nil { - return m.F_Fixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed64Repeated() []uint64 { - if m != nil { - return m.F_Fixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint32Repeated() []uint32 { - if m != nil { - return m.F_Uint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint64Repeated() []uint64 { - if m != nil { - return m.F_Uint64Repeated - } - return nil -} - -func (m *GoTest) GetF_FloatRepeated() []float32 { - if m != nil { - return m.F_FloatRepeated - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeated() []float64 { - if m != nil { - return m.F_DoubleRepeated - } - return nil -} - -func (m *GoTest) GetF_StringRepeated() []string { - if m != nil { - return m.F_StringRepeated - } - return nil -} - -func (m *GoTest) GetF_BytesRepeated() [][]byte { - if m != nil { - return m.F_BytesRepeated - } - return nil -} - -func (m *GoTest) GetF_Sint32Repeated() []int32 { - if m != nil { - return m.F_Sint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sint64Repeated() []int64 { - if m != nil { - return m.F_Sint64Repeated - } - return nil -} - -func (m *GoTest) GetF_BoolOptional() bool { - if m != nil && m.F_BoolOptional != nil { - return *m.F_BoolOptional - } - return false -} - -func (m *GoTest) GetF_Int32Optional() int32 { - if m != nil && m.F_Int32Optional != nil { - return *m.F_Int32Optional - } - return 0 -} - -func (m 
*GoTest) GetF_Int64Optional() int64 { - if m != nil && m.F_Int64Optional != nil { - return *m.F_Int64Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Optional() uint32 { - if m != nil && m.F_Fixed32Optional != nil { - return *m.F_Fixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Optional() uint64 { - if m != nil && m.F_Fixed64Optional != nil { - return *m.F_Fixed64Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint32Optional() uint32 { - if m != nil && m.F_Uint32Optional != nil { - return *m.F_Uint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint64Optional() uint64 { - if m != nil && m.F_Uint64Optional != nil { - return *m.F_Uint64Optional - } - return 0 -} - -func (m *GoTest) GetF_FloatOptional() float32 { - if m != nil && m.F_FloatOptional != nil { - return *m.F_FloatOptional - } - return 0 -} - -func (m *GoTest) GetF_DoubleOptional() float64 { - if m != nil && m.F_DoubleOptional != nil { - return *m.F_DoubleOptional - } - return 0 -} - -func (m *GoTest) GetF_StringOptional() string { - if m != nil && m.F_StringOptional != nil { - return *m.F_StringOptional - } - return "" -} - -func (m *GoTest) GetF_BytesOptional() []byte { - if m != nil { - return m.F_BytesOptional - } - return nil -} - -func (m *GoTest) GetF_Sint32Optional() int32 { - if m != nil && m.F_Sint32Optional != nil { - return *m.F_Sint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Sint64Optional() int64 { - if m != nil && m.F_Sint64Optional != nil { - return *m.F_Sint64Optional - } - return 0 -} - -func (m *GoTest) GetF_BoolDefaulted() bool { - if m != nil && m.F_BoolDefaulted != nil { - return *m.F_BoolDefaulted - } - return Default_GoTest_F_BoolDefaulted -} - -func (m *GoTest) GetF_Int32Defaulted() int32 { - if m != nil && m.F_Int32Defaulted != nil { - return *m.F_Int32Defaulted - } - return Default_GoTest_F_Int32Defaulted -} - -func (m *GoTest) GetF_Int64Defaulted() int64 { - if m != nil && m.F_Int64Defaulted != nil { - return *m.F_Int64Defaulted - } - return Default_GoTest_F_Int64Defaulted -} - -func (m *GoTest) GetF_Fixed32Defaulted() uint32 { - if m != nil && m.F_Fixed32Defaulted != nil { - return *m.F_Fixed32Defaulted - } - return Default_GoTest_F_Fixed32Defaulted -} - -func (m *GoTest) GetF_Fixed64Defaulted() uint64 { - if m != nil && m.F_Fixed64Defaulted != nil { - return *m.F_Fixed64Defaulted - } - return Default_GoTest_F_Fixed64Defaulted -} - -func (m *GoTest) GetF_Uint32Defaulted() uint32 { - if m != nil && m.F_Uint32Defaulted != nil { - return *m.F_Uint32Defaulted - } - return Default_GoTest_F_Uint32Defaulted -} - -func (m *GoTest) GetF_Uint64Defaulted() uint64 { - if m != nil && m.F_Uint64Defaulted != nil { - return *m.F_Uint64Defaulted - } - return Default_GoTest_F_Uint64Defaulted -} - -func (m *GoTest) GetF_FloatDefaulted() float32 { - if m != nil && m.F_FloatDefaulted != nil { - return *m.F_FloatDefaulted - } - return Default_GoTest_F_FloatDefaulted -} - -func (m *GoTest) GetF_DoubleDefaulted() float64 { - if m != nil && m.F_DoubleDefaulted != nil { - return *m.F_DoubleDefaulted - } - return Default_GoTest_F_DoubleDefaulted -} - -func (m *GoTest) GetF_StringDefaulted() string { - if m != nil && m.F_StringDefaulted != nil { - return *m.F_StringDefaulted - } - return Default_GoTest_F_StringDefaulted -} - -func (m *GoTest) GetF_BytesDefaulted() []byte { - if m != nil && m.F_BytesDefaulted != nil { - return m.F_BytesDefaulted - } - return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
-} - -func (m *GoTest) GetF_Sint32Defaulted() int32 { - if m != nil && m.F_Sint32Defaulted != nil { - return *m.F_Sint32Defaulted - } - return Default_GoTest_F_Sint32Defaulted -} - -func (m *GoTest) GetF_Sint64Defaulted() int64 { - if m != nil && m.F_Sint64Defaulted != nil { - return *m.F_Sint64Defaulted - } - return Default_GoTest_F_Sint64Defaulted -} - -func (m *GoTest) GetF_BoolRepeatedPacked() []bool { - if m != nil { - return m.F_BoolRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { - if m != nil { - return m.F_Int32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { - if m != nil { - return m.F_Int64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Fixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Fixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Uint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Uint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { - if m != nil { - return m.F_FloatRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { - if m != nil { - return m.F_DoubleRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { - if m != nil { - return m.Requiredgroup - } - return nil -} - -func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { - if m != nil { - return m.Repeatedgroup - } - return nil -} - -func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -// Required, repeated, and optional groups. 
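Before the group messages that follow, note the one shape all the getters above share: they are safe on nil receivers and unset fields, returning the Go zero value or, for defaulted fields, the declared default; the bytes default is copied on every call so callers cannot mutate the shared slice. A brief usage sketch, assuming the generated testdata package:

package main

import (
	"fmt"

	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	g := new(pb.GoTest) // nothing set
	fmt.Println(g.GetF_Int32Defaulted())  // 32, the declared default
	fmt.Println(g.GetF_StringDefaulted()) // "hello, \"world!\"\n"
	b := g.GetF_BytesDefaulted()          // a fresh copy of []byte("Bignose")
	b[0] = 'X'                            // safe: the shared default is untouched
}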
-type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} - -func (m *GoTest_RequiredGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} - -func (m *GoTest_RepeatedGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} - -func (m *GoTest_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} - -func (m *GoSkipTest) GetSkipInt32() int32 { - if m != nil && m.SkipInt32 != nil { - return *m.SkipInt32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed32() uint32 { - if m != nil && m.SkipFixed32 != nil { - return *m.SkipFixed32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed64() uint64 { - if m != nil && m.SkipFixed64 != nil { - return *m.SkipFixed64 - } - return 0 -} - -func (m *GoSkipTest) GetSkipString() string { - if m != nil && m.SkipString != nil { - return *m.SkipString - } - return "" -} - -func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { - if m != nil { - return m.Skipgroup - } - return nil -} - -type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} - -func (m 
*GoSkipTest_SkipGroup) GetGroupInt32() int32 { - if m != nil && m.GroupInt32 != nil { - return *m.GroupInt32 - } - return 0 -} - -func (m *GoSkipTest_SkipGroup) GetGroupString() string { - if m != nil && m.GroupString != nil { - return *m.GroupString - } - return "" -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -type NonPackedTest struct { - A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) ProtoMessage() {} - -func (m *NonPackedTest) GetA() []int32 { - if m != nil { - return m.A - } - return nil -} - -type PackedTest struct { - B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} - -func (m *PackedTest) GetB() []int32 { - if m != nil { - return m.B - } - return nil -} - -type MaxTag struct { - // Maximum possible tag number. - LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} - -func (m *MaxTag) GetLastField() string { - if m != nil && m.LastField != nil { - return *m.LastField - } - return "" -} - -type OldMessage struct { - Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} - -func (m *OldMessage) GetNested() *OldMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *OldMessage) GetNum() int32 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type OldMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} - -func (m *OldMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -type NewMessage struct { - Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - // This is an int32 in OldMessage. 
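The int32-to-int64 widening flagged by the comment above is what makes NewMessage wire compatible: both fields are varints at tag 2, so an old payload decodes cleanly at the wider type. A hedged round-trip sketch, assuming the generated testdata package:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	old := &pb.OldMessage{Num: proto.Int32(42)}
	buf, err := proto.Marshal(old)
	if err != nil {
		panic(err)
	}
	newer := new(pb.NewMessage)
	if err := proto.Unmarshal(buf, newer); err != nil {
		panic(err)
	}
	fmt.Println(newer.GetNum()) // 42: the varint is reread as int64
}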
- Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} - -func (m *NewMessage) GetNested() *NewMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *NewMessage) GetNum() int64 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type NewMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} - -func (m *NewMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *NewMessage_Nested) GetFoodGroup() string { - if m != nil && m.FoodGroup != nil { - return *m.FoodGroup - } - return "" -} - -type InnerMessage struct { - Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` - Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` - Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InnerMessage) Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) ProtoMessage() {} - -const Default_InnerMessage_Port int32 = 4000 - -func (m *InnerMessage) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *InnerMessage) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return Default_InnerMessage_Port -} - -func (m *InnerMessage) GetConnected() bool { - if m != nil && m.Connected != nil { - return *m.Connected - } - return false -} - -type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} - -func (m *OtherMessage) GetKey() int64 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -func (m *OtherMessage) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *OtherMessage) GetWeight() float32 { - if m != nil && m.Weight != nil { - return *m.Weight - } - return 0 -} - -func (m *OtherMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" 
json:"others,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` - // This field becomes [][]byte in the generated code. - RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} - -var extRange_MyMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessage -} -func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *MyMessage) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *MyMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MyMessage) GetQuote() string { - if m != nil && m.Quote != nil { - return *m.Quote - } - return "" -} - -func (m *MyMessage) GetPet() []string { - if m != nil { - return m.Pet - } - return nil -} - -func (m *MyMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -func (m *MyMessage) GetOthers() []*OtherMessage { - if m != nil { - return m.Others - } - return nil -} - -func (m *MyMessage) GetRepInner() []*InnerMessage { - if m != nil { - return m.RepInner - } - return nil -} - -func (m *MyMessage) GetBikeshed() MyMessage_Color { - if m != nil && m.Bikeshed != nil { - return *m.Bikeshed - } - return MyMessage_RED -} - -func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *MyMessage) GetRepBytes() [][]byte { - if m != nil { - return m.RepBytes - } - return nil -} - -func (m *MyMessage) GetBigfloat() float64 { - if m != nil && m.Bigfloat != nil { - return *m.Bigfloat - } - return 0 -} - -type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} - -func (m *MyMessage_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Ext struct { - Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} - -func (m *Ext) GetData() string { - if m != nil && m.Data != nil { - return *m.Data - } - return "" -} - -var E_Ext_More = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*Ext)(nil), - Field: 103, - Name: "testdata.Ext.more", - Tag: "bytes,103,opt,name=more", -} - -var E_Ext_Text = &proto.ExtensionDesc{ - 
ExtendedType: (*MyMessage)(nil), - ExtensionType: (*string)(nil), - Field: 104, - Name: "testdata.Ext.text", - Tag: "bytes,104,opt,name=text", -} - -var E_Ext_Number = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 105, - Name: "testdata.Ext.number", - Tag: "varint,105,opt,name=number", -} - -type DefaultsMessage struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } -func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } -func (*DefaultsMessage) ProtoMessage() {} - -var extRange_DefaultsMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_DefaultsMessage -} -func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type MyMessageSet struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } -func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } -func (*MyMessageSet) ProtoMessage() {} - -func (m *MyMessageSet) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(m.ExtensionMap()) -} -func (m *MyMessageSet) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) -} -func (m *MyMessageSet) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(m.XXX_extensions) -} -func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) -} - -// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*MyMessageSet)(nil) -var _ proto.Unmarshaler = (*MyMessageSet)(nil) - -var extRange_MyMessageSet = []proto.ExtensionRange{ - {100, 2147483646}, -} - -func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessageSet -} -func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type Empty struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} - -type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} - -func (m *MessageList) GetMessage() []*MessageList_Message { - if m != nil { - return m.Message - } - return nil -} - -type MessageList_Message struct { - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} - -func (m *MessageList_Message) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m 
*MessageList_Message) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} - -func (m *Strings) GetStringField() string { - if m != nil && m.StringField != nil { - return *m.StringField - } - return "" -} - -func (m *Strings) GetBytesField() []byte { - if m != nil { - return m.BytesField - } - return nil -} - -type Defaults struct { - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` - // More fields with crazy defaults. - F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` - // Sub-message. - Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` - // Redundant but explicit defaults. 
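The "crazy defaults" above exercise proto's inf, -inf, and nan default literals, which the generated code materializes via math.Inf and math.NaN. A short sketch of what an unset Defaults reports, assuming the generated testdata package:

package main

import (
	"fmt"
	"math"

	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	d := new(pb.Defaults) // nothing set: getters fall back to the defaults
	fmt.Println(d.GetF_Pinf()) // +Inf
	fmt.Println(d.GetF_Ninf()) // -Inf
	// NaN never compares equal to itself, so probe it with math.IsNaN.
	fmt.Println(math.IsNaN(float64(d.GetF_Nan()))) // true
}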
- StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} - -const Default_Defaults_F_Bool bool = true -const Default_Defaults_F_Int32 int32 = 32 -const Default_Defaults_F_Int64 int64 = 64 -const Default_Defaults_F_Fixed32 uint32 = 320 -const Default_Defaults_F_Fixed64 uint64 = 640 -const Default_Defaults_F_Uint32 uint32 = 3200 -const Default_Defaults_F_Uint64 uint64 = 6400 -const Default_Defaults_F_Float float32 = 314159 -const Default_Defaults_F_Double float64 = 271828 -const Default_Defaults_F_String string = "hello, \"world!\"\n" - -var Default_Defaults_F_Bytes []byte = []byte("Bignose") - -const Default_Defaults_F_Sint32 int32 = -32 -const Default_Defaults_F_Sint64 int64 = -64 -const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN - -var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) -var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) -var Default_Defaults_F_Nan float32 = float32(math.NaN()) - -func (m *Defaults) GetF_Bool() bool { - if m != nil && m.F_Bool != nil { - return *m.F_Bool - } - return Default_Defaults_F_Bool -} - -func (m *Defaults) GetF_Int32() int32 { - if m != nil && m.F_Int32 != nil { - return *m.F_Int32 - } - return Default_Defaults_F_Int32 -} - -func (m *Defaults) GetF_Int64() int64 { - if m != nil && m.F_Int64 != nil { - return *m.F_Int64 - } - return Default_Defaults_F_Int64 -} - -func (m *Defaults) GetF_Fixed32() uint32 { - if m != nil && m.F_Fixed32 != nil { - return *m.F_Fixed32 - } - return Default_Defaults_F_Fixed32 -} - -func (m *Defaults) GetF_Fixed64() uint64 { - if m != nil && m.F_Fixed64 != nil { - return *m.F_Fixed64 - } - return Default_Defaults_F_Fixed64 -} - -func (m *Defaults) GetF_Uint32() uint32 { - if m != nil && m.F_Uint32 != nil { - return *m.F_Uint32 - } - return Default_Defaults_F_Uint32 -} - -func (m *Defaults) GetF_Uint64() uint64 { - if m != nil && m.F_Uint64 != nil { - return *m.F_Uint64 - } - return Default_Defaults_F_Uint64 -} - -func (m *Defaults) GetF_Float() float32 { - if m != nil && m.F_Float != nil { - return *m.F_Float - } - return Default_Defaults_F_Float -} - -func (m *Defaults) GetF_Double() float64 { - if m != nil && m.F_Double != nil { - return *m.F_Double - } - return Default_Defaults_F_Double -} - -func (m *Defaults) GetF_String() string { - if m != nil && m.F_String != nil { - return *m.F_String - } - return Default_Defaults_F_String -} - -func (m *Defaults) GetF_Bytes() []byte { - if m != nil && m.F_Bytes != nil { - return m.F_Bytes - } - return append([]byte(nil), Default_Defaults_F_Bytes...) 
-} - -func (m *Defaults) GetF_Sint32() int32 { - if m != nil && m.F_Sint32 != nil { - return *m.F_Sint32 - } - return Default_Defaults_F_Sint32 -} - -func (m *Defaults) GetF_Sint64() int64 { - if m != nil && m.F_Sint64 != nil { - return *m.F_Sint64 - } - return Default_Defaults_F_Sint64 -} - -func (m *Defaults) GetF_Enum() Defaults_Color { - if m != nil && m.F_Enum != nil { - return *m.F_Enum - } - return Default_Defaults_F_Enum -} - -func (m *Defaults) GetF_Pinf() float32 { - if m != nil && m.F_Pinf != nil { - return *m.F_Pinf - } - return Default_Defaults_F_Pinf -} - -func (m *Defaults) GetF_Ninf() float32 { - if m != nil && m.F_Ninf != nil { - return *m.F_Ninf - } - return Default_Defaults_F_Ninf -} - -func (m *Defaults) GetF_Nan() float32 { - if m != nil && m.F_Nan != nil { - return *m.F_Nan - } - return Default_Defaults_F_Nan -} - -func (m *Defaults) GetSub() *SubDefaults { - if m != nil { - return m.Sub - } - return nil -} - -func (m *Defaults) GetStrZero() string { - if m != nil && m.StrZero != nil { - return *m.StrZero - } - return "" -} - -type SubDefaults struct { - N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} - -const Default_SubDefaults_N int64 = 7 - -func (m *SubDefaults) GetN() int64 { - if m != nil && m.N != nil { - return *m.N - } - return Default_SubDefaults_N -} - -type RepeatedEnum struct { - Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} - -func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { - if m != nil { - return m.Color - } - return nil -} - -type MoreRepeated struct { - Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` - Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` - Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` - Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` - Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) ProtoMessage() {} - -func (m *MoreRepeated) GetBools() []bool { - if m != nil { - return m.Bools - } - return nil -} - -func (m *MoreRepeated) GetBoolsPacked() []bool { - if m != nil { - return m.BoolsPacked - } - return nil -} - -func (m *MoreRepeated) GetInts() []int32 { - if m != nil { - return m.Ints - } - return nil -} - -func (m *MoreRepeated) GetIntsPacked() []int32 { - if m != nil { - return m.IntsPacked - } - return nil -} - -func (m *MoreRepeated) GetInt64SPacked() []int64 { - if m != nil { - return m.Int64SPacked - } - return nil -} - -func (m *MoreRepeated) GetStrings() []string { - if m != nil { - return m.Strings - } - return nil -} - -func (m *MoreRepeated) 
GetFixeds() []uint32 { - if m != nil { - return m.Fixeds - } - return nil -} - -type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} - -func (m *GroupOld) GetG() *GroupOld_G { - if m != nil { - return m.G - } - return nil -} - -type GroupOld_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} - -func (m *GroupOld_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew) Reset() { *m = GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} - -func (m *GroupNew) GetG() *GroupNew_G { - if m != nil { - return m.G - } - return nil -} - -type GroupNew_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } -func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} - -func (m *GroupNew_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *GroupNew_G) GetY() int32 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type FloatingPoint struct { - F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } -func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } -func (*FloatingPoint) ProtoMessage() {} - -func (m *FloatingPoint) GetF() float64 { - if m != nil && m.F != nil { - return *m.F - } - return 0 -} - -type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} - -func (m *MessageWithMap) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - 
return nil -} - -func (m *MessageWithMap) GetStrToStr() map[string]string { - if m != nil { - return m.StrToStr - } - return nil -} - -var E_Greeting = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: ([]string)(nil), - Field: 106, - Name: "testdata.greeting", - Tag: "bytes,106,rep,name=greeting", -} - -var E_NoDefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 101, - Name: "testdata.no_default_double", - Tag: "fixed64,101,opt,name=no_default_double", -} - -var E_NoDefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 102, - Name: "testdata.no_default_float", - Tag: "fixed32,102,opt,name=no_default_float", -} - -var E_NoDefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 103, - Name: "testdata.no_default_int32", - Tag: "varint,103,opt,name=no_default_int32", -} - -var E_NoDefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 104, - Name: "testdata.no_default_int64", - Tag: "varint,104,opt,name=no_default_int64", -} - -var E_NoDefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 105, - Name: "testdata.no_default_uint32", - Tag: "varint,105,opt,name=no_default_uint32", -} - -var E_NoDefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 106, - Name: "testdata.no_default_uint64", - Tag: "varint,106,opt,name=no_default_uint64", -} - -var E_NoDefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 107, - Name: "testdata.no_default_sint32", - Tag: "zigzag32,107,opt,name=no_default_sint32", -} - -var E_NoDefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 108, - Name: "testdata.no_default_sint64", - Tag: "zigzag64,108,opt,name=no_default_sint64", -} - -var E_NoDefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 109, - Name: "testdata.no_default_fixed32", - Tag: "fixed32,109,opt,name=no_default_fixed32", -} - -var E_NoDefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 110, - Name: "testdata.no_default_fixed64", - Tag: "fixed64,110,opt,name=no_default_fixed64", -} - -var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 111, - Name: "testdata.no_default_sfixed32", - Tag: "fixed32,111,opt,name=no_default_sfixed32", -} - -var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 112, - Name: "testdata.no_default_sfixed64", - Tag: "fixed64,112,opt,name=no_default_sfixed64", -} - -var E_NoDefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 113, - Name: "testdata.no_default_bool", - Tag: "varint,113,opt,name=no_default_bool", -} - -var E_NoDefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 114, - Name: "testdata.no_default_string", - Tag: "bytes,114,opt,name=no_default_string", -} - -var E_NoDefaultBytes = &proto.ExtensionDesc{ - 
ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 115, - Name: "testdata.no_default_bytes", - Tag: "bytes,115,opt,name=no_default_bytes", -} - -var E_NoDefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 116, - Name: "testdata.no_default_enum", - Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum", -} - -var E_DefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 201, - Name: "testdata.default_double", - Tag: "fixed64,201,opt,name=default_double,def=3.1415", -} - -var E_DefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 202, - Name: "testdata.default_float", - Tag: "fixed32,202,opt,name=default_float,def=3.14", -} - -var E_DefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 203, - Name: "testdata.default_int32", - Tag: "varint,203,opt,name=default_int32,def=42", -} - -var E_DefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 204, - Name: "testdata.default_int64", - Tag: "varint,204,opt,name=default_int64,def=43", -} - -var E_DefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 205, - Name: "testdata.default_uint32", - Tag: "varint,205,opt,name=default_uint32,def=44", -} - -var E_DefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 206, - Name: "testdata.default_uint64", - Tag: "varint,206,opt,name=default_uint64,def=45", -} - -var E_DefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 207, - Name: "testdata.default_sint32", - Tag: "zigzag32,207,opt,name=default_sint32,def=46", -} - -var E_DefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 208, - Name: "testdata.default_sint64", - Tag: "zigzag64,208,opt,name=default_sint64,def=47", -} - -var E_DefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 209, - Name: "testdata.default_fixed32", - Tag: "fixed32,209,opt,name=default_fixed32,def=48", -} - -var E_DefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 210, - Name: "testdata.default_fixed64", - Tag: "fixed64,210,opt,name=default_fixed64,def=49", -} - -var E_DefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 211, - Name: "testdata.default_sfixed32", - Tag: "fixed32,211,opt,name=default_sfixed32,def=50", -} - -var E_DefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 212, - Name: "testdata.default_sfixed64", - Tag: "fixed64,212,opt,name=default_sfixed64,def=51", -} - -var E_DefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 213, - Name: "testdata.default_bool", - Tag: "varint,213,opt,name=default_bool,def=1", -} - -var E_DefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 214, - Name: 
"testdata.default_string", - Tag: "bytes,214,opt,name=default_string,def=Hello, string", -} - -var E_DefaultBytes = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 215, - Name: "testdata.default_bytes", - Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes", -} - -var E_DefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 216, - Name: "testdata.default_enum", - Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", -} - -var E_X201 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 201, - Name: "testdata.x201", - Tag: "bytes,201,opt,name=x201", -} - -var E_X202 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 202, - Name: "testdata.x202", - Tag: "bytes,202,opt,name=x202", -} - -var E_X203 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 203, - Name: "testdata.x203", - Tag: "bytes,203,opt,name=x203", -} - -var E_X204 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 204, - Name: "testdata.x204", - Tag: "bytes,204,opt,name=x204", -} - -var E_X205 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 205, - Name: "testdata.x205", - Tag: "bytes,205,opt,name=x205", -} - -var E_X206 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 206, - Name: "testdata.x206", - Tag: "bytes,206,opt,name=x206", -} - -var E_X207 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 207, - Name: "testdata.x207", - Tag: "bytes,207,opt,name=x207", -} - -var E_X208 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 208, - Name: "testdata.x208", - Tag: "bytes,208,opt,name=x208", -} - -var E_X209 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 209, - Name: "testdata.x209", - Tag: "bytes,209,opt,name=x209", -} - -var E_X210 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 210, - Name: "testdata.x210", - Tag: "bytes,210,opt,name=x210", -} - -var E_X211 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 211, - Name: "testdata.x211", - Tag: "bytes,211,opt,name=x211", -} - -var E_X212 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 212, - Name: "testdata.x212", - Tag: "bytes,212,opt,name=x212", -} - -var E_X213 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 213, - Name: "testdata.x213", - Tag: "bytes,213,opt,name=x213", -} - -var E_X214 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 214, - Name: "testdata.x214", - Tag: "bytes,214,opt,name=x214", -} - -var E_X215 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 215, - Name: "testdata.x215", - Tag: "bytes,215,opt,name=x215", -} - -var E_X216 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 216, - Name: "testdata.x216", - Tag: "bytes,216,opt,name=x216", -} - 
-var E_X217 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 217, - Name: "testdata.x217", - Tag: "bytes,217,opt,name=x217", -} - -var E_X218 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 218, - Name: "testdata.x218", - Tag: "bytes,218,opt,name=x218", -} - -var E_X219 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 219, - Name: "testdata.x219", - Tag: "bytes,219,opt,name=x219", -} - -var E_X220 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 220, - Name: "testdata.x220", - Tag: "bytes,220,opt,name=x220", -} - -var E_X221 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 221, - Name: "testdata.x221", - Tag: "bytes,221,opt,name=x221", -} - -var E_X222 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 222, - Name: "testdata.x222", - Tag: "bytes,222,opt,name=x222", -} - -var E_X223 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 223, - Name: "testdata.x223", - Tag: "bytes,223,opt,name=x223", -} - -var E_X224 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 224, - Name: "testdata.x224", - Tag: "bytes,224,opt,name=x224", -} - -var E_X225 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 225, - Name: "testdata.x225", - Tag: "bytes,225,opt,name=x225", -} - -var E_X226 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 226, - Name: "testdata.x226", - Tag: "bytes,226,opt,name=x226", -} - -var E_X227 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 227, - Name: "testdata.x227", - Tag: "bytes,227,opt,name=x227", -} - -var E_X228 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 228, - Name: "testdata.x228", - Tag: "bytes,228,opt,name=x228", -} - -var E_X229 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 229, - Name: "testdata.x229", - Tag: "bytes,229,opt,name=x229", -} - -var E_X230 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 230, - Name: "testdata.x230", - Tag: "bytes,230,opt,name=x230", -} - -var E_X231 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 231, - Name: "testdata.x231", - Tag: "bytes,231,opt,name=x231", -} - -var E_X232 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 232, - Name: "testdata.x232", - Tag: "bytes,232,opt,name=x232", -} - -var E_X233 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 233, - Name: "testdata.x233", - Tag: "bytes,233,opt,name=x233", -} - -var E_X234 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 234, - Name: "testdata.x234", - Tag: "bytes,234,opt,name=x234", -} - -var E_X235 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 235, - Name: "testdata.x235", - Tag: "bytes,235,opt,name=x235", -} - -var E_X236 = &proto.ExtensionDesc{ - ExtendedType: 
(*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 236, - Name: "testdata.x236", - Tag: "bytes,236,opt,name=x236", -} - -var E_X237 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 237, - Name: "testdata.x237", - Tag: "bytes,237,opt,name=x237", -} - -var E_X238 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 238, - Name: "testdata.x238", - Tag: "bytes,238,opt,name=x238", -} - -var E_X239 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 239, - Name: "testdata.x239", - Tag: "bytes,239,opt,name=x239", -} - -var E_X240 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 240, - Name: "testdata.x240", - Tag: "bytes,240,opt,name=x240", -} - -var E_X241 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 241, - Name: "testdata.x241", - Tag: "bytes,241,opt,name=x241", -} - -var E_X242 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 242, - Name: "testdata.x242", - Tag: "bytes,242,opt,name=x242", -} - -var E_X243 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 243, - Name: "testdata.x243", - Tag: "bytes,243,opt,name=x243", -} - -var E_X244 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 244, - Name: "testdata.x244", - Tag: "bytes,244,opt,name=x244", -} - -var E_X245 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 245, - Name: "testdata.x245", - Tag: "bytes,245,opt,name=x245", -} - -var E_X246 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 246, - Name: "testdata.x246", - Tag: "bytes,246,opt,name=x246", -} - -var E_X247 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 247, - Name: "testdata.x247", - Tag: "bytes,247,opt,name=x247", -} - -var E_X248 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 248, - Name: "testdata.x248", - Tag: "bytes,248,opt,name=x248", -} - -var E_X249 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 249, - Name: "testdata.x249", - Tag: "bytes,249,opt,name=x249", -} - -var E_X250 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 250, - Name: "testdata.x250", - Tag: "bytes,250,opt,name=x250", -} - -func init() { - proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) - proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) - proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) - proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) - proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) - proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) - proto.RegisterExtension(E_Ext_More) - proto.RegisterExtension(E_Ext_Text) - proto.RegisterExtension(E_Ext_Number) - proto.RegisterExtension(E_Greeting) - proto.RegisterExtension(E_NoDefaultDouble) - proto.RegisterExtension(E_NoDefaultFloat) - 
proto.RegisterExtension(E_NoDefaultInt32) - proto.RegisterExtension(E_NoDefaultInt64) - proto.RegisterExtension(E_NoDefaultUint32) - proto.RegisterExtension(E_NoDefaultUint64) - proto.RegisterExtension(E_NoDefaultSint32) - proto.RegisterExtension(E_NoDefaultSint64) - proto.RegisterExtension(E_NoDefaultFixed32) - proto.RegisterExtension(E_NoDefaultFixed64) - proto.RegisterExtension(E_NoDefaultSfixed32) - proto.RegisterExtension(E_NoDefaultSfixed64) - proto.RegisterExtension(E_NoDefaultBool) - proto.RegisterExtension(E_NoDefaultString) - proto.RegisterExtension(E_NoDefaultBytes) - proto.RegisterExtension(E_NoDefaultEnum) - proto.RegisterExtension(E_DefaultDouble) - proto.RegisterExtension(E_DefaultFloat) - proto.RegisterExtension(E_DefaultInt32) - proto.RegisterExtension(E_DefaultInt64) - proto.RegisterExtension(E_DefaultUint32) - proto.RegisterExtension(E_DefaultUint64) - proto.RegisterExtension(E_DefaultSint32) - proto.RegisterExtension(E_DefaultSint64) - proto.RegisterExtension(E_DefaultFixed32) - proto.RegisterExtension(E_DefaultFixed64) - proto.RegisterExtension(E_DefaultSfixed32) - proto.RegisterExtension(E_DefaultSfixed64) - proto.RegisterExtension(E_DefaultBool) - proto.RegisterExtension(E_DefaultString) - proto.RegisterExtension(E_DefaultBytes) - proto.RegisterExtension(E_DefaultEnum) - proto.RegisterExtension(E_X201) - proto.RegisterExtension(E_X202) - proto.RegisterExtension(E_X203) - proto.RegisterExtension(E_X204) - proto.RegisterExtension(E_X205) - proto.RegisterExtension(E_X206) - proto.RegisterExtension(E_X207) - proto.RegisterExtension(E_X208) - proto.RegisterExtension(E_X209) - proto.RegisterExtension(E_X210) - proto.RegisterExtension(E_X211) - proto.RegisterExtension(E_X212) - proto.RegisterExtension(E_X213) - proto.RegisterExtension(E_X214) - proto.RegisterExtension(E_X215) - proto.RegisterExtension(E_X216) - proto.RegisterExtension(E_X217) - proto.RegisterExtension(E_X218) - proto.RegisterExtension(E_X219) - proto.RegisterExtension(E_X220) - proto.RegisterExtension(E_X221) - proto.RegisterExtension(E_X222) - proto.RegisterExtension(E_X223) - proto.RegisterExtension(E_X224) - proto.RegisterExtension(E_X225) - proto.RegisterExtension(E_X226) - proto.RegisterExtension(E_X227) - proto.RegisterExtension(E_X228) - proto.RegisterExtension(E_X229) - proto.RegisterExtension(E_X230) - proto.RegisterExtension(E_X231) - proto.RegisterExtension(E_X232) - proto.RegisterExtension(E_X233) - proto.RegisterExtension(E_X234) - proto.RegisterExtension(E_X235) - proto.RegisterExtension(E_X236) - proto.RegisterExtension(E_X237) - proto.RegisterExtension(E_X238) - proto.RegisterExtension(E_X239) - proto.RegisterExtension(E_X240) - proto.RegisterExtension(E_X241) - proto.RegisterExtension(E_X242) - proto.RegisterExtension(E_X243) - proto.RegisterExtension(E_X244) - proto.RegisterExtension(E_X245) - proto.RegisterExtension(E_X246) - proto.RegisterExtension(E_X247) - proto.RegisterExtension(E_X248) - proto.RegisterExtension(E_X249) - proto.RegisterExtension(E_X250) -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto deleted file mode 100644 index 440dba38dd..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto +++ /dev/null @@ -1,480 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
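The init function above registers every enum and extension descriptor with the proto package's global registry. Registration is what lets the runtime resolve an ExtensionDesc against a message; in particular, in this vendored revision GetExtension appears to fall back to the def= value recorded in a descriptor's Tag string when the extension was never set. A hedged sketch, under the same import-path assumption as the example above:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	m := &pb.DefaultsMessage{}
	// default_int32 is declared with [default = 42] and was never set,
	// so the returned value should carry the declared default.
	v, err := proto.GetExtension(m, pb.E_DefaultInt32)
	if err != nil {
		panic(err)
	}
	fmt.Println(*v.(*int32)) // 42
}

The same registry is also what allows the text writer in text.go (deleted later in this change) to print registered extensions by name rather than dumping them as unknown bytes.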
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// A feature-rich test file for the protocol compiler and libraries. - -syntax = "proto2"; - -package testdata; - -enum FOO { FOO1 = 1; }; - -message GoEnum { - required FOO foo = 1; -} - -message GoTestField { - required string Label = 1; - required string Type = 2; -} - -message GoTest { - // An enum, for completeness. - enum KIND { - VOID = 0; - - // Basic types - BOOL = 1; - BYTES = 2; - FINGERPRINT = 3; - FLOAT = 4; - INT = 5; - STRING = 6; - TIME = 7; - - // Groupings - TUPLE = 8; - ARRAY = 9; - MAP = 10; - - // Table types - TABLE = 11; - - // Functions - FUNCTION = 12; // last tag - }; - - // Some typical parameters - required KIND Kind = 1; - optional string Table = 2; - optional int32 Param = 3; - - // Required, repeated and optional foreign fields. 
- required GoTestField RequiredField = 4; - repeated GoTestField RepeatedField = 5; - optional GoTestField OptionalField = 6; - - // Required fields of all basic types - required bool F_Bool_required = 10; - required int32 F_Int32_required = 11; - required int64 F_Int64_required = 12; - required fixed32 F_Fixed32_required = 13; - required fixed64 F_Fixed64_required = 14; - required uint32 F_Uint32_required = 15; - required uint64 F_Uint64_required = 16; - required float F_Float_required = 17; - required double F_Double_required = 18; - required string F_String_required = 19; - required bytes F_Bytes_required = 101; - required sint32 F_Sint32_required = 102; - required sint64 F_Sint64_required = 103; - - // Repeated fields of all basic types - repeated bool F_Bool_repeated = 20; - repeated int32 F_Int32_repeated = 21; - repeated int64 F_Int64_repeated = 22; - repeated fixed32 F_Fixed32_repeated = 23; - repeated fixed64 F_Fixed64_repeated = 24; - repeated uint32 F_Uint32_repeated = 25; - repeated uint64 F_Uint64_repeated = 26; - repeated float F_Float_repeated = 27; - repeated double F_Double_repeated = 28; - repeated string F_String_repeated = 29; - repeated bytes F_Bytes_repeated = 201; - repeated sint32 F_Sint32_repeated = 202; - repeated sint64 F_Sint64_repeated = 203; - - // Optional fields of all basic types - optional bool F_Bool_optional = 30; - optional int32 F_Int32_optional = 31; - optional int64 F_Int64_optional = 32; - optional fixed32 F_Fixed32_optional = 33; - optional fixed64 F_Fixed64_optional = 34; - optional uint32 F_Uint32_optional = 35; - optional uint64 F_Uint64_optional = 36; - optional float F_Float_optional = 37; - optional double F_Double_optional = 38; - optional string F_String_optional = 39; - optional bytes F_Bytes_optional = 301; - optional sint32 F_Sint32_optional = 302; - optional sint64 F_Sint64_optional = 303; - - // Default-valued fields of all basic types - optional bool F_Bool_defaulted = 40 [default=true]; - optional int32 F_Int32_defaulted = 41 [default=32]; - optional int64 F_Int64_defaulted = 42 [default=64]; - optional fixed32 F_Fixed32_defaulted = 43 [default=320]; - optional fixed64 F_Fixed64_defaulted = 44 [default=640]; - optional uint32 F_Uint32_defaulted = 45 [default=3200]; - optional uint64 F_Uint64_defaulted = 46 [default=6400]; - optional float F_Float_defaulted = 47 [default=314159.]; - optional double F_Double_defaulted = 48 [default=271828.]; - optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; - optional sint32 F_Sint32_defaulted = 402 [default = -32]; - optional sint64 F_Sint64_defaulted = 403 [default = -64]; - - // Packed repeated fields (no string or bytes). - repeated bool F_Bool_repeated_packed = 50 [packed=true]; - repeated int32 F_Int32_repeated_packed = 51 [packed=true]; - repeated int64 F_Int64_repeated_packed = 52 [packed=true]; - repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; - repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; - repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; - repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; - repeated float F_Float_repeated_packed = 57 [packed=true]; - repeated double F_Double_repeated_packed = 58 [packed=true]; - repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; - repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; - - // Required, repeated, and optional groups. 
- required group RequiredGroup = 70 { - required string RequiredField = 71; - }; - - repeated group RepeatedGroup = 80 { - required string RequiredField = 81; - }; - - optional group OptionalGroup = 90 { - required string RequiredField = 91; - }; -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -message GoSkipTest { - required int32 skip_int32 = 11; - required fixed32 skip_fixed32 = 12; - required fixed64 skip_fixed64 = 13; - required string skip_string = 14; - required group SkipGroup = 15 { - required int32 group_int32 = 16; - required string group_string = 17; - } -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -message NonPackedTest { - repeated int32 a = 1; -} - -message PackedTest { - repeated int32 b = 1 [packed=true]; -} - -message MaxTag { - // Maximum possible tag number. - optional string last_field = 536870911; -} - -message OldMessage { - message Nested { - optional string name = 1; - } - optional Nested nested = 1; - - optional int32 num = 2; -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -message NewMessage { - message Nested { - optional string name = 1; - optional string food_group = 2; - } - optional Nested nested = 1; - - // This is an int32 in OldMessage. - optional int64 num = 2; -} - -// Smaller tests for ASCII formatting. - -message InnerMessage { - required string host = 1; - optional int32 port = 2 [default=4000]; - optional bool connected = 3; -} - -message OtherMessage { - optional int64 key = 1; - optional bytes value = 2; - optional float weight = 3; - optional InnerMessage inner = 4; -} - -message MyMessage { - required int32 count = 1; - optional string name = 2; - optional string quote = 3; - repeated string pet = 4; - optional InnerMessage inner = 5; - repeated OtherMessage others = 6; - repeated InnerMessage rep_inner = 12; - - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - }; - optional Color bikeshed = 7; - - optional group SomeGroup = 8 { - optional int32 group_field = 9; - } - - // This field becomes [][]byte in the generated code. 
- repeated bytes rep_bytes = 10; - - optional double bigfloat = 11; - - extensions 100 to max; -} - -message Ext { - extend MyMessage { - optional Ext more = 103; - optional string text = 104; - optional int32 number = 105; - } - - optional string data = 1; -} - -extend MyMessage { - repeated string greeting = 106; -} - -message DefaultsMessage { - enum DefaultsEnum { - ZERO = 0; - ONE = 1; - TWO = 2; - }; - extensions 100 to max; -} - -extend DefaultsMessage { - optional double no_default_double = 101; - optional float no_default_float = 102; - optional int32 no_default_int32 = 103; - optional int64 no_default_int64 = 104; - optional uint32 no_default_uint32 = 105; - optional uint64 no_default_uint64 = 106; - optional sint32 no_default_sint32 = 107; - optional sint64 no_default_sint64 = 108; - optional fixed32 no_default_fixed32 = 109; - optional fixed64 no_default_fixed64 = 110; - optional sfixed32 no_default_sfixed32 = 111; - optional sfixed64 no_default_sfixed64 = 112; - optional bool no_default_bool = 113; - optional string no_default_string = 114; - optional bytes no_default_bytes = 115; - optional DefaultsMessage.DefaultsEnum no_default_enum = 116; - - optional double default_double = 201 [default = 3.1415]; - optional float default_float = 202 [default = 3.14]; - optional int32 default_int32 = 203 [default = 42]; - optional int64 default_int64 = 204 [default = 43]; - optional uint32 default_uint32 = 205 [default = 44]; - optional uint64 default_uint64 = 206 [default = 45]; - optional sint32 default_sint32 = 207 [default = 46]; - optional sint64 default_sint64 = 208 [default = 47]; - optional fixed32 default_fixed32 = 209 [default = 48]; - optional fixed64 default_fixed64 = 210 [default = 49]; - optional sfixed32 default_sfixed32 = 211 [default = 50]; - optional sfixed64 default_sfixed64 = 212 [default = 51]; - optional bool default_bool = 213 [default = true]; - optional string default_string = 214 [default = "Hello, string"]; - optional bytes default_bytes = 215 [default = "Hello, bytes"]; - optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; -} - -message MyMessageSet { - option message_set_wire_format = true; - extensions 100 to max; -} - -message Empty { -} - -extend MyMessageSet { - optional Empty x201 = 201; - optional Empty x202 = 202; - optional Empty x203 = 203; - optional Empty x204 = 204; - optional Empty x205 = 205; - optional Empty x206 = 206; - optional Empty x207 = 207; - optional Empty x208 = 208; - optional Empty x209 = 209; - optional Empty x210 = 210; - optional Empty x211 = 211; - optional Empty x212 = 212; - optional Empty x213 = 213; - optional Empty x214 = 214; - optional Empty x215 = 215; - optional Empty x216 = 216; - optional Empty x217 = 217; - optional Empty x218 = 218; - optional Empty x219 = 219; - optional Empty x220 = 220; - optional Empty x221 = 221; - optional Empty x222 = 222; - optional Empty x223 = 223; - optional Empty x224 = 224; - optional Empty x225 = 225; - optional Empty x226 = 226; - optional Empty x227 = 227; - optional Empty x228 = 228; - optional Empty x229 = 229; - optional Empty x230 = 230; - optional Empty x231 = 231; - optional Empty x232 = 232; - optional Empty x233 = 233; - optional Empty x234 = 234; - optional Empty x235 = 235; - optional Empty x236 = 236; - optional Empty x237 = 237; - optional Empty x238 = 238; - optional Empty x239 = 239; - optional Empty x240 = 240; - optional Empty x241 = 241; - optional Empty x242 = 242; - optional Empty x243 = 243; - optional Empty x244 = 244; - optional Empty x245 = 
245; - optional Empty x246 = 246; - optional Empty x247 = 247; - optional Empty x248 = 248; - optional Empty x249 = 249; - optional Empty x250 = 250; -} - -message MessageList { - repeated group Message = 1 { - required string name = 2; - required int32 count = 3; - } -} - -message Strings { - optional string string_field = 1; - optional bytes bytes_field = 2; -} - -message Defaults { - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - } - - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - optional bool F_Bool = 1 [default=true]; - optional int32 F_Int32 = 2 [default=32]; - optional int64 F_Int64 = 3 [default=64]; - optional fixed32 F_Fixed32 = 4 [default=320]; - optional fixed64 F_Fixed64 = 5 [default=640]; - optional uint32 F_Uint32 = 6 [default=3200]; - optional uint64 F_Uint64 = 7 [default=6400]; - optional float F_Float = 8 [default=314159.]; - optional double F_Double = 9 [default=271828.]; - optional string F_String = 10 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes = 11 [default="Bignose"]; - optional sint32 F_Sint32 = 12 [default=-32]; - optional sint64 F_Sint64 = 13 [default=-64]; - optional Color F_Enum = 14 [default=GREEN]; - - // More fields with crazy defaults. - optional float F_Pinf = 15 [default=inf]; - optional float F_Ninf = 16 [default=-inf]; - optional float F_Nan = 17 [default=nan]; - - // Sub-message. - optional SubDefaults sub = 18; - - // Redundant but explicit defaults. - optional string str_zero = 19 [default=""]; -} - -message SubDefaults { - optional int64 n = 1 [default=7]; -} - -message RepeatedEnum { - enum Color { - RED = 1; - } - repeated Color color = 1; -} - -message MoreRepeated { - repeated bool bools = 1; - repeated bool bools_packed = 2 [packed=true]; - repeated int32 ints = 3; - repeated int32 ints_packed = 4 [packed=true]; - repeated int64 int64s_packed = 7 [packed=true]; - repeated string strings = 5; - repeated fixed32 fixeds = 6; -} - -// GroupOld and GroupNew have the same wire format. -// GroupNew has a new field inside a group. - -message GroupOld { - optional group G = 101 { - optional int32 x = 2; - } -} - -message GroupNew { - optional group G = 101 { - optional int32 x = 2; - optional int32 y = 3; - } -} - -message FloatingPoint { - required double f = 1; -} - -message MessageWithMap { - map<int32, string> name_mapping = 1; - map<sint64, FloatingPoint> msg_mapping = 2; - map<bool, bytes> byte_mapping = 3; - map<string, string> str_to_str = 4; -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index f7dc58a3e6..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,769 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc.
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. 
- return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Printf("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -var ( - messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() -) - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func writeStruct(w *textWriter, sv reflect.Value) error { - if sv.Type() == messageSetType { - return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) - } - - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() // TODO: should we sort these for deterministic output? - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if pv.Type().Implements(extendableProtoType) { - if err := writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. 
- if err := writeString(w, string(v.Interface().([]byte))); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if tm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeMessageSet(w *textWriter, ms *MessageSet) error { - for _, item := range ms.Item { - id := *item.TypeId - if msd, ok := messageSetMap[id]; ok { - // Known message set type. - if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { - return err - } - w.indent() - - pb := reflect.New(msd.t.Elem()) - if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { - if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { - return err - } - } else { - if err := writeStruct(w, pb.Elem()); err != nil { - return err - } - } - } else { - // Unknown type. 
- if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { - return err - } - w.indent() - if err := writeUnknownStruct(w, item.Message); err != nil { - return err - } - } - w.unindent() - if _, err := w.Write(gtNewline); err != nil { - return err - } - } - return nil -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep := pv.Interface().(extendableProto) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m := ep.ExtensionMap() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -func marshalText(w io.Writer, pb Message, compact bool) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("<nil>")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: compact, - } - - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { - return marshalText(w, pb, false) -} - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, false) - return buf.String() -} - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, true) - return buf.String() -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index 7d0c757198..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,772 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc.
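Before the parser half of the package begins, a usage sketch of the public entry points just defined; it assumes the generated testdata messages that this package's tests use (pb.MyMessage with optional Count and Name fields):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	msg := &pb.MyMessage{Count: proto.Int32(42), Name: proto.String("Dave")}
	var buf bytes.Buffer
	if err := proto.MarshalText(&buf, msg); err != nil { // multi-line form
		panic(err)
	}
	fmt.Print(buf.String())                   // count: 42 / name: "Dave", one field per line
	fmt.Println(proto.CompactTextString(msg)) // same content on a single line
}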
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', 
',': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. 
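The unescape function above funnels both octal (\ooo) and hex (\xhh) forms through strconv.ParseUint with a bit size of 8, so anything that does not fit in one byte is rejected. A reduced sketch of that step (decodeByteEscape is illustrative only):

package main

import (
	"fmt"
	"strconv"
)

// decodeByteEscape parses the digits of a numeric escape into a single byte,
// mirroring the ParseUint(ss, base, 8) call in unescape above.
func decodeByteEscape(digits string, base int) (byte, error) {
	v, err := strconv.ParseUint(digits, base, 8) // bitSize 8: must fit a byte
	if err != nil {
		return 0, err
	}
	return byte(v), nil
}

func main() {
	b, _ := decodeByteEscape("101", 8) // octal 101 == 'A'
	c, _ := decodeByteEscape("41", 16) // hex 41 == 'A'
	fmt.Printf("%c %c\n", b, c)        // prints: A A
}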
-func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || p.s[0] != '"' { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { - sprops := GetProperties(st) - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - reqCount := GetProperties(st).reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]". - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). 
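back() and next() above implement a classic one-token pushback: back() only sets a flag, and the following next() replays the saved token instead of advancing. A stripped-down sketch of the mechanism, not the real parser:

package main

import "fmt"

// parser demonstrates one-token lookahead via a "backed" flag,
// in the style of textParser.back()/next() above.
type parser struct {
	toks   []string
	cur    string
	backed bool
}

func (p *parser) next() string {
	if p.backed {
		p.backed = false
		return p.cur // replay the current token
	}
	if len(p.toks) == 0 {
		p.cur = ""
		return p.cur
	}
	p.cur, p.toks = p.toks[0], p.toks[1:]
	return p.cur
}

func (p *parser) back() { p.backed = true }

func main() {
	p := &parser{toks: []string{"count", ":", "42"}}
	fmt.Println(p.next()) // count
	p.back()
	fmt.Println(p.next()) // count (replayed, not consumed twice)
}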
- tok = p.next() - if tok.err != nil { - return tok.err - } - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. - tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(extendableProto) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - } else { - // This is a normal, non-extension field. - name := tok.value - fi, props, ok := structFieldByName(st, name) - if !ok { - return p.errorf("unknown field name %q in %v", name, st) - } - - dst := sv.Field(fi) - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, st.Field(fi).Type); err != nil { - return err - } - - // Parse into the field. 
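The map branch of readStruct above accepts entries written as the token sequence < key: K value: V > (or with braces), with optional separators. A usage sketch against the testdata message that the tests below rely on:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	m := new(pb.MessageWithMap)
	// One map entry, parsed by the < key ... value ... > sequence above.
	if err := proto.UnmarshalText(`name_mapping:<key:1234 value:"Feist">`, m); err != nil {
		panic(err)
	}
	fmt.Println(m.NameMapping[1234]) // prints: Feist
}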
- fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } else if props.Required { - reqCount-- - } - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. May already exist. - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(at, flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - case reflect.Bool: - // Either "true", "false", 1 or 0. - switch tok.value { - case "true", "1": - fv.SetBool(true) - return nil - case "false", "0": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. 
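Among the scalar cases in readAny above, the float path quietly accepts C++-style literals such as 17.0f by trimming the suffix, except on inf/-inf where the trailing letter is significant. A small sketch of that rule (parseTextFloat is illustrative):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTextFloat tolerates a trailing 'f' as readAny does above,
// but leaves "inf" and "-inf" untouched.
func parseTextFloat(tok string, bits int) (float64, error) {
	if strings.HasSuffix(tok, "f") && tok != "inf" && tok != "-inf" {
		tok = tok[:len(tok)-1]
	}
	return strconv.ParseFloat(tok, bits)
}

func main() {
	f, _ := parseTextFloat("17.0f", 32)
	fmt.Println(f) // prints: 17
}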
- return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go deleted file mode 100644 index 0754b2626c..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go +++ /dev/null @@ -1,511 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "math" - "reflect" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - . 
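UnmarshalText, defined above, resets the destination message and reports a *RequiredNotSetError while still keeping whatever fields did parse, so callers can treat it as a soft failure. A usage sketch with the testdata message, whose count field is required:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	msg := new(pb.MyMessage)
	err := proto.UnmarshalText(`name: "Pawel"`, msg)
	if _, ok := err.(*proto.RequiredNotSetError); ok {
		// The parse still populated the fields that were present.
		fmt.Println("partial message, name =", msg.GetName()) // name = Pawel
	}
}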
"github.com/golang/protobuf/proto/testdata" -) - -type UnmarshalTextTest struct { - in string - err string // if "", no error expected - out *MyMessage -} - -func buildExtStructTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_More, &Ext{ - Data: String("Hello, world!"), - }) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtDataTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_Text, String("Hello, world!")) - SetExtension(msg, E_Ext_Number, Int32(1729)) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtRepStringTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { - panic(err) - } - return UnmarshalTextTest{in: text, out: msg} -} - -var unMarshalTextTests = []UnmarshalTextTest{ - // Basic - { - in: " count:42\n name:\"Dave\" ", - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - }, - }, - - // Empty quoted string - { - in: `count:42 name:""`, - out: &MyMessage{ - Count: Int32(42), - Name: String(""), - }, - }, - - // Quoted string concatenation - { - in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string with escaped apostrophe - { - in: `count:42 name: "HOLIDAY - New Year\'s Day"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("HOLIDAY - New Year's Day"), - }, - }, - - // Quoted string with single quote - { - in: `count:42 name: 'Roger "The Ramster" Ramjet'`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`Roger "The Ramster" Ramjet`), - }, - }, - - // Quoted string with all the accepted special characters from the C++ test - { - in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), - }, - }, - - // Quoted string with quoted backslash - { - in: `count:42 name: "\\'xyz"`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`\'xyz`), - }, - }, - - // Quoted string with UTF-8 bytes. 
- { - in: "count:42 name: '\303\277\302\201\xAB'", - out: &MyMessage{ - Count: Int32(42), - Name: String("\303\277\302\201\xAB"), - }, - }, - - // Bad quoted string - { - in: `inner: < host: "\0" >` + "\n", - err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, - }, - - // Number too large for int64 - { - in: "count: 1 others { key: 123456789012345678901 }", - err: "line 1.23: invalid int64: 123456789012345678901", - }, - - // Number too large for int32 - { - in: "count: 1234567890123", - err: "line 1.7: invalid int32: 1234567890123", - }, - - // Number in hexadecimal - { - in: "count: 0x2beef", - out: &MyMessage{ - Count: Int32(0x2beef), - }, - }, - - // Number in octal - { - in: "count: 024601", - out: &MyMessage{ - Count: Int32(024601), - }, - }, - - // Floating point number with "f" suffix - { - in: "count: 4 others:< weight: 17.0f >", - out: &MyMessage{ - Count: Int32(4), - Others: []*OtherMessage{ - { - Weight: Float32(17), - }, - }, - }, - }, - - // Floating point positive infinity - { - in: "count: 4 bigfloat: inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(1)), - }, - }, - - // Floating point negative infinity - { - in: "count: 4 bigfloat: -inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(-1)), - }, - }, - - // Number too large for float32 - { - in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", - err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", - }, - - // Number posing as a quoted string - { - in: `inner: < host: 12 >` + "\n", - err: `line 1.15: invalid string: 12`, - }, - - // Quoted string posing as int32 - { - in: `count: "12"`, - err: `line 1.7: invalid int32: "12"`, - }, - - // Quoted string posing a float32 - { - in: `others:< weight: "17.4" >`, - err: `line 1.17: invalid float32: "17.4"`, - }, - - // Enum - { - in: `count:42 bikeshed: BLUE`, - out: &MyMessage{ - Count: Int32(42), - Bikeshed: MyMessage_BLUE.Enum(), - }, - }, - - // Repeated field - { - in: `count:42 pet: "horsey" pet:"bunny"`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated message with/without colon and <>/{} - { - in: `count:42 others:{} others{} others:<> others:{}`, - out: &MyMessage{ - Count: Int32(42), - Others: []*OtherMessage{ - {}, - {}, - {}, - {}, - }, - }, - }, - - // Missing colon for inner message - { - in: `count:42 inner < host: "cauchy.syd" >`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("cauchy.syd"), - }, - }, - }, - - // Missing colon for string field - { - in: `name "Dave"`, - err: `line 1.5: expected ':', found "\"Dave\""`, - }, - - // Missing colon for int32 field - { - in: `count 42`, - err: `line 1.6: expected ':', found "42"`, - }, - - // Missing required field - { - in: `name: "Pawel"`, - err: `proto: required field "testdata.MyMessage.count" not set`, - out: &MyMessage{ - Name: String("Pawel"), - }, - }, - - // Repeated non-repeated field - { - in: `name: "Rob" name: "Russ"`, - err: `line 1.12: non-repeated field "name" was repeated`, - }, - - // Group - { - in: `count: 17 SomeGroup { group_field: 12 }`, - out: &MyMessage{ - Count: Int32(17), - Somegroup: &MyMessage_SomeGroup{ - GroupField: Int32(12), - }, - }, - }, - - // Semicolon between fields - { - in: `count:3;name:"Calvin"`, - out: &MyMessage{ - Count: Int32(3), - Name: String("Calvin"), - }, - }, - // Comma between fields - { - in: `count:4,name:"Ezekiel"`, - out: &MyMessage{ - Count: Int32(4), - 
Name: String("Ezekiel"), - }, - }, - - // Extension - buildExtStructTest(`count: 42 [testdata.Ext.more]:`), - buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), - buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), - - // Big all-in-one - { - in: "count:42 # Meaning\n" + - `name:"Dave" ` + - `quote:"\"I didn't want to go.\"" ` + - `pet:"bunny" ` + - `pet:"kitty" ` + - `pet:"horsey" ` + - `inner:<` + - ` host:"footrest.syd" ` + - ` port:7001 ` + - ` connected:true ` + - `> ` + - `others:<` + - ` key:3735928559 ` + - ` value:"\x01A\a\f" ` + - `> ` + - `others:<` + - " weight:58.9 # Atomic weight of Co\n" + - ` inner:<` + - ` host:"lesha.mtv" ` + - ` port:8002 ` + - ` >` + - `>`, - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - Quote: String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &InnerMessage{ - Host: String("footrest.syd"), - Port: Int32(7001), - Connected: Bool(true), - }, - Others: []*OtherMessage{ - { - Key: Int64(3735928559), - Value: []byte{0x1, 'A', '\a', '\f'}, - }, - { - Weight: Float32(58.9), - Inner: &InnerMessage{ - Host: String("lesha.mtv"), - Port: Int32(8002), - }, - }, - }, - }, - }, -} - -func TestUnmarshalText(t *testing.T) { - for i, test := range unMarshalTextTests { - pb := new(MyMessage) - err := UnmarshalText(test.in, pb) - if test.err == "" { - // We don't expect failure. - if err != nil { - t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } else { - // We do expect failure. - if err == nil { - t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) - } else if err.Error() != test.err { - t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", - i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } - } -} - -func TestUnmarshalTextCustomMessage(t *testing.T) { - msg := &textMessage{} - if err := UnmarshalText("custom", msg); err != nil { - t.Errorf("Unexpected error from custom unmarshal: %v", err) - } - if UnmarshalText("not custom", msg) == nil { - t.Errorf("Didn't get expected error from custom unmarshal") - } -} - -// Regression test; this caused a panic. 
-func TestRepeatedEnum(t *testing.T) { - pb := new(RepeatedEnum) - if err := UnmarshalText("color: RED", pb); err != nil { - t.Fatal(err) - } - exp := &RepeatedEnum{ - Color: []RepeatedEnum_Color{RepeatedEnum_RED}, - } - if !Equal(pb, exp) { - t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) - } -} - -func TestProto3TextParsing(t *testing.T) { - m := new(proto3pb.Message) - const in = `name: "Wallace" true_scotsman: true` - want := &proto3pb.Message{ - Name: "Wallace", - TrueScotsman: true, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -func TestMapParsing(t *testing.T) { - m := new(MessageWithMap) - const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` + - `msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay - `msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value" - `byte_mapping:<key:true value:"so be it">` - want := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Beatles", - 1234: "Feist", - }, - MsgMapping: map[int64]*FloatingPoint{ - -4: {F: Float64(2.0)}, - -2: {F: Float64(4.0)}, - }, - ByteMapping: map[bool][]byte{ - true: []byte("so be it"), - }, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -var benchInput string - -func init() { - benchInput = "count: 4\n" - for i := 0; i < 1000; i++ { - benchInput += "pet: \"fido\"\n" - } - - // Check it is valid input. - pb := new(MyMessage) - err := UnmarshalText(benchInput, pb) - if err != nil { - panic("Bad benchmark input: " + err.Error()) - } -} - -func BenchmarkUnmarshalText(b *testing.B) { - pb := new(MyMessage) - for i := 0; i < b.N; i++ { - UnmarshalText(benchInput, pb) - } - b.SetBytes(int64(len(benchInput))) -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go deleted file mode 100644 index 39861d1ca8..0000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go +++ /dev/null @@ -1,441 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "errors" - "io/ioutil" - "math" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -// textMessage implements the methods that allow it to marshal and unmarshal -// itself as text. -type textMessage struct { -} - -func (*textMessage) MarshalText() ([]byte, error) { - return []byte("custom"), nil -} - -func (*textMessage) UnmarshalText(bytes []byte) error { - if string(bytes) != "custom" { - return errors.New("expected 'custom'") - } - return nil -} - -func (*textMessage) Reset() {} -func (*textMessage) String() string { return "" } -func (*textMessage) ProtoMessage() {} - -func newTestMessage() *pb.MyMessage { - msg := &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Quote: proto.String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("footrest.syd"), - Port: proto.Int32(7001), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(0xdeadbeef), - Value: []byte{1, 65, 7, 12}, - }, - { - Weight: proto.Float32(6.022), - Inner: &pb.InnerMessage{ - Host: proto.String("lesha.mtv"), - Port: proto.Int32(8002), - }, - }, - }, - Bikeshed: pb.MyMessage_BLUE.Enum(), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(8), - }, - // One normally wouldn't do this. - // This is an undeclared tag 13, as a varint (wire type 0) with value 4. - XXX_unrecognized: []byte{13<<3 | 0, 4}, - } - ext := &pb.Ext{ - Data: proto.String("Big gobs for big rats"), - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { - panic(err) - } - greetings := []string{"adg", "easy", "cow"} - if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { - panic(err) - } - - // Add an unknown extension. We marshal a pb.Ext, and fake the ID. - b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) - if err != nil { - panic(err) - } - b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) - proto.SetRawExtension(msg, 201, b) - - // Extensions can be plain fields, too, so let's test that. 
- b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) - proto.SetRawExtension(msg, 202, b) - - return msg -} - -const text = `count: 42 -name: "Dave" -quote: "\"I didn't want to go.\"" -pet: "bunny" -pet: "kitty" -pet: "horsey" -inner: < - host: "footrest.syd" - port: 7001 - connected: true -> -others: < - key: 3735928559 - value: "\001A\007\014" -> -others: < - weight: 6.022 - inner: < - host: "lesha.mtv" - port: 8002 - > -> -bikeshed: BLUE -SomeGroup { - group_field: 8 -} -/* 2 unknown bytes */ -13: 4 -[testdata.Ext.more]: < - data: "Big gobs for big rats" -> -[testdata.greeting]: "adg" -[testdata.greeting]: "easy" -[testdata.greeting]: "cow" -/* 13 unknown bytes */ -201: "\t3G skiing" -/* 3 unknown bytes */ -202: 19 -` - -func TestMarshalText(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, newTestMessage()); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != text { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) - } -} - -func TestMarshalTextCustomMessage(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, &textMessage{}); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != "custom" { - t.Errorf("Got %q, expected %q", s, "custom") - } -} -func TestMarshalTextNil(t *testing.T) { - want := "<nil>" - tests := []proto.Message{nil, (*pb.MyMessage)(nil)} - for i, test := range tests { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, test); err != nil { - t.Fatal(err) - } - if got := buf.String(); got != want { - t.Errorf("%d: got %q want %q", i, got, want) - } - } -} - -func TestMarshalTextUnknownEnum(t *testing.T) { - // The Color enum only specifies values 0-2. - m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} - got := m.String() - const want = `bikeshed:3 ` - if got != want { - t.Errorf("\n got %q\nwant %q", got, want) - } -} - -func BenchmarkMarshalTextBuffered(b *testing.B) { - buf := new(bytes.Buffer) - m := newTestMessage() - for i := 0; i < b.N; i++ { - buf.Reset() - proto.MarshalText(buf, m) - } -} - -func BenchmarkMarshalTextUnbuffered(b *testing.B) { - w := ioutil.Discard - m := newTestMessage() - for i := 0; i < b.N; i++ { - proto.MarshalText(w, m) - } -} - -func compact(src string) string { - // s/[ \n]+/ /g; s/ $//; - dst := make([]byte, len(src)) - space, comment := false, false - j := 0 - for i := 0; i < len(src); i++ { - if strings.HasPrefix(src[i:], "/*") { - comment = true - i++ - continue - } - if comment && strings.HasPrefix(src[i:], "*/") { - comment = false - i++ - continue - } - if comment { - continue - } - c := src[i] - if c == ' ' || c == '\n' { - space = true - continue - } - if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { - space = false - } - if c == '{' { - space = false - } - if space { - dst[j] = ' ' - j++ - space = false - } - dst[j] = c - j++ - } - if space { - dst[j] = ' ' - j++ - } - return string(dst[0:j]) -} - -var compactText = compact(text) - -func TestCompactText(t *testing.T) { - s := proto.CompactTextString(newTestMessage()) - if s != compactText { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) - } -} - -func TestStringEscaping(t *testing.T) { - testCases := []struct { - in *pb.Strings - out string - }{ - { - // Test data from C++ test (TextFormatTest.StringEscape). - // Single divergence: we don't escape apostrophes.
- &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, - "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", - }, - { - // Test data from the same C++ test. - &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, - "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", - }, - { - // Some UTF-8. - &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, - `string_field: "\000\001\377\201"` + "\n", - }, - } - - for i, tc := range testCases { - var buf bytes.Buffer - if err := proto.MarshalText(&buf, tc.in); err != nil { - t.Errorf("proto.MarsalText: %v", err) - continue - } - s := buf.String() - if s != tc.out { - t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) - continue - } - - // Check round-trip. - pb := new(pb.Strings) - if err := proto.UnmarshalText(s, pb); err != nil { - t.Errorf("#%d: UnmarshalText: %v", i, err) - continue - } - if !proto.Equal(pb, tc.in) { - t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) - } - } -} - -// A limitedWriter accepts some output before it fails. -// This is a proxy for something like a nearly-full or imminently-failing disk, -// or a network connection that is about to die. -type limitedWriter struct { - b bytes.Buffer - limit int -} - -var outOfSpace = errors.New("proto: insufficient space") - -func (w *limitedWriter) Write(p []byte) (n int, err error) { - var avail = w.limit - w.b.Len() - if avail <= 0 { - return 0, outOfSpace - } - if len(p) <= avail { - return w.b.Write(p) - } - n, _ = w.b.Write(p[:avail]) - return n, outOfSpace -} - -func TestMarshalTextFailing(t *testing.T) { - // Try lots of different sizes to exercise more error code-paths. - for lim := 0; lim < len(text); lim++ { - buf := new(limitedWriter) - buf.limit = lim - err := proto.MarshalText(buf, newTestMessage()) - // We expect a certain error, but also some partial results in the buffer. 
- if err != outOfSpace { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) - } - s := buf.b.String() - x := text[:buf.limit] - if s != x { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) - } - } -} - -func TestFloats(t *testing.T) { - tests := []struct { - f float64 - want string - }{ - {0, "0"}, - {4.7, "4.7"}, - {math.Inf(1), "inf"}, - {math.Inf(-1), "-inf"}, - {math.NaN(), "nan"}, - } - for _, test := range tests { - msg := &pb.FloatingPoint{F: &test.f} - got := strings.TrimSpace(msg.String()) - want := `f:` + test.want - if got != want { - t.Errorf("f=%f: got %q, want %q", test.f, got, want) - } - } -} - -func TestRepeatedNilText(t *testing.T) { - m := &pb.MessageList{ - Message: []*pb.MessageList_Message{ - nil, - &pb.MessageList_Message{ - Name: proto.String("Horse"), - }, - nil, - }, - } - want := `Message -Message { - name: "Horse" -} -Message -` - if s := proto.MarshalTextString(m); s != want { - t.Errorf(" got: %s\nwant: %s", s, want) - } -} - -func TestProto3Text(t *testing.T) { - tests := []struct { - m proto.Message - want string - }{ - // zero message - {&proto3pb.Message{}, ``}, - // zero message except for an empty byte slice - {&proto3pb.Message{Data: []byte{}}, ``}, - // trivial case - {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, - // empty map - {&pb.MessageWithMap{}, ``}, - // non-empty map; current map format is the same as a repeated struct - { - &pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}}, - `name_mapping:<key:1234 value:"Feist" >`, - }, - // map with nil value; not well-defined, but we shouldn't crash - { - &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}}, - `msg_mapping:<key:7 >`, - }, - } - for _, test := range tests { - got := strings.TrimSpace(test.m.String()) - if got != test.want { - t.Errorf("\n got %s\nwant %s", got, test.want) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS b/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS deleted file mode 100644 index 824bf2e148..0000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,14 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Sebastien Binet diff --git a/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS b/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 9f54f21ff7..0000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,36 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address).
-# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Russ Cox -Sebastien Binet diff --git a/Godeps/_workspace/src/github.com/golang/snappy/LICENSE b/Godeps/_workspace/src/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10f4c..0000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/golang/snappy/README b/Godeps/_workspace/src/github.com/golang/snappy/README deleted file mode 100644 index 5074bbab8d..0000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/README +++ /dev/null @@ -1,7 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. diff --git a/Godeps/_workspace/src/github.com/golang/snappy/decode.go b/Godeps/_workspace/src/github.com/golang/snappy/decode.go deleted file mode 100644 index a72edf0d0f..0000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 { - return 0, 0, ErrCorrupt - } - if uint64(int(v)) != v { - return 0, 0, errors.New("snappy: decoded block is too large") - } - return int(v), n, nil -} - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if len(dst) < dLen { - dst = make([]byte, dLen) - } - - var d, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-1]) - case x == 61: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-2]) | uint(src[s-1])<<8 - case x == 62: - s += 4 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 - case x == 63: - s += 5 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 - } - length = int(x + 1) - if length <= 0 { - return nil, errors.New("snappy: unsupported literal length") - } - if length > len(dst)-d || length > len(src)-s { - return nil, ErrCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) - - case tagCopy2: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(src[s-2]) | int(src[s-1])<<8 - - case tagCopy4: - return nil, errors.New("snappy: unsupported COPY_4 tag") - } - - end := d + length - if offset > d || end > len(dst) { - return nil, ErrCorrupt - } - for ; d < end; d++ { - dst[d] = dst[d-offset] - } - } - if d != dLen { - return nil, ErrCorrupt - } - return dst[:d], nil -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxUncompressedChunkLen), - buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize), - } -} - -// Reader is an io.Reader than can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. 
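decodedLen above reads the header that every snappy block starts with: the uncompressed length as a uvarint. The same header can be produced and consumed with encoding/binary directly, as this small sketch shows:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Write a block-length header the way the snappy block format expects it.
	var hdr [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(hdr[:], 12345)

	// Read it back, exactly as decodedLen does with binary.Uvarint.
	v, read := binary.Uvarint(hdr[:n])
	fmt.Println(v, read) // prints: 12345 2 (value, header bytes consumed)
}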
- i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4]) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if !r.readFull(r.decoded[:n]) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)]) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen]) { - return 0, r.err - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/encode.go b/Godeps/_workspace/src/github.com/golang/snappy/encode.go deleted file mode 100644 index f3b5484bc7..0000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "io" -) - -// We limit how far copy back-references can go, the same as the C++ code. -const maxOffset = 1 << 15 - -// emitLiteral writes a literal chunk and returns the number of bytes written. -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - case n < 1<<16: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - case n < 1<<24: - dst[0] = 62<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - i = 4 - case int64(n) < 1<<32: - dst[0] = 63<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - dst[4] = uint8(n >> 24) - i = 5 - default: - panic("snappy: source buffer is too long") - } - if copy(dst[i:], lit) != len(lit) { - panic("snappy: destination buffer is too short") - } - return i + len(lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -func emitCopy(dst []byte, offset, length int) int { - i := 0 - for length > 0 { - x := length - 4 - if 0 <= x && x < 1<<3 && offset < 1<<11 { - dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - i += 2 - break - } - - x = length - if x > 1<<6 { - x = 1 << 6 - } - dst[i+0] = uint8(x-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= x - } - return i -} - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - // Return early if src is short. - if len(src) <= 4 { - if len(src) != 0 { - d += emitLiteral(dst[d:], src) - } - return dst[:d] - } - - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - const maxTableSize = 1 << 14 - shift, tableSize := uint(32-8), 1<<8 - for tableSize < maxTableSize && tableSize < len(src) { - shift-- - tableSize *= 2 - } - var table [maxTableSize]int - - // Iterate over the source bytes. - var ( - s int // The iterator position. - t int // The last position with the same hash as s. - lit int // The start position of any pending literal bytes. - ) - for s+3 < len(src) { - // Update the hash table. - b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] - h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 - p := &table[(h*0x1e35a7bd)>>shift] - // We need to store values in [-1, inf) in table. To save - // some initialization time, (re)use the table's zero value - // and shift the values against this zero: add 1 on writes, - // subtract 1 on reads. - t, *p = *p-1, s+1 - // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. - if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { - s++ - continue - } - // Otherwise, we have a match. First, emit any pending literal bytes.
- if lit != s { - d += emitLiteral(dst[d:], src[lit:s]) - } - // Extend the match to be as long as possible. - s0 := s - s, t = s+4, t+4 - for s < len(src) && src[s] == src[t] { - s++ - t++ - } - // Emit the copied bytes. - d += emitCopy(dst[d:], s-t, s-s0) - lit = s - } - - // Emit any final pending literal bytes and return. - if lit != len(src) { - d += emitLiteral(dst[d:], src[lit:]) - } - return dst[:d] -} - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -func MaxEncodedLen(srcLen int) int { - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - return 32 + srcLen + srcLen/6 -} - -// NewWriter returns a new Writer that compresses to w, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - enc []byte - buf [checksumSize + chunkHeaderSize]byte - wroteHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - w.wroteHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (n int, errRet error) { - if w.err != nil { - return 0, w.err - } - if !w.wroteHeader { - copy(w.enc, magicChunk) - if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil { - w.err = err - return n, err - } - w.wroteHeader = true - } - for len(p) > 0 { - var uncompressed []byte - if len(p) > maxUncompressedChunkLen { - uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%.
- chunkType := uint8(chunkTypeCompressedData) - chunkBody := Encode(w.enc, uncompressed) - if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 { - chunkType, chunkBody = chunkTypeUncompressedData, uncompressed - } - - chunkLen := 4 + len(chunkBody) - w.buf[0] = chunkType - w.buf[1] = uint8(chunkLen >> 0) - w.buf[2] = uint8(chunkLen >> 8) - w.buf[3] = uint8(chunkLen >> 16) - w.buf[4] = uint8(checksum >> 0) - w.buf[5] = uint8(checksum >> 8) - w.buf[6] = uint8(checksum >> 16) - w.buf[7] = uint8(checksum >> 24) - if _, err := w.w.Write(w.buf[:]); err != nil { - w.err = err - return n, err - } - if _, err := w.w.Write(chunkBody); err != nil { - w.err = err - return n, err - } - n += len(uncompressed) - } - return n, nil -} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy.go deleted file mode 100644 index e98653acff..0000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. -// -// The C++ snappy implementation is at https://github.com/google/snappy -package snappy - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer supported. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 bytes". 
- maxUncompressedChunkLen = 65536 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go deleted file mode 100644 index d6a03973b3..0000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path/filepath" - "strings" - "testing" -) - -var ( - download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - testdata = flag.String("testdata", "testdata", "Directory containing the test data") -) - -func roundtrip(b, ebuf, dbuf []byte) error { - d, err := Decode(dbuf, Encode(ebuf, b)) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if !bytes.Equal(b, d) { - return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rng := rand.New(rand.NewSource(27354294)) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(rng.Uint32()) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestInvalidVarint(t *testing.T) { - data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00") - if _, err := DecodedLen(data); err != ErrCorrupt { - t.Errorf("DecodedLen: got %v, want ErrCorrupt", err) - } - if _, err := Decode(nil, data); err != ErrCorrupt { - t.Errorf("Decode: got %v, want ErrCorrupt", err) - } -} - -func cmp(a, b []byte) error { - if len(a) != len(b) { - return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) - } - for i := range a { - if a[i] != b[i] { - return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) - } - } - return nil -} - -func TestFramingFormat(t *testing.T) { - // src is comprised of alternating 1e5-sized sequences of random - // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen - // because it is larger than maxUncompressedChunkLen (64k). 
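The block format spelled out in the snappy.go doc comment above is small enough to verify by hand. The following is a minimal sketch for illustration only (it is not part of the deleted sources), assuming the `github.com/golang/snappy` import path used in the README above: it builds a literal-only block exactly as the format comment describes (a uvarint decoded length followed by a short-literal tag byte), feeds it to `Decode`, and computes the masked framing checksum the same way the unexported `crc` function above does.

```go
package main

import (
	"bytes"
	"fmt"
	"hash/crc32"

	"github.com/golang/snappy" // assumed import path, per the README above
)

func main() {
	// A block is the uvarint-encoded decoded length followed by chunks
	// (see the format comment in snappy.go). For lengths under 128 the
	// uvarint is a single byte; for a literal shorter than 61 bytes the
	// tag byte is (length-1)<<2 | tagLiteral, where tagLiteral is 0x00.
	lit := []byte("hello")
	block := append([]byte{byte(len(lit)), byte(len(lit)-1) << 2}, lit...)

	got, err := snappy.Decode(nil, block)
	if err != nil || !bytes.Equal(got, lit) {
		panic(fmt.Sprintf("Decode: got %q, err %v", got, err))
	}

	// The framing checksum is CRC-32C of the uncompressed data, rotated
	// and offset exactly as the unexported crc function above computes it.
	c := crc32.Checksum(lit, crc32.MakeTable(crc32.Castagnoli))
	masked := (c>>15 | c<<17) + 0xa282ead8
	fmt.Printf("decoded %q, masked checksum 0x%08x\n", got, masked)
}
```

Note that `Decode` returns `ErrCorrupt` whenever the stated decoded length disagrees with the chunks that follow, which is precisely the `d != dLen` check at the end of `Decode` above.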
- src := make([]byte, 1e6) - rng := rand.New(rand.NewSource(1)) - for i := 0; i < 10; i++ { - if i%2 == 0 { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(rng.Intn(256)) - } - } else { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(i) - } - } - } - - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(src); err != nil { - t.Fatalf("Write: encoding: %v", err) - } - dst, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Fatalf("ReadAll: decoding: %v", err) - } - if err := cmp(dst, src); err != nil { - t.Fatal(err) - } -} - -func TestReaderReset(t *testing.T) { - gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000) - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(gold); err != nil { - t.Fatalf("Write: %v", err) - } - encoded, invalid, partial := buf.String(), "invalid", "partial" - r := NewReader(nil) - for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} { - if s == partial { - r.Reset(strings.NewReader(encoded)) - if _, err := r.Read(make([]byte, 101)); err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - continue - } - r.Reset(strings.NewReader(s)) - got, err := ioutil.ReadAll(r) - switch s { - case encoded: - if err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - if err := cmp(got, gold); err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - case invalid: - if err == nil { - t.Errorf("#%d: got nil error, want non-nil", i) - continue - } - } - } -} - -func TestWriterReset(t *testing.T) { - gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000) - var gots, wants [][]byte - const n = 20 - w, failed := NewWriter(nil), false - for i := 0; i <= n; i++ { - buf := new(bytes.Buffer) - w.Reset(buf) - want := gold[:len(gold)*i/n] - if _, err := w.Write(want); err != nil { - t.Errorf("#%d: Write: %v", i, err) - failed = true - continue - } - got, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Errorf("#%d: ReadAll: %v", i, err) - failed = true - continue - } - gots = append(gots, got) - wants = append(wants, want) - } - if failed { - return - } - for i := range gots { - if err := cmp(gots[i], wants[i]); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func benchDecode(b *testing.B, src []byte) { - encoded := Encode(nil, src) - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Decode(src, encoded) - } -} - -func benchEncode(b *testing.B, src []byte) { - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - dst := make([]byte, MaxEncodedLen(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Encode(dst, src) - } -} - -func readFile(b testing.TB, filename string) []byte { - src, err := ioutil.ReadFile(filename) - if err != nil { - b.Skipf("skipping benchmark: %v", err) - } - if len(src) == 0 { - b.Fatalf("%s has zero length", filename) - } - return src -} - -// expand returns a slice of length n containing repeated copies of src. -func expand(src []byte, n int) []byte { - dst := make([]byte, n) - for x := dst; len(x) > 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // Note: the file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. 
- data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -// testFiles' values are copied directly from -// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc -// The label field is unused in snappy-go. -var testFiles = []struct { - label string - filename string -}{ - {"html", "html"}, - {"urls", "urls.10K"}, - {"jpg", "fireworks.jpeg"}, - {"jpg_200", "fireworks.jpeg"}, - {"pdf", "paper-100k.pdf"}, - {"html4", "html_x_4"}, - {"txt1", "alice29.txt"}, - {"txt2", "asyoulik.txt"}, - {"txt3", "lcet10.txt"}, - {"txt4", "plrabn12.txt"}, - {"pb", "geo.protodata"}, - {"gaviota", "kppkn.gtb"}, -} - -// The test data files are present at this canonical URL. -const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" - -func downloadTestdata(b *testing.B, basename string) (errRet error) { - filename := filepath.Join(*testdata, basename) - if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { - return nil - } - - if !*download { - b.Skipf("test data not found; skipping benchmark without the -download flag") - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. - if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) { - return fmt.Errorf("failed to create testdata: %s", err) - } - - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - url := baseURL + basename - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("failed to download %s: %s", url, err) - } - defer resp.Body.Close() - if s := resp.StatusCode; s != http.StatusOK { - return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) - } - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) - } - return nil -} - -func benchFile(b *testing.B, n int, decode bool) { - if err := downloadTestdata(b, testFiles[n].filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - data := readFile(b, filepath.Join(*testdata, testFiles[n].filename)) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. 
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md b/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md deleted file mode 100644 index bce2ebb516..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md +++ /dev/null @@ -1,39 +0,0 @@ -Consul API client -================= - -This package provides the `api` package which attempts to -provide programmatic access to the full Consul API. - -Currently, all of the Consul APIs included in version 0.3 are supported. 
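Beyond the KV snippet shown under Usage below, most read endpoints in this package accept `QueryOptions` and return `QueryMeta` (both defined in api.go later in this diff), which together enable blocking queries. The sketch below is illustrative only, assuming a local agent at the default `127.0.0.1:8500` address: it watches a key by feeding each response's `LastIndex` back in as the next `WaitIndex`.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	// DefaultConfig targets 127.0.0.1:8500 unless CONSUL_HTTP_ADDR is set.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	kv := client.KV()

	var lastIndex uint64
	for {
		// A non-zero WaitIndex turns this GET into a blocking query: the
		// agent holds the request until the key's index passes WaitIndex
		// (or the wait time elapses), then reports the current index in
		// QueryMeta.LastIndex, which seeds the next iteration.
		pair, meta, err := kv.Get("foo", &api.QueryOptions{WaitIndex: lastIndex})
		if err != nil {
			panic(err)
		}
		if pair != nil {
			fmt.Printf("foo = %s (index %d)\n", pair.Value, meta.LastIndex)
		}
		lastIndex = meta.LastIndex
	}
}
```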
- -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api) - -Usage -===== - -Below is an example of using the Consul client: - -```go -// Get a new client, with KV endpoints -client, _ := api.NewClient(api.DefaultConfig()) -kv := client.KV() - -// PUT a new KV pair -p := &api.KVPair{Key: "foo", Value: []byte("test")} -_, err := kv.Put(p, nil) -if err != nil { - panic(err) -} - -// Lookup the pair -pair, _, err := kv.Get("foo", nil) -if err != nil { - panic(err) -} -fmt.Printf("KV: %v", pair) - -``` - diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go deleted file mode 100644 index c3fb0d53aa..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go +++ /dev/null @@ -1,140 +0,0 @@ -package api - -const ( - // ACLClientType is the client type token - ACLClientType = "client" - - // ACLManagementType is the management type token - ACLManagementType = "management" -) - -// ACLEntry is used to represent an ACL entry -type ACLEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - ID string - Name string - Type string - Rules string -} - -// ACL can be used to query the ACL endpoints -type ACL struct { - c *Client -} - -// ACL returns a handle to the ACL endpoints -func (c *Client) ACL() *ACL { - return &ACL{c} -} - -// Create is used to generate a new token with the given parameters -func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/create") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update is used to update the rules of an existing token -func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/update") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Destroy is used to destroy a given ACL token ID -func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Clone is used to return a new token cloned from an existing one -func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Info is used to query for information about an ACL token -func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/info/"+id) - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() -
- qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to get all the ACL tokens -func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/list") - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go deleted file mode 100644 index 2a5207a6ee..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package api - -import ( - "testing" -) - -func TestACL_CreateDestroy(t *testing.T) { - t.Parallel() - c, s := makeACLClient(t) - defer s.Stop() - - acl := c.ACL() - - ae := ACLEntry{ - Name: "API test", - Type: ACLClientType, - Rules: `key "" { policy = "deny" }`, - } - - id, wm, err := acl.Create(&ae, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if wm.RequestTime == 0 { - t.Fatalf("bad: %v", wm) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - ae2, _, err := acl.Info(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules { - t.Fatalf("Bad: %#v", ae2) - } - - wm, err = acl.Destroy(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if wm.RequestTime == 0 { - t.Fatalf("bad: %v", wm) - } -} - -func TestACL_CloneDestroy(t *testing.T) { - t.Parallel() - c, s := makeACLClient(t) - defer s.Stop() - - acl := c.ACL() - - id, wm, err := acl.Clone(c.config.Token, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if wm.RequestTime == 0 { - t.Fatalf("bad: %v", wm) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - wm, err = acl.Destroy(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if wm.RequestTime == 0 { - t.Fatalf("bad: %v", wm) - } -} - -func TestACL_Info(t *testing.T) { - t.Parallel() - c, s := makeACLClient(t) - defer s.Stop() - - acl := c.ACL() - - ae, qm, err := acl.Info(c.config.Token, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } - - if ae == nil || ae.ID != c.config.Token || ae.Type != ACLManagementType { - t.Fatalf("bad: %#v", ae) - } -} - -func TestACL_List(t *testing.T) { - t.Parallel() - c, s := makeACLClient(t) - defer s.Stop() - - acl := c.ACL() - - acls, qm, err := acl.List(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(acls) < 2 { - t.Fatalf("bad: %v", acls) - } - - if qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go deleted file mode 100644 index e56a18dcd2..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go +++ /dev/null @@ -1,334 +0,0 @@ -package api - -import ( - "fmt" -) - -// AgentCheck represents a check known to the 
agent -type AgentCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string -} - -// AgentService represents a service known to the agent -type AgentService struct { - ID string - Service string - Tags []string - Port int - Address string -} - -// AgentMember represents a cluster member known to the agent -type AgentMember struct { - Name string - Addr string - Port uint16 - Tags map[string]string - Status int - ProtocolMin uint8 - ProtocolMax uint8 - ProtocolCur uint8 - DelegateMin uint8 - DelegateMax uint8 - DelegateCur uint8 -} - -// AgentServiceRegistration is used to register a new service -type AgentServiceRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Tags []string `json:",omitempty"` - Port int `json:",omitempty"` - Address string `json:",omitempty"` - Check *AgentServiceCheck - Checks AgentServiceChecks -} - -// AgentCheckRegistration is used to register a new check -type AgentCheckRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Notes string `json:",omitempty"` - ServiceID string `json:",omitempty"` - AgentServiceCheck -} - -// AgentServiceCheck is used to create an associated -// check for a service -type AgentServiceCheck struct { - Script string `json:",omitempty"` - Interval string `json:",omitempty"` - Timeout string `json:",omitempty"` - TTL string `json:",omitempty"` - HTTP string `json:",omitempty"` - Status string `json:",omitempty"` -} -type AgentServiceChecks []*AgentServiceCheck - -// Agent can be used to query the Agent endpoints -type Agent struct { - c *Client - - // cache the node name - nodeName string -} - -// Agent returns a handle to the agent endpoints -func (c *Client) Agent() *Agent { - return &Agent{c: c} -} - -// Self is used to query the agent we are speaking to for -// information about itself -func (a *Agent) Self() (map[string]map[string]interface{}, error) { - r := a.c.newRequest("GET", "/v1/agent/self") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]map[string]interface{} - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// NodeName is used to get the node name of the agent -func (a *Agent) NodeName() (string, error) { - if a.nodeName != "" { - return a.nodeName, nil - } - info, err := a.Self() - if err != nil { - return "", err - } - name := info["Config"]["NodeName"].(string) - a.nodeName = name - return name, nil -} - -// Checks returns the locally registered checks -func (a *Agent) Checks() (map[string]*AgentCheck, error) { - r := a.c.newRequest("GET", "/v1/agent/checks") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentCheck - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Services returns the locally registered services -func (a *Agent) Services() (map[string]*AgentService, error) { - r := a.c.newRequest("GET", "/v1/agent/services") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentService - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Members returns the known gossip members. The WAN -// flag can be used to query a server for WAN members. 
-func (a *Agent) Members(wan bool) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// ServiceRegister is used to register a new service with -// the local agent -func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/service/register") - r.obj = service - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ServiceDeregister is used to deregister a service with -// the local agent -func (a *Agent) ServiceDeregister(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// PassTTL is used to set a TTL check to the passing state -func (a *Agent) PassTTL(checkID, note string) error { - return a.UpdateTTL(checkID, note, "pass") -} - -// WarnTTL is used to set a TTL check to the warning state -func (a *Agent) WarnTTL(checkID, note string) error { - return a.UpdateTTL(checkID, note, "warn") -} - -// FailTTL is used to set a TTL check to the failing state -func (a *Agent) FailTTL(checkID, note string) error { - return a.UpdateTTL(checkID, note, "fail") -} - -// UpdateTTL is used to update the TTL of a check -func (a *Agent) UpdateTTL(checkID, note, status string) error { - switch status { - case "pass": - case "warn": - case "fail": - default: - return fmt.Errorf("Invalid status: %s", status) - } - endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) - r := a.c.newRequest("PUT", endpoint) - r.params.Set("note", note) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckRegister is used to register a new check with -// the local agent -func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/check/register") - r.obj = check - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckDeregister is used to deregister a check with -// the local agent -func (a *Agent) CheckDeregister(checkID string) error { - r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Join is used to instruct the agent to attempt a join to -// another cluster member -func (a *Agent) Join(addr string, wan bool) error { - r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ForceLeave is used to have the agent eject a failed node -func (a *Agent) ForceLeave(node string) error { - r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableServiceMaintenance toggles service maintenance mode on -// for the given service ID. 
-func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableServiceMaintenance toggles service maintenance mode off -// for the given service ID. -func (a *Agent) DisableServiceMaintenance(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableNodeMaintenance toggles node maintenance mode on for the -// agent we are connected to. -func (a *Agent) EnableNodeMaintenance(reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableNodeMaintenance toggles node maintenance mode off for the -// agent we are connected to. -func (a *Agent) DisableNodeMaintenance() error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go deleted file mode 100644 index 358c12a6c2..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go +++ /dev/null @@ -1,524 +0,0 @@ -package api - -import ( - "strings" - "testing" -) - -func TestAgent_Self(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - info, err := agent.Self() - if err != nil { - t.Fatalf("err: %v", err) - } - - name := info["Config"]["NodeName"] - if name == "" { - t.Fatalf("bad: %v", info) - } -} - -func TestAgent_Members(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - members, err := agent.Members(false) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(members) != 1 { - t.Fatalf("bad: %v", members) - } -} - -func TestAgent_Services(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - reg := &AgentServiceRegistration{ - Name: "foo", - Tags: []string{"bar", "baz"}, - Port: 8000, - Check: &AgentServiceCheck{ - TTL: "15s", - }, - } - if err := agent.ServiceRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - services, err := agent.Services() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := services["foo"]; !ok { - t.Fatalf("missing service: %v", services) - } - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - chk, ok := checks["service:foo"] - if !ok { - t.Fatalf("missing check: %v", checks) - } - - // Checks should default to critical - if chk.Status != "critical" { - t.Fatalf("Bad: %#v", chk) - } - - if err := agent.ServiceDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_Services_CheckPassing(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - reg := &AgentServiceRegistration{ - Name: "foo", - Tags: []string{"bar", "baz"}, - Port: 8000, - Check: 
&AgentServiceCheck{ - TTL: "15s", - Status: "passing", - }, - } - if err := agent.ServiceRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - services, err := agent.Services() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := services["foo"]; !ok { - t.Fatalf("missing service: %v", services) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - chk, ok := checks["service:foo"] - if !ok { - t.Fatalf("missing check: %v", checks) - } - - if chk.Status != "passing" { - t.Fatalf("Bad: %#v", chk) - } - if err := agent.ServiceDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_Services_CheckBadStatus(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - reg := &AgentServiceRegistration{ - Name: "foo", - Tags: []string{"bar", "baz"}, - Port: 8000, - Check: &AgentServiceCheck{ - TTL: "15s", - Status: "fluffy", - }, - } - if err := agent.ServiceRegister(reg); err == nil { - t.Fatalf("bad status accepted") - } -} - -func TestAgent_ServiceAddress(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - reg1 := &AgentServiceRegistration{ - Name: "foo1", - Port: 8000, - Address: "192.168.0.42", - } - reg2 := &AgentServiceRegistration{ - Name: "foo2", - Port: 8000, - } - if err := agent.ServiceRegister(reg1); err != nil { - t.Fatalf("err: %v", err) - } - if err := agent.ServiceRegister(reg2); err != nil { - t.Fatalf("err: %v", err) - } - - services, err := agent.Services() - if err != nil { - t.Fatalf("err: %v", err) - } - - if _, ok := services["foo1"]; !ok { - t.Fatalf("missing service: %v", services) - } - if _, ok := services["foo2"]; !ok { - t.Fatalf("missing service: %v", services) - } - - if services["foo1"].Address != "192.168.0.42" { - t.Fatalf("missing Address field in service foo1: %v", services) - } - if services["foo2"].Address != "" { - t.Fatalf("missing Address field in service foo2: %v", services) - } - - if err := agent.ServiceDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_Services_MultipleChecks(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - reg := &AgentServiceRegistration{ - Name: "foo", - Tags: []string{"bar", "baz"}, - Port: 8000, - Checks: AgentServiceChecks{ - &AgentServiceCheck{ - TTL: "15s", - }, - &AgentServiceCheck{ - TTL: "30s", - }, - }, - } - if err := agent.ServiceRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - services, err := agent.Services() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := services["foo"]; !ok { - t.Fatalf("missing service: %v", services) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := checks["service:foo:1"]; !ok { - t.Fatalf("missing check: %v", checks) - } - if _, ok := checks["service:foo:2"]; !ok { - t.Fatalf("missing check: %v", checks) - } -} - -func TestAgent_SetTTLStatus(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - reg := &AgentServiceRegistration{ - Name: "foo", - Check: &AgentServiceCheck{ - TTL: "15s", - }, - } - if err := agent.ServiceRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - if err := agent.WarnTTL("service:foo", "test"); err != nil { - t.Fatalf("err: %v", err) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - chk, ok := checks["service:foo"] - if !ok { - t.Fatalf("missing check: %v", 
checks) - } - if chk.Status != "warning" { - t.Fatalf("Bad: %#v", chk) - } - if chk.Output != "test" { - t.Fatalf("Bad: %#v", chk) - } - - if err := agent.ServiceDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_Checks(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - reg := &AgentCheckRegistration{ - Name: "foo", - } - reg.TTL = "15s" - if err := agent.CheckRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - chk, ok := checks["foo"] - if !ok { - t.Fatalf("missing check: %v", checks) - } - if chk.Status != "critical" { - t.Fatalf("check not critical: %v", chk) - } - - if err := agent.CheckDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_CheckStartPassing(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - reg := &AgentCheckRegistration{ - Name: "foo", - AgentServiceCheck: AgentServiceCheck{ - Status: "passing", - }, - } - reg.TTL = "15s" - if err := agent.CheckRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - chk, ok := checks["foo"] - if !ok { - t.Fatalf("missing check: %v", checks) - } - if chk.Status != "passing" { - t.Fatalf("check not passing: %v", chk) - } - - if err := agent.CheckDeregister("foo"); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_Checks_serviceBound(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - // First register a service - serviceReg := &AgentServiceRegistration{ - Name: "redis", - } - if err := agent.ServiceRegister(serviceReg); err != nil { - t.Fatalf("err: %v", err) - } - - // Register a check bound to the service - reg := &AgentCheckRegistration{ - Name: "redischeck", - ServiceID: "redis", - } - reg.TTL = "15s" - if err := agent.CheckRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - - check, ok := checks["redischeck"] - if !ok { - t.Fatalf("missing check: %v", checks) - } - if check.ServiceID != "redis" { - t.Fatalf("missing service association for check: %v", check) - } -} - -func TestAgent_Join(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - info, err := agent.Self() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Join ourself - addr := info["Config"]["AdvertiseAddr"].(string) - err = agent.Join(addr, false) - if err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestAgent_ForceLeave(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - // Eject somebody - err := agent.ForceLeave("foo") - if err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestServiceMaintenance(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - // First register a service - serviceReg := &AgentServiceRegistration{ - Name: "redis", - } - if err := agent.ServiceRegister(serviceReg); err != nil { - t.Fatalf("err: %v", err) - } - - // Enable maintenance mode - if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure a critical check was added - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - found := false - for _, check := range checks { 
- if strings.Contains(check.CheckID, "maintenance") { - found = true - if check.Status != "critical" || check.Notes != "broken" { - t.Fatalf("bad: %#v", checks) - } - } - } - if !found { - t.Fatalf("bad: %#v", checks) - } - - // Disable maintenance mode - if err := agent.DisableServiceMaintenance("redis"); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure the critical health check was removed - checks, err = agent.Checks() - if err != nil { - t.Fatalf("err: %s", err) - } - for _, check := range checks { - if strings.Contains(check.CheckID, "maintenance") { - t.Fatalf("should have removed health check") - } - } -} - -func TestNodeMaintenance(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - - // Enable maintenance mode - if err := agent.EnableNodeMaintenance("broken"); err != nil { - t.Fatalf("err: %s", err) - } - - // Check that a critical check was added - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %s", err) - } - found := false - for _, check := range checks { - if strings.Contains(check.CheckID, "maintenance") { - found = true - if check.Status != "critical" || check.Notes != "broken" { - t.Fatalf("bad: %#v", checks) - } - } - } - if !found { - t.Fatalf("bad: %#v", checks) - } - - // Disable maintenance mode - if err := agent.DisableNodeMaintenance(); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure the check was removed - checks, err = agent.Checks() - if err != nil { - t.Fatalf("err: %s", err) - } - for _, check := range checks { - if strings.Contains(check.CheckID, "maintenance") { - t.Fatalf("should have removed health check") - } - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go deleted file mode 100644 index 8fe2ead048..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go +++ /dev/null @@ -1,442 +0,0 @@ -package api - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "log" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" -) - -// QueryOptions are used to parameterize a query -type QueryOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // AllowStale allows any Consul server (non-leader) to service - // a read. This allows for lower latency and higher throughput - AllowStale bool - - // RequireConsistent forces the read to be fully consistent. - // This is more expensive but prevents ever performing a stale - // read. - RequireConsistent bool - - // WaitIndex is used to enable a blocking query. Waits - // until the timeout or the next index is reached - WaitIndex uint64 - - // WaitTime is used to bound the duration of a wait. - // Defaults to that of the Config, but can be overridden. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string -} - -// WriteOptions are used to parameterize a write -type WriteOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string -} - -// QueryMeta is used to return meta data about a query -type QueryMeta struct { - // LastIndex.
This can be used as a WaitIndex to perform - // a blocking query - LastIndex uint64 - - // Time of last contact from the leader for the - // server servicing the request - LastContact time.Duration - - // Is there a known leader - KnownLeader bool - - // How long did the request take - RequestTime time.Duration -} - -// WriteMeta is used to return meta data about a write -type WriteMeta struct { - // How long did the request take - RequestTime time.Duration -} - -// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication -type HttpBasicAuth struct { - // Username to use for HTTP Basic Authentication - Username string - - // Password to use for HTTP Basic Authentication - Password string -} - -// Config is used to configure the creation of a client -type Config struct { - // Address is the address of the Consul server - Address string - - // Scheme is the URI scheme for the Consul server - Scheme string - - // Datacenter to use. If not provided, the default agent datacenter is used. - Datacenter string - - // HttpClient is the client to use. Default will be - // used if not provided. - HttpClient *http.Client - - // HttpAuth is the auth info to use for http access. - HttpAuth *HttpBasicAuth - - // WaitTime limits how long a Watch will block. If not provided, - // the agent default values will be used. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string -} - -// DefaultConfig returns a default configuration for the client -func DefaultConfig() *Config { - config := &Config{ - Address: "127.0.0.1:8500", - Scheme: "http", - HttpClient: http.DefaultClient, - } - - if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" { - config.Address = addr - } - - if token := os.Getenv("CONSUL_HTTP_TOKEN"); token != "" { - config.Token = token - } - - if auth := os.Getenv("CONSUL_HTTP_AUTH"); auth != "" { - var username, password string - if strings.Contains(auth, ":") { - split := strings.SplitN(auth, ":", 2) - username = split[0] - password = split[1] - } else { - username = auth - } - - config.HttpAuth = &HttpBasicAuth{ - Username: username, - Password: password, - } - } - - if ssl := os.Getenv("CONSUL_HTTP_SSL"); ssl != "" { - enabled, err := strconv.ParseBool(ssl) - if err != nil { - log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL: %s", err) - } - - if enabled { - config.Scheme = "https" - } - } - - if verify := os.Getenv("CONSUL_HTTP_SSL_VERIFY"); verify != "" { - doVerify, err := strconv.ParseBool(verify) - if err != nil { - log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL_VERIFY: %s", err) - } - - if !doVerify { - config.HttpClient.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - } - } - } - - return config -} - -// Client provides a client to the Consul API -type Client struct { - config Config -} - -// NewClient returns a new client -func NewClient(config *Config) (*Client, error) { - // bootstrap the config - defConfig := DefaultConfig() - - if len(config.Address) == 0 { - config.Address = defConfig.Address - } - - if len(config.Scheme) == 0 { - config.Scheme = defConfig.Scheme - } - - if config.HttpClient == nil { - config.HttpClient = defConfig.HttpClient - } - - if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 { - config.HttpClient = &http.Client{ - Transport: &http.Transport{ - Dial: func(_, _ string) (net.Conn, error) { - return net.Dial("unix", parts[1]) - }, - }, - } - config.Address = 
parts[1] - } - - client := &Client{ - config: *config, - } - return client, nil -} - -// request is used to help build up a request -type request struct { - config *Config - method string - url *url.URL - params url.Values - body io.Reader - obj interface{} -} - -// setQueryOptions is used to annotate the request with -// additional query options -func (r *request) setQueryOptions(q *QueryOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.AllowStale { - r.params.Set("stale", "") - } - if q.RequireConsistent { - r.params.Set("consistent", "") - } - if q.WaitIndex != 0 { - r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) - } - if q.WaitTime != 0 { - r.params.Set("wait", durToMsec(q.WaitTime)) - } - if q.Token != "" { - r.params.Set("token", q.Token) - } -} - -// durToMsec converts a duration to a millisecond specified string -func durToMsec(dur time.Duration) string { - return fmt.Sprintf("%dms", dur/time.Millisecond) -} - -// setWriteOptions is used to annotate the request with -// additional write options -func (r *request) setWriteOptions(q *WriteOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.Token != "" { - r.params.Set("token", q.Token) - } -} - -// toHTTP converts the request to an HTTP request -func (r *request) toHTTP() (*http.Request, error) { - // Encode the query parameters - r.url.RawQuery = r.params.Encode() - - // Check if we should encode the body - if r.body == nil && r.obj != nil { - if b, err := encodeBody(r.obj); err != nil { - return nil, err - } else { - r.body = b - } - } - - // Create the HTTP request - req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) - if err != nil { - return nil, err - } - - req.URL.Host = r.url.Host - req.URL.Scheme = r.url.Scheme - req.Host = r.url.Host - - // Setup auth - if r.config.HttpAuth != nil { - req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) - } - - return req, nil -} - -// newRequest is used to create a new request -func (c *Client) newRequest(method, path string) *request { - r := &request{ - config: &c.config, - method: method, - url: &url.URL{ - Scheme: c.config.Scheme, - Host: c.config.Address, - Path: path, - }, - params: make(map[string][]string), - } - if c.config.Datacenter != "" { - r.params.Set("dc", c.config.Datacenter) - } - if c.config.WaitTime != 0 { - r.params.Set("wait", durToMsec(r.config.WaitTime)) - } - if c.config.Token != "" { - r.params.Set("token", r.config.Token) - } - return r -} - -// doRequest runs a request with our client -func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { - req, err := r.toHTTP() - if err != nil { - return 0, nil, err - } - start := time.Now() - resp, err := c.config.HttpClient.Do(req) - diff := time.Now().Sub(start) - return diff, resp, err -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. 
-func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - r := c.newRequest("GET", endpoint) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, out); err != nil { - return nil, err - } - return qm, nil -} - -// write is used to do a PUT request against an endpoint -// and serialize/deserialize using the standard Consul conventions. -func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - r := c.newRequest("PUT", endpoint) - r.setWriteOptions(q) - r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } - return wm, nil -} - -// parseQueryMeta is used to help parse query meta-data -func parseQueryMeta(resp *http.Response, q *QueryMeta) error { - header := resp.Header - - // Parse the X-Consul-Index - index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) - } - q.LastIndex = index - - // Parse the X-Consul-LastContact - last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) - } - q.LastContact = time.Duration(last) * time.Millisecond - - // Parse the X-Consul-KnownLeader - switch header.Get("X-Consul-KnownLeader") { - case "true": - q.KnownLeader = true - default: - q.KnownLeader = false - } - return nil -} - -// decodeBody is used to JSON decode a body -func decodeBody(resp *http.Response, out interface{}) error { - dec := json.NewDecoder(resp.Body) - return dec.Decode(out) -} - -// encodeBody is used to encode a request body -func encodeBody(obj interface{}) (io.Reader, error) { - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(obj); err != nil { - return nil, err - } - return buf, nil -} - -// requireOK is used to wrap doRequest and check for a 200 -func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - if e != nil { - if resp != nil { - resp.Body.Close() - } - return d, nil, e - } - if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) - } - return d, resp, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go deleted file mode 100644 index 56f9494f89..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package api - -import ( - crand "crypto/rand" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - "testing" - "time" - - "github.com/hashicorp/consul/testutil" -) - -type configCallback func(c *Config) - -func makeClient(t *testing.T) (*Client, *testutil.TestServer) { - return makeClientWithConfig(t, nil, nil) -} - -func makeACLClient(t *testing.T) (*Client, *testutil.TestServer) { - return makeClientWithConfig(t, func(clientConfig *Config) { - clientConfig.Token = "root" - }, func(serverConfig *testutil.TestServerConfig) {
- serverConfig.ACLMasterToken = "root" - serverConfig.ACLDatacenter = "dc1" - serverConfig.ACLDefaultPolicy = "deny" - }) -} - -func makeClientWithConfig( - t *testing.T, - cb1 configCallback, - cb2 testutil.ServerConfigCallback) (*Client, *testutil.TestServer) { - - // Make client config - conf := DefaultConfig() - if cb1 != nil { - cb1(conf) - } - - // Create server - server := testutil.NewTestServerConfig(t, cb2) - conf.Address = server.HTTPAddr - - // Create client - client, err := NewClient(conf) - if err != nil { - t.Fatalf("err: %v", err) - } - - return client, server -} - -func testKey() string { - buf := make([]byte, 16) - if _, err := crand.Read(buf); err != nil { - panic(fmt.Errorf("Failed to read random bytes: %v", err)) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]) -} - -func TestDefaultConfig_env(t *testing.T) { - t.Parallel() - addr := "1.2.3.4:5678" - token := "abcd1234" - auth := "username:password" - - os.Setenv("CONSUL_HTTP_ADDR", addr) - defer os.Setenv("CONSUL_HTTP_ADDR", "") - os.Setenv("CONSUL_HTTP_TOKEN", token) - defer os.Setenv("CONSUL_HTTP_TOKEN", "") - os.Setenv("CONSUL_HTTP_AUTH", auth) - defer os.Setenv("CONSUL_HTTP_AUTH", "") - os.Setenv("CONSUL_HTTP_SSL", "1") - defer os.Setenv("CONSUL_HTTP_SSL", "") - os.Setenv("CONSUL_HTTP_SSL_VERIFY", "0") - defer os.Setenv("CONSUL_HTTP_SSL_VERIFY", "") - - config := DefaultConfig() - - if config.Address != addr { - t.Errorf("expected %q to be %q", config.Address, addr) - } - - if config.Token != token { - t.Errorf("expected %q to be %q", config.Token, token) - } - - if config.HttpAuth == nil { - t.Fatalf("expected HttpAuth to be enabled") - } - if config.HttpAuth.Username != "username" { - t.Errorf("expected %q to be %q", config.HttpAuth.Username, "username") - } - if config.HttpAuth.Password != "password" { - t.Errorf("expected %q to be %q", config.HttpAuth.Password, "password") - } - - if config.Scheme != "https" { - t.Errorf("expected %q to be %q", config.Scheme, "https") - } - - if !config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify { - t.Errorf("expected SSL verification to be off") - } -} - -func TestSetQueryOptions(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - r := c.newRequest("GET", "/v1/kv/foo") - q := &QueryOptions{ - Datacenter: "foo", - AllowStale: true, - RequireConsistent: true, - WaitIndex: 1000, - WaitTime: 100 * time.Second, - Token: "12345", - } - r.setQueryOptions(q) - - if r.params.Get("dc") != "foo" { - t.Fatalf("bad: %v", r.params) - } - if _, ok := r.params["stale"]; !ok { - t.Fatalf("bad: %v", r.params) - } - if _, ok := r.params["consistent"]; !ok { - t.Fatalf("bad: %v", r.params) - } - if r.params.Get("index") != "1000" { - t.Fatalf("bad: %v", r.params) - } - if r.params.Get("wait") != "100000ms" { - t.Fatalf("bad: %v", r.params) - } - if r.params.Get("token") != "12345" { - t.Fatalf("bad: %v", r.params) - } -} - -func TestSetWriteOptions(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - r := c.newRequest("GET", "/v1/kv/foo") - q := &WriteOptions{ - Datacenter: "foo", - Token: "23456", - } - r.setWriteOptions(q) - - if r.params.Get("dc") != "foo" { - t.Fatalf("bad: %v", r.params) - } - if r.params.Get("token") != "23456" { - t.Fatalf("bad: %v", r.params) - } -} - -func TestRequestToHTTP(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - r := c.newRequest("DELETE", "/v1/kv/foo") - q := &QueryOptions{ - Datacenter: 
"foo", - } - r.setQueryOptions(q) - req, err := r.toHTTP() - if err != nil { - t.Fatalf("err: %v", err) - } - - if req.Method != "DELETE" { - t.Fatalf("bad: %v", req) - } - if req.URL.RequestURI() != "/v1/kv/foo?dc=foo" { - t.Fatalf("bad: %v", req) - } -} - -func TestParseQueryMeta(t *testing.T) { - t.Parallel() - resp := &http.Response{ - Header: make(map[string][]string), - } - resp.Header.Set("X-Consul-Index", "12345") - resp.Header.Set("X-Consul-LastContact", "80") - resp.Header.Set("X-Consul-KnownLeader", "true") - - qm := &QueryMeta{} - if err := parseQueryMeta(resp, qm); err != nil { - t.Fatalf("err: %v", err) - } - - if qm.LastIndex != 12345 { - t.Fatalf("Bad: %v", qm) - } - if qm.LastContact != 80*time.Millisecond { - t.Fatalf("Bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("Bad: %v", qm) - } -} - -func TestAPI_UnixSocket(t *testing.T) { - t.Parallel() - if runtime.GOOS == "windows" { - t.SkipNow() - } - - tempDir, err := ioutil.TempDir("", "consul") - if err != nil { - t.Fatalf("err: %s", err) - } - defer os.RemoveAll(tempDir) - socket := filepath.Join(tempDir, "test.sock") - - c, s := makeClientWithConfig(t, func(c *Config) { - c.Address = "unix://" + socket - }, func(c *testutil.TestServerConfig) { - c.Addresses = &testutil.TestAddressConfig{ - HTTP: "unix://" + socket, - } - }) - defer s.Stop() - - agent := c.Agent() - - info, err := agent.Self() - if err != nil { - t.Fatalf("err: %s", err) - } - if info["Config"]["NodeName"] == "" { - t.Fatalf("bad: %v", info) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go deleted file mode 100644 index cf64bd9091..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go +++ /dev/null @@ -1,182 +0,0 @@ -package api - -type Node struct { - Node string - Address string -} - -type CatalogService struct { - Node string - Address string - ServiceID string - ServiceName string - ServiceAddress string - ServiceTags []string - ServicePort int -} - -type CatalogNode struct { - Node *Node - Services map[string]*AgentService -} - -type CatalogRegistration struct { - Node string - Address string - Datacenter string - Service *AgentService - Check *AgentCheck -} - -type CatalogDeregistration struct { - Node string - Address string - Datacenter string - ServiceID string - CheckID string -} - -// Catalog can be used to query the Catalog endpoints -type Catalog struct { - c *Client -} - -// Catalog returns a handle to the catalog endpoints -func (c *Client) Catalog() *Catalog { - return &Catalog{c} -} - -func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/register") - r.setWriteOptions(q) - r.obj = reg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/deregister") - r.setWriteOptions(q) - r.obj = dereg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -// Datacenters is used to query for all the known datacenters -func (c *Catalog) Datacenters() ([]string, error) { - r := c.c.newRequest("GET", "/v1/catalog/datacenters") - _, resp, err := 
requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []string - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to query all the known nodes -func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/nodes") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Node - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Services is used to query for all known services -func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/services") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out map[string][]string - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query catalog entries for a given service -func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/service/"+service) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CatalogService - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Node is used to query for service information about a single node -func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out *CatalogNode - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go deleted file mode 100644 index bb8be25b00..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go +++ /dev/null @@ -1,279 +0,0 @@ -package api - -import ( - "fmt" - "testing" - - "github.com/hashicorp/consul/testutil" -) - -func TestCatalog_Datacenters(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - catalog := c.Catalog() - - testutil.WaitForResult(func() (bool, error) { - datacenters, err := catalog.Datacenters() - if err != nil { - return false, err - } - - if len(datacenters) == 0 { - return false, fmt.Errorf("Bad: %v", datacenters) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Nodes(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - catalog := c.Catalog() - - testutil.WaitForResult(func() (bool, error) { - nodes, meta, err := catalog.Nodes(nil) - if err != nil { - return false, err - } - - if 
meta.LastIndex == 0 { - return false, fmt.Errorf("Bad: %v", meta) - } - - if len(nodes) == 0 { - return false, fmt.Errorf("Bad: %v", nodes) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Services(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - catalog := c.Catalog() - - testutil.WaitForResult(func() (bool, error) { - services, meta, err := catalog.Services(nil) - if err != nil { - return false, err - } - - if meta.LastIndex == 0 { - return false, fmt.Errorf("Bad: %v", meta) - } - - if len(services) == 0 { - return false, fmt.Errorf("Bad: %v", services) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Service(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - catalog := c.Catalog() - - testutil.WaitForResult(func() (bool, error) { - services, meta, err := catalog.Service("consul", "", nil) - if err != nil { - return false, err - } - - if meta.LastIndex == 0 { - return false, fmt.Errorf("Bad: %v", meta) - } - - if len(services) == 0 { - return false, fmt.Errorf("Bad: %v", services) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Node(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - catalog := c.Catalog() - name, _ := c.Agent().NodeName() - - testutil.WaitForResult(func() (bool, error) { - info, meta, err := catalog.Node(name, nil) - if err != nil { - return false, err - } - - if meta.LastIndex == 0 { - return false, fmt.Errorf("Bad: %v", meta) - } - if len(info.Services) == 0 { - return false, fmt.Errorf("Bad: %v", info) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestCatalog_Registration(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - catalog := c.Catalog() - - service := &AgentService{ - ID: "redis1", - Service: "redis", - Tags: []string{"master", "v1"}, - Port: 8000, - } - - check := &AgentCheck{ - Node: "foobar", - CheckID: "service:redis1", - Name: "Redis health check", - Notes: "Script based health check", - Status: "passing", - ServiceID: "redis1", - } - - reg := &CatalogRegistration{ - Datacenter: "dc1", - Node: "foobar", - Address: "192.168.10.10", - Service: service, - Check: check, - } - - testutil.WaitForResult(func() (bool, error) { - if _, err := catalog.Register(reg, nil); err != nil { - return false, err - } - - node, _, err := catalog.Node("foobar", nil) - if err != nil { - return false, err - } - - if _, ok := node.Services["redis1"]; !ok { - return false, fmt.Errorf("missing service: redis1") - } - - health, _, err := c.Health().Node("foobar", nil) - if err != nil { - return false, err - } - - if health[0].CheckID != "service:redis1" { - return false, fmt.Errorf("missing checkid service:redis1") - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) - - // Test catalog deregistration of the previously registered service - dereg := &CatalogDeregistration{ - Datacenter: "dc1", - Node: "foobar", - Address: "192.168.10.10", - ServiceID: "redis1", - } - - if _, err := catalog.Deregister(dereg, nil); err != nil { - t.Fatalf("err: %v", err) - } - - testutil.WaitForResult(func() (bool, error) { - node, _, err := catalog.Node("foobar", nil) - if err != nil { - return false, err - } - - if _, ok := node.Services["redis1"]; ok { - return false, fmt.Errorf("ServiceID:redis1 is not deregistered") - } - - return true, nil - }, func(err error) { - 
t.Fatalf("err: %s", err) - }) - - // Test deregistration of the previously registered check - dereg = &CatalogDeregistration{ - Datacenter: "dc1", - Node: "foobar", - Address: "192.168.10.10", - CheckID: "service:redis1", - } - - if _, err := catalog.Deregister(dereg, nil); err != nil { - t.Fatalf("err: %v", err) - } - - testutil.WaitForResult(func() (bool, error) { - health, _, err := c.Health().Node("foobar", nil) - if err != nil { - return false, err - } - - if len(health) != 0 { - return false, fmt.Errorf("CheckID:service:redis1 is not deregistered") - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) - - // Test node deregistration of the previously registered node - dereg = &CatalogDeregistration{ - Datacenter: "dc1", - Node: "foobar", - Address: "192.168.10.10", - } - - if _, err := catalog.Deregister(dereg, nil); err != nil { - t.Fatalf("err: %v", err) - } - - testutil.WaitForResult(func() (bool, error) { - node, _, err := catalog.Node("foobar", nil) - if err != nil { - return false, err - } - - if node != nil { - return false, fmt.Errorf("node is not deregistered: %v", node) - } - - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go deleted file mode 100644 index 85b5b069b0..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go +++ /dev/null @@ -1,104 +0,0 @@ -package api - -import ( - "bytes" - "strconv" -) - -// Event can be used to query the Event endpoints -type Event struct { - c *Client -} - -// UserEvent represents an event that was fired by the user -type UserEvent struct { - ID string - Name string - Payload []byte - NodeFilter string - ServiceFilter string - TagFilter string - Version int - LTime uint64 -} - -// Event returns a handle to the event endpoints -func (c *Client) Event() *Event { - return &Event{c} -} - -// Fire is used to fire a new user event. Only the Name, Payload and Filters -// are respected. This returns the ID or an associated error. Cross DC requests -// are supported. -func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { - r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) - r.setWriteOptions(q) - if params.NodeFilter != "" { - r.params.Set("node", params.NodeFilter) - } - if params.ServiceFilter != "" { - r.params.Set("service", params.ServiceFilter) - } - if params.TagFilter != "" { - r.params.Set("tag", params.TagFilter) - } - if params.Payload != nil { - r.body = bytes.NewReader(params.Payload) - } - - rtt, resp, err := requireOK(e.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out UserEvent - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// List is used to get the most recent events an agent has received. -// This list can be optionally filtered by the name. This endpoint supports -// quasi-blocking queries. The index is not monotonic, nor does it provide -// LastContact or KnownLeader.
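A hedged sketch of the user-event endpoints, assuming a client built as in the earlier sketch; Fire appears above and List just below. The event name and payload are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api" // assumed upstream import path
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ev := client.Event()

	// Fire respects only Name, Payload and the node/service/tag filters.
	id, _, err := ev.Fire(&api.UserEvent{Name: "deploy", Payload: []byte("v42")}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// List returns the agent's recent events, optionally filtered by name.
	events, _, err := ev.List("deploy", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range events {
		if e.ID == id {
			fmt.Printf("saw our event at LTime=%d\n", e.LTime)
		}
	}
}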
-func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { - r := e.c.newRequest("GET", "/v1/event/list") - r.setQueryOptions(q) - if name != "" { - r.params.Set("name", name) - } - rtt, resp, err := requireOK(e.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*UserEvent - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// IDToIndex is a bit of a hack. This simulates the index generation to -// convert an event ID into a WaitIndex. -func (e *Event) IDToIndex(uuid string) uint64 { - lower := uuid[0:8] + uuid[9:13] + uuid[14:18] - upper := uuid[19:23] + uuid[24:36] - lowVal, err := strconv.ParseUint(lower, 16, 64) - if err != nil { - panic("Failed to convert " + lower) - } - highVal, err := strconv.ParseUint(upper, 16, 64) - if err != nil { - panic("Failed to convert " + upper) - } - return lowVal ^ highVal -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go deleted file mode 100644 index 1ca92e2331..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package api - -import ( - "testing" - - "github.com/hashicorp/consul/testutil" -) - -func TestEvent_FireList(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - event := c.Event() - - params := &UserEvent{Name: "foo"} - id, meta, err := event.Fire(params, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - var events []*UserEvent - var qm *QueryMeta - testutil.WaitForResult(func() (bool, error) { - events, qm, err = event.List("", nil) - if err != nil { - t.Fatalf("err: %v", err) - } - return len(events) > 0, err - }, func(err error) { - t.Fatalf("err: %#v", err) - }) - - if events[len(events)-1].ID != id { - t.Fatalf("bad: %#v", events) - } - - if qm.LastIndex != event.IDToIndex(id) { - t.Fatalf("Bad: %#v", qm) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go deleted file mode 100644 index 02b161e28e..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go +++ /dev/null @@ -1,136 +0,0 @@ -package api - -import ( - "fmt" -) - -// HealthCheck is used to represent a single check -type HealthCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string -} - -// ServiceEntry is used for the health service endpoint -type ServiceEntry struct { - Node *Node - Service *AgentService - Checks []*HealthCheck -} - -// Health can be used to query the Health endpoints -type Health struct { - c *Client -} - -// Health returns a handle to the health endpoints -func (c *Client) Health() *Health { - return &Health{c} -} - -// Node is used to query for checks belonging to a given node -func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = 
rtt - - var out []*HealthCheck - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Checks is used to return the checks associated with a service -func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/checks/"+service) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*HealthCheck - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query health information along with service info -// for a given service. It can optionally do server-side filtering on a tag -// or nodes with passing health checks only. -func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/service/"+service) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - if passingOnly { - r.params.Set("passing", "1") - } - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*ServiceEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// State is used to retrieve all the checks in a given state. -// The wildcard "any" state can also be used for all checks. -func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { - switch state { - case "any": - case "warning": - case "critical": - case "passing": - case "unknown": - default: - return nil, nil, fmt.Errorf("Unsupported state: %v", state) - } - r := h.c.newRequest("GET", "/v1/health/state/"+state) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*HealthCheck - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go deleted file mode 100644 index d80a4693ae..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package api - -import ( - "fmt" - "testing" - - "github.com/hashicorp/consul/testutil" -) - -func TestHealth_Node(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - health := c.Health() - - info, err := agent.Self() - if err != nil { - t.Fatalf("err: %v", err) - } - name := info["Config"]["NodeName"].(string) - - testutil.WaitForResult(func() (bool, error) { - checks, meta, err := health.Node(name, nil) - if err != nil { - return false, err - } - if meta.LastIndex == 0 { - return false, fmt.Errorf("bad: %v", meta) - } - if len(checks) == 0 { - return false, fmt.Errorf("bad: %v", checks) - } - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestHealth_Checks(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - agent := c.Agent() - health := c.Health() - - // Make a service with a check - reg :=
&AgentServiceRegistration{ - Name: "foo", - Check: &AgentServiceCheck{ - TTL: "15s", - }, - } - if err := agent.ServiceRegister(reg); err != nil { - t.Fatalf("err: %v", err) - } - defer agent.ServiceDeregister("foo") - - testutil.WaitForResult(func() (bool, error) { - checks, meta, err := health.Checks("foo", nil) - if err != nil { - return false, err - } - if meta.LastIndex == 0 { - return false, fmt.Errorf("bad: %v", meta) - } - if len(checks) == 0 { - return false, fmt.Errorf("Bad: %v", checks) - } - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestHealth_Service(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - health := c.Health() - - testutil.WaitForResult(func() (bool, error) { - // consul service should always exist... - checks, meta, err := health.Service("consul", "", true, nil) - if err != nil { - return false, err - } - if meta.LastIndex == 0 { - return false, fmt.Errorf("bad: %v", meta) - } - if len(checks) == 0 { - return false, fmt.Errorf("Bad: %v", checks) - } - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} - -func TestHealth_State(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - health := c.Health() - - testutil.WaitForResult(func() (bool, error) { - checks, meta, err := health.State("any", nil) - if err != nil { - return false, err - } - if meta.LastIndex == 0 { - return false, fmt.Errorf("bad: %v", meta) - } - if len(checks) == 0 { - return false, fmt.Errorf("Bad: %v", checks) - } - return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go deleted file mode 100644 index c1a8923bef..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go +++ /dev/null @@ -1,240 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "net/http" - "strconv" - "strings" -) - -// KVPair is used to represent a single K/V entry -type KVPair struct { - Key string - CreateIndex uint64 - ModifyIndex uint64 - LockIndex uint64 - Flags uint64 - Value []byte - Session string -} - -// KVPairs is a list of KVPair objects -type KVPairs []*KVPair - -// KV is used to manipulate the K/V API -type KV struct { - c *Client -} - -// KV is used to return a handle to the K/V apis -func (c *Client) KV() *KV { - return &KV{c} -} - -// Get is used to lookup a single key -func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { - resp, qm, err := k.getInternal(key, nil, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to lookup all keys under a prefix -func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { - resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Keys is used to list all the keys under a prefix. Optionally, -// a separator can be used to limit the responses. 
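A hedged sketch of the K/V round trip, assuming a client as in the earlier sketches: Get and List are defined above, Put further down in this file. The key and value are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api" // assumed upstream import path
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Put writes only the Key, Flags and Value fields of the pair.
	if _, err := kv.Put(&api.KVPair{Key: "app/config", Value: []byte("on")}, nil); err != nil {
		log.Fatal(err)
	}

	// Get returns a nil pair (and no error) for a missing key.
	pair, meta, err := kv.Get("app/config", nil)
	if err != nil || pair == nil {
		log.Fatalf("get failed: %v", err)
	}
	fmt.Printf("%s=%s (ModifyIndex=%d, LastIndex=%d)\n", pair.Key, pair.Value, pair.ModifyIndex, meta.LastIndex)
}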
-func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { - params := map[string]string{"keys": ""} - if separator != "" { - params["separator"] = separator - } - resp, qm, err := k.getInternal(prefix, params, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []string - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { - r := k.c.newRequest("GET", "/v1/kv/"+key) - r.setQueryOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - resp.Body.Close() - return nil, qm, nil - } else if resp.StatusCode != 200 { - resp.Body.Close() - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - return resp, qm, nil -} - -// Put is used to write a new value. Only the -// Key, Flags and Value are respected. -func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { - params := make(map[string]string, 1) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - _, wm, err := k.put(p.Key, params, p.Value, q) - return wm, err -} - -// CAS is used for a Check-And-Set operation. The Key, -// ModifyIndex, Flags and Value are respected. Returns true -// on success or false on failures. -func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) - return k.put(p.Key, params, p.Value, q) -} - -// Acquire is used for a lock acquisition operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["acquire"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -// Release is used for a lock release operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["release"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { - if len(key) > 0 && key[0] == '/' { - return false, nil, fmt.Errorf("Invalid key. 
Key must not begin with a '/': %s", key) - } - - r := k.c.newRequest("PUT", "/v1/kv/"+key) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - r.body = bytes.NewReader(body) - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(string(buf.Bytes()), "true") - return res, qm, nil -} - -// Delete is used to delete a single key -func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(key, nil, w) - return qm, err -} - -// DeleteCAS is used for a Delete Check-And-Set operation. The Key -// and ModifyIndex are respected. Returns true on success or false on failures. -func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := map[string]string{ - "cas": strconv.FormatUint(p.ModifyIndex, 10), - } - return k.deleteInternal(p.Key, params, q) -} - -// DeleteTree is used to delete all keys under a prefix -func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) - return qm, err -} - -func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { - r := k.c.newRequest("DELETE", "/v1/kv/"+key) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(string(buf.Bytes()), "true") - return res, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go deleted file mode 100644 index 758595d895..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go +++ /dev/null @@ -1,447 +0,0 @@ -package api - -import ( - "bytes" - "path" - "testing" - "time" -) - -func TestClientPutGetDelete(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - kv := c.KV() - - // Get a get without a key - key := testKey() - pair, _, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair != nil { - t.Fatalf("unexpected value: %#v", pair) - } - - value := []byte("test") - - // Put a key that begins with a '/', this should fail - invalidKey := "/test" - p := &KVPair{Key: invalidKey, Flags: 42, Value: value} - if _, err := kv.Put(p, nil); err == nil { - t.Fatalf("Invalid key not detected: %s", invalidKey) - } - - // Put the key - p = &KVPair{Key: key, Flags: 42, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // Get should work - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if !bytes.Equal(pair.Value, value) { - t.Fatalf("unexpected value: %#v", pair) - } - if pair.Flags != 42 { - t.Fatalf("unexpected value: %#v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Delete 
- if _, err := kv.Delete(key, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // Get should fail - pair, _, err = kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair != nil { - t.Fatalf("unexpected value: %#v", pair) - } -} - -func TestClient_List_DeleteRecurse(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - kv := c.KV() - - // Generate some test keys - prefix := testKey() - var keys []string - for i := 0; i < 100; i++ { - keys = append(keys, path.Join(prefix, testKey())) - } - - // Set values - value := []byte("test") - for _, key := range keys { - p := &KVPair{Key: key, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - } - - // List the values - pairs, meta, err := kv.List(prefix, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(pairs) != len(keys) { - t.Fatalf("got %d keys", len(pairs)) - } - for _, pair := range pairs { - if !bytes.Equal(pair.Value, value) { - t.Fatalf("unexpected value: %#v", pair) - } - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Delete all - if _, err := kv.DeleteTree(prefix, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // List the values - pairs, _, err = kv.List(prefix, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(pairs) != 0 { - t.Fatalf("got %d keys", len(pairs)) - } -} - -func TestClient_DeleteCAS(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - kv := c.KV() - - // Put the key - key := testKey() - value := []byte("test") - p := &KVPair{Key: key, Value: value} - if work, _, err := kv.CAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("CAS failure") - } - - // Get should work - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // CAS update with bad index - p.ModifyIndex = 1 - if work, _, err := kv.DeleteCAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if work { - t.Fatalf("unexpected CAS") - } - - // CAS update with valid index - p.ModifyIndex = meta.LastIndex - if work, _, err := kv.DeleteCAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("unexpected CAS failure") - } -} - -func TestClient_CAS(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - kv := c.KV() - - // Put the key - key := testKey() - value := []byte("test") - p := &KVPair{Key: key, Value: value} - if work, _, err := kv.CAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("CAS failure") - } - - // Get should work - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // CAS update with bad index - newVal := []byte("foo") - p.Value = newVal - p.ModifyIndex = 1 - if work, _, err := kv.CAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if work { - t.Fatalf("unexpected CAS") - } - - // CAS update with valid index - p.ModifyIndex = meta.LastIndex - if work, _, err := kv.CAS(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("unexpected CAS failure") - } -} - -func TestClient_WatchGet(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - kv := c.KV() - - // Get a get 
without a key - key := testKey() - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair != nil { - t.Fatalf("unexpected value: %#v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Put the key - value := []byte("test") - go func() { - kv := c.KV() - - time.Sleep(100 * time.Millisecond) - p := &KVPair{Key: key, Flags: 42, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - }() - - // Get should work - options := &QueryOptions{WaitIndex: meta.LastIndex} - pair, meta2, err := kv.Get(key, options) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if !bytes.Equal(pair.Value, value) { - t.Fatalf("unexpected value: %#v", pair) - } - if pair.Flags != 42 { - t.Fatalf("unexpected value: %#v", pair) - } - if meta2.LastIndex <= meta.LastIndex { - t.Fatalf("unexpected value: %#v", meta2) - } -} - -func TestClient_WatchList(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - kv := c.KV() - - // Get a get without a key - prefix := testKey() - key := path.Join(prefix, testKey()) - pairs, meta, err := kv.List(prefix, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(pairs) != 0 { - t.Fatalf("unexpected value: %#v", pairs) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Put the key - value := []byte("test") - go func() { - kv := c.KV() - - time.Sleep(100 * time.Millisecond) - p := &KVPair{Key: key, Flags: 42, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - }() - - // Get should work - options := &QueryOptions{WaitIndex: meta.LastIndex} - pairs, meta2, err := kv.List(prefix, options) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(pairs) != 1 { - t.Fatalf("expected value: %#v", pairs) - } - if !bytes.Equal(pairs[0].Value, value) { - t.Fatalf("unexpected value: %#v", pairs) - } - if pairs[0].Flags != 42 { - t.Fatalf("unexpected value: %#v", pairs) - } - if meta2.LastIndex <= meta.LastIndex { - t.Fatalf("unexpected value: %#v", meta2) - } - -} - -func TestClient_Keys_DeleteRecurse(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - kv := c.KV() - - // Generate some test keys - prefix := testKey() - var keys []string - for i := 0; i < 100; i++ { - keys = append(keys, path.Join(prefix, testKey())) - } - - // Set values - value := []byte("test") - for _, key := range keys { - p := &KVPair{Key: key, Value: value} - if _, err := kv.Put(p, nil); err != nil { - t.Fatalf("err: %v", err) - } - } - - // List the values - out, meta, err := kv.Keys(prefix, "", nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(out) != len(keys) { - t.Fatalf("got %d keys", len(out)) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Delete all - if _, err := kv.DeleteTree(prefix, nil); err != nil { - t.Fatalf("err: %v", err) - } - - // List the values - out, _, err = kv.Keys(prefix, "", nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if len(out) != 0 { - t.Fatalf("got %d keys", len(out)) - } -} - -func TestClient_AcquireRelease(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - session := c.Session() - kv := c.KV() - - // Make a session - id, _, err := session.CreateNoChecks(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, nil) - - // Acquire the key - key := testKey() - value := []byte("test") 
- p := &KVPair{Key: key, Value: value, Session: id} - if work, _, err := kv.Acquire(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("Lock failure") - } - - // Get should work - pair, meta, err := kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if pair.LockIndex != 1 { - t.Fatalf("Expected lock: %v", pair) - } - if pair.Session != id { - t.Fatalf("Expected lock: %v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } - - // Release - if work, _, err := kv.Release(p, nil); err != nil { - t.Fatalf("err: %v", err) - } else if !work { - t.Fatalf("Release fail") - } - - // Get should work - pair, meta, err = kv.Get(key, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if pair == nil { - t.Fatalf("expected value: %#v", pair) - } - if pair.LockIndex != 1 { - t.Fatalf("Expected lock: %v", pair) - } - if pair.Session != "" { - t.Fatalf("Expected unlock: %v", pair) - } - if meta.LastIndex == 0 { - t.Fatalf("unexpected value: %#v", meta) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go deleted file mode 100644 index a76685f04c..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go +++ /dev/null @@ -1,338 +0,0 @@ -package api - -import ( - "fmt" - "sync" - "time" -) - -const ( - // DefaultLockSessionName is the Session Name we assign if none is provided - DefaultLockSessionName = "Consul API Lock" - - // DefaultLockSessionTTL is the default session TTL if no Session is provided - // when creating a new Lock. This is used because we do not have any - // other check to depend upon. - DefaultLockSessionTTL = "15s" - - // DefaultLockWaitTime is how long we block for at a time to check if lock - // acquisition is possible. This affects the minimum time it takes to cancel - // a Lock acquisition. - DefaultLockWaitTime = 15 * time.Second - - // DefaultLockRetryTime is how long we wait after a failed lock acquisition - // before attempting to do the lock again. This is so that once a lock-delay - // is in effect, we do not hot loop retrying the acquisition. - DefaultLockRetryTime = 5 * time.Second - - // LockFlagValue is a magic flag we set to indicate a key - // is being used for a lock. It is used to detect a potential - // conflict with a semaphore. - LockFlagValue = 0x2ddccbc058a50c18 -) - -var ( - // ErrLockHeld is returned if we attempt to double lock - ErrLockHeld = fmt.Errorf("Lock already held") - - // ErrLockNotHeld is returned if we attempt to unlock a lock - // that we do not hold. - ErrLockNotHeld = fmt.Errorf("Lock not held") - - // ErrLockInUse is returned if we attempt to destroy a lock - // that is in use. - ErrLockInUse = fmt.Errorf("Lock in use") - - // ErrLockConflict is returned if the flags on a key - // used for a lock do not match expectation - ErrLockConflict = fmt.Errorf("Existing key does not match lock use") -) - -// Lock is used to implement client-side leader election. It follows the -// algorithm described here: https://consul.io/docs/guides/leader-election.html. -type Lock struct { - c *Client - opts *LockOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// LockOptions is used to parameterize the Lock behavior.
-type LockOptions struct { - Key string // Must be set and have write permissions - Value []byte // Optional, value to associate with the lock - Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultLockSessionName - SessionTTL string // Optional, defaults to DefaultLockSessionTTL -} - -// LockKey returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockKey(key string) (*Lock, error) { - opts := &LockOptions{ - Key: key, - } - return c.LockOpts(opts) -} - -// LockOpts returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { - if opts.Key == "" { - return nil, fmt.Errorf("missing key") - } - if opts.SessionName == "" { - opts.SessionName = DefaultLockSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultLockSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - l := &Lock{ - c: c, - opts: opts, - } - return l, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Providing a non-nil stopCh can be used to abort the lock attempt. -// Returns a channel that is closed if our lock is lost, or an error. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the lock is held until Unlock() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the lock being lost.
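A hedged leader-election sketch built on LockKey and the Lock method documented above, assuming a client as in the earlier sketches; the key name is illustrative and error handling is trimmed.

package main

import (
	"log"

	"github.com/hashicorp/consul/api" // assumed upstream import path
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	lock, err := client.LockKey("service/my-app/leader") // illustrative key
	if err != nil {
		log.Fatal(err)
	}

	// Blocks until the lock is acquired; the returned channel is
	// closed if leadership is later lost (session invalidation,
	// key deletion, operator intervention, ...).
	lostCh, err := lock.Lock(nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("became leader")

	<-lostCh // leader-only work would run until this fires
	log.Println("leadership lost")

	if err := lock.Unlock(); err != nil && err != api.ErrLockNotHeld {
		log.Printf("unlock: %v", err)
	}
}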
-func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return nil, ErrLockHeld - } - - // Check if we need to create a session first - l.lockSession = l.opts.Session - if l.lockSession == "" { - if s, err := l.createSession(); err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } else { - l.sessionRenew = make(chan struct{}) - l.lockSession = s - session := l.c.Session() - go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !l.isHeld { - close(l.sessionRenew) - l.sessionRenew = nil - } - }() - } - } - - // Setup the query options - kv := l.c.KV() - qOpts := &QueryOptions{ - WaitTime: DefaultLockWaitTime, - } - -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Look for an existing lock, blocking until not taken - pair, meta, err := kv.Get(l.opts.Key, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read lock: %v", err) - } - if pair != nil && pair.Flags != LockFlagValue { - return nil, ErrLockConflict - } - locked := false - if pair != nil && pair.Session == l.lockSession { - goto HELD - } - if pair != nil && pair.Session != "" { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Try to acquire the lock - pair = l.lockEntry(l.lockSession) - locked, _, err = kv.Acquire(pair, nil) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock: %v", err) - } - - // Handle the case of not getting the lock - if !locked { - // Determine why the lock failed - qOpts.WaitIndex = 0 - pair, meta, err = kv.Get(l.opts.Key, qOpts) - if pair != nil && pair.Session != "" { - // If the session is not null, this means that a wait can safely happen - // using a long poll - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } else { - // If the session is empty and the lock failed to acquire, then it means - // a lock-delay is in effect and a timed wait must be used - select { - case <-time.After(DefaultLockRetryTime): - goto WAIT - case <-stopCh: - return nil, nil - } - } - } - -HELD: - // Watch to ensure we maintain leadership - leaderCh := make(chan struct{}) - go l.monitorLock(l.lockSession, leaderCh) - - // Set that we own the lock - l.isHeld = true - - // Locked! All done - return leaderCh, nil -} - -// Unlock releases the lock. It is an error to call this -// if the lock is not currently held. -func (l *Lock) Unlock() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Ensure the lock is actually held - if !l.isHeld { - return ErrLockNotHeld - } - - // Set that we no longer own the lock - l.isHeld = false - - // Stop the session renew - if l.sessionRenew != nil { - defer func() { - close(l.sessionRenew) - l.sessionRenew = nil - }() - } - - // Get the lock entry, and clear the lock session - lockEnt := l.lockEntry(l.lockSession) - l.lockSession = "" - - // Release the lock explicitly - kv := l.c.KV() - _, _, err := kv.Release(lockEnt, nil) - if err != nil { - return fmt.Errorf("failed to release lock: %v", err) - } - return nil -} - -// Destroy is used to clean up the lock entry. It is not necessary -// to invoke it. It will fail if the lock is in use.
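To round out the lifecycle, a small hedged sketch around the Destroy method defined just below, using the sentinel errors declared earlier in this file; the package and helper name are hypothetical.

// Package lockutil is a hypothetical wrapper around the vendored API.
package lockutil

import (
	"log"

	"github.com/hashicorp/consul/api" // assumed upstream import path
)

// CleanupLock releases our hold on the lock, then removes the
// underlying key unless another contender is still using it.
func CleanupLock(lock *api.Lock) {
	if err := lock.Unlock(); err != nil && err != api.ErrLockNotHeld {
		log.Printf("unlock: %v", err)
	}
	switch err := lock.Destroy(); err {
	case nil:
		log.Println("lock entry removed")
	case api.ErrLockInUse:
		log.Println("lock still in use; leaving key in place")
	default:
		log.Printf("destroy: %v", err)
	}
}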
-func (l *Lock) Destroy() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return ErrLockHeld - } - - // Look for an existing lock - kv := l.c.KV() - pair, _, err := kv.Get(l.opts.Key, nil) - if err != nil { - return fmt.Errorf("failed to read lock: %v", err) - } - - // Nothing to do if the lock does not exist - if pair == nil { - return nil - } - - // Check for possible flag conflict - if pair.Flags != LockFlagValue { - return ErrLockConflict - } - - // Check if it is in use - if pair.Session != "" { - return ErrLockInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(pair, nil) - if err != nil { - return fmt.Errorf("failed to remove lock: %v", err) - } - if !didRemove { - return ErrLockInUse - } - return nil -} - -// createSession is used to create a new managed session -func (l *Lock) createSession() (string, error) { - session := l.c.Session() - se := &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// lockEntry returns a formatted KVPair for the lock -func (l *Lock) lockEntry(session string) *KVPair { - return &KVPair{ - Key: l.opts.Key, - Value: l.opts.Value, - Session: session, - Flags: LockFlagValue, - } -} - -// monitorLock is a long-running routine to monitor lock ownership. - // It closes the stopCh if we lose our leadership. -func (l *Lock) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := l.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - pair, meta, err := kv.Get(l.opts.Key, opts) - if err != nil { - return - } - if pair != nil && pair.Session == session { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go deleted file mode 100644 index 0a8fa5172a..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go +++ /dev/null @@ -1,363 +0,0 @@ -package api - -import ( - "log" - "sync" - "testing" - "time" -) - -func TestLock_LockUnlock(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Initial unlock should fail - err = lock.Unlock() - if err != ErrLockNotHeld { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - - // Double lock should fail - _, err = lock.Lock(nil) - if err != ErrLockHeld { - t.Fatalf("err: %v", err) - } - - // Should be leader - select { - case <-leaderCh: - t.Fatalf("should be leader") - default: - } - - // Initial unlock should work - err = lock.Unlock() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Double unlock should fail - err = lock.Unlock() - if err != ErrLockNotHeld { - t.Fatalf("err: %v", err) - } - - // Should lose leadership - select { - case <-leaderCh: - case <-time.After(time.Second): - t.Fatalf("should not be leader") - } -} - -func TestLock_ForceInvalidate(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go deleted file mode 100644 index 0a8fa5172a..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go +++ /dev/null @@ -1,363 +0,0 @@ -package api - -import ( - "log" - "sync" - "testing" - "time" -) - -func TestLock_LockUnlock(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Initial unlock should fail - err = lock.Unlock() - if err != ErrLockNotHeld { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - - // Double lock should fail - _, err = lock.Lock(nil) - if err != ErrLockHeld { - t.Fatalf("err: %v", err) - } - - // Should be leader - select { - case <-leaderCh: - t.Fatalf("should be leader") - default: - } - - // Initial unlock should work - err = lock.Unlock() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Double unlock should fail - err = lock.Unlock() - if err != ErrLockNotHeld { - t.Fatalf("err: %v", err) - } - - // Should lose leadership - select { - case <-leaderCh: - case <-time.After(time.Second): - t.Fatalf("should not be leader") - } -} - -func TestLock_ForceInvalidate(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - defer lock.Unlock() - - go func() { - // Nuke the session, simulate an operator invalidation - // or a health check failure - session := c.Session() - session.Destroy(lock.lockSession, nil) - }() - - // Should lose leadership - select { - case <-leaderCh: - case <-time.After(time.Second): - t.Fatalf("should not be leader") - } -} - -func TestLock_DeleteKey(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - defer lock.Unlock() - - go func() { - // Nuke the key, simulate an operator intervention - kv := c.KV() - kv.Delete("test/lock", nil) - }() - - // Should lose leadership - select { - case <-leaderCh: - case <-time.After(time.Second): - t.Fatalf("should not be leader") - } -} - -func TestLock_Contend(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - wg := &sync.WaitGroup{} - acquired := make([]bool, 3) - for idx := range acquired { - wg.Add(1) - go func(idx int) { - defer wg.Done() - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work eventually, will contend - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - defer lock.Unlock() - log.Printf("Contender %d acquired", idx) - - // Set acquired and then leave - acquired[idx] = true - }(idx) - } - - // Wait for termination - doneCh := make(chan struct{}) - go func() { - wg.Wait() - close(doneCh) - }() - - // Wait for everybody to get a turn - select { - case <-doneCh: - case <-time.After(3 * DefaultLockRetryTime): - t.Fatalf("timeout") - } - - for idx, did := range acquired { - if !did { - t.Fatalf("contender %d never acquired", idx) - } - } -} - -func TestLock_Destroy(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - lock, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - - // Destroy should fail - if err := lock.Destroy(); err != ErrLockHeld { - t.Fatalf("err: %v", err) - } - - // Should be able to release - err = lock.Unlock() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Acquire with a different lock - l2, err := c.LockKey("test/lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err = l2.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - - // Destroy should still fail - if err := lock.Destroy(); err != ErrLockInUse { - t.Fatalf("err: %v", err) - } - - // Should release - err = l2.Unlock() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should work - err = lock.Destroy() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Double destroy should work - err = l2.Destroy() - if err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestLock_Conflict(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - sema, err := c.SemaphorePrefix("test/lock/", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not 
hold") - } - defer sema.Release() - - lock, err := c.LockKey("test/lock/.lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should conflict with semaphore - _, err = lock.Lock(nil) - if err != ErrLockConflict { - t.Fatalf("err: %v", err) - } - - // Should conflict with semaphore - err = lock.Destroy() - if err != ErrLockConflict { - t.Fatalf("err: %v", err) - } -} - -func TestLock_ReclaimLock(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - session, _, err := c.Session().Create(&SessionEntry{}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - lock, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session}) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - defer lock.Unlock() - - l2, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session}) - if err != nil { - t.Fatalf("err: %v", err) - } - - reclaimed := make(chan (<-chan struct{}), 1) - go func() { - l2Ch, err := l2.Lock(nil) - if err != nil { - t.Fatalf("not locked: %v", err) - } - reclaimed <- l2Ch - }() - - // Should reclaim the lock - var leader2Ch <-chan struct{} - - select { - case leader2Ch = <-reclaimed: - case <-time.After(time.Second): - t.Fatalf("should have locked") - } - - // unlock should work - err = l2.Unlock() - if err != nil { - t.Fatalf("err: %v", err) - } - - //Both locks should see the unlock - select { - case <-leader2Ch: - case <-time.After(time.Second): - t.Fatalf("should not be leader") - } - - select { - case <-leaderCh: - case <-time.After(time.Second): - t.Fatalf("should not be leader") - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go deleted file mode 100644 index 745a208c99..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go +++ /dev/null @@ -1,24 +0,0 @@ -package api - -// Raw can be used to do raw queries against custom endpoints -type Raw struct { - c *Client -} - -// Raw returns a handle to query endpoints -func (c *Client) Raw() *Raw { - return &Raw{c} -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - return raw.c.query(endpoint, out, q) -} - -// Write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. -func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - return raw.c.write(endpoint, in, out, q) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go deleted file mode 100644 index ff4c2058ce..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go +++ /dev/null @@ -1,477 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "path" - "sync" - "time" -) - -const ( - // DefaultSemaphoreSessionName is the Session Name we assign if none is provided - DefaultSemaphoreSessionName = "Consul API Semaphore" - - // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided - // when creating a new Semaphore. This is used because we do not have another - // other check to depend upon. 
- DefaultSemaphoreSessionTTL = "15s" - - // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore - // acquisition is possible. This affects the minimum time it takes to cancel - // a Semaphore acquisition. - DefaultSemaphoreWaitTime = 15 * time.Second - - // DefaultSemaphoreKey is the key used within the prefix to - // use for coordination between all the contenders. - DefaultSemaphoreKey = ".lock" - - // SemaphoreFlagValue is a magic flag we set to indicate a key - // is being used for a semaphore. It is used to detect a potential - // conflict with a lock. - SemaphoreFlagValue = 0xe0f69a2baa414de0 -) - -var ( - // ErrSemaphoreHeld is returned if we attempt to double lock - ErrSemaphoreHeld = fmt.Errorf("Semaphore already held") - - // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore - // that we do not hold. - ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held") - - // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore - // that is in use. - ErrSemaphoreInUse = fmt.Errorf("Semaphore in use") - - // ErrSemaphoreConflict is returned if the flags on a key - // used for a semaphore do not match expectation - ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use") -) - -// Semaphore is used to implement a distributed semaphore -// using the Consul KV primitives. -type Semaphore struct { - c *Client - opts *SemaphoreOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// SemaphoreOptions is used to parameterize the Semaphore -type SemaphoreOptions struct { - Prefix string // Must be set and have write permissions - Limit int // Must be set, and be positive - Value []byte // Optional, value to associate with the contender entry - Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultSemaphoreSessionName - SessionTTL string // Optional, defaults to DefaultSemaphoreSessionTTL -} - -// semaphoreLock is written under the DefaultSemaphoreKey and -// is used to coordinate between all the contenders. -type semaphoreLock struct { - // Limit is the integer limit of holders. This is used to - // verify that all the holders agree on the value. - Limit int - - // Holders is a list of all the semaphore holders. - // It maps the session ID to true. It is effectively used as a set. - Holders map[string]bool -} - -// SemaphorePrefix is used to create a Semaphore which will operate -// at the given KV prefix and uses the given limit for the semaphore. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. -func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) { - opts := &SemaphoreOptions{ - Prefix: prefix, - Limit: limit, - } - return c.SemaphoreOpts(opts) -}
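A minimal construction sketch, assuming a reachable local agent (the prefix and session settings are illustrative):

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// At most two contenders may hold a slot under this (illustrative) prefix.
	sema, err := client.SemaphoreOpts(&api.SemaphoreOptions{
		Prefix:     "service/workers/",
		Limit:      2,
		SessionTTL: "15s",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sema // Acquire/Release usage is sketched after Release below
}
```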
-// SemaphoreOpts is used to create a Semaphore with the given options. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. If a Session is not provided, one will be created. -func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { - if opts.Prefix == "" { - return nil, fmt.Errorf("missing prefix") - } - if opts.Limit <= 0 { - return nil, fmt.Errorf("semaphore limit must be positive") - } - if opts.SessionName == "" { - opts.SessionName = DefaultSemaphoreSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultSemaphoreSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - s := &Semaphore{ - c: c, - opts: opts, - } - return s, nil -} - -// Acquire attempts to reserve a slot in the semaphore, blocking until -// success, interruption via the stopCh, or an error is encountered. -// Providing a non-nil stopCh can be used to abort the attempt. -// On success, a channel is returned that represents our slot. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the slot is held until Release() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the session being lost. -func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return nil, ErrSemaphoreHeld - } - - // Check if we need to create a session first - s.lockSession = s.opts.Session - if s.lockSession == "" { - if sess, err := s.createSession(); err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } else { - s.sessionRenew = make(chan struct{}) - s.lockSession = sess - session := s.c.Session() - go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) - - // If we fail to acquire the lock, clean up the session - defer func() { - if !s.isHeld { - close(s.sessionRenew) - s.sessionRenew = nil - } - }() - } - } - - // Create the contender entry - kv := s.c.KV() - made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) - if err != nil || !made { - return nil, fmt.Errorf("failed to make contender entry: %v", err) - } - - // Setup the query options - qOpts := &QueryOptions{ - WaitTime: DefaultSemaphoreWaitTime, - } - -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Read the prefix - pairs, meta, err := kv.List(s.opts.Prefix, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read prefix: %v", err) - } - - // Decode the lock - lockPair := s.findLock(pairs) - if lockPair.Flags != SemaphoreFlagValue { - return nil, ErrSemaphoreConflict - } - lock, err := s.decodeLock(lockPair) - if err != nil { - return nil, err - } - - // Verify we agree with the limit - if lock.Limit != s.opts.Limit { - return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", - lock.Limit, s.opts.Limit) - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if the lock is held - if len(lock.Holders) >= lock.Limit { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Create a new lock with us as a holder - lock.Holders[s.lockSession] = true - newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) - if err != nil { - return nil, err - } - - // Attempt the acquisition - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return nil, fmt.Errorf("failed to update 
lock: %v", err) - } - if !didSet { - // Update failed, could have been a race with another contender, - // retry the operation - goto WAIT - } - - // Watch to ensure we maintain ownership of the slot - lockCh := make(chan struct{}) - go s.monitorLock(s.lockSession, lockCh) - - // Set that we own the lock - s.isHeld = true - - // Acquired! All done - return lockCh, nil -} - -// Release is used to voluntarily give up our semaphore slot. It is -// an error to call this if the semaphore has not been acquired. -func (s *Semaphore) Release() error { - // Hold the lock as we try to release - s.l.Lock() - defer s.l.Unlock() - - // Ensure the lock is actually held - if !s.isHeld { - return ErrSemaphoreNotHeld - } - - // Set that we no longer own the lock - s.isHeld = false - - // Stop the session renew - if s.sessionRenew != nil { - defer func() { - close(s.sessionRenew) - s.sessionRenew = nil - }() - } - - // Get and clear the lock session - lockSession := s.lockSession - s.lockSession = "" - - // Remove ourselves as a lock holder - kv := s.c.KV() - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) -READ: - pair, _, err := kv.Get(key, nil) - if err != nil { - return err - } - if pair == nil { - pair = &KVPair{} - } - lock, err := s.decodeLock(pair) - if err != nil { - return err - } - - // Create a new lock without us as a holder - if _, ok := lock.Holders[lockSession]; ok { - delete(lock.Holders, lockSession) - newLock, err := s.encodeLock(lock, pair.ModifyIndex) - if err != nil { - return err - } - - // Swap the locks - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - goto READ - } - } - - // Destroy the contender entry - contenderKey := path.Join(s.opts.Prefix, lockSession) - if _, err := kv.Delete(contenderKey, nil); err != nil { - return err - } - return nil -} - -// Destroy is used to cleanup the semaphore entry. It is not necessary -// to invoke. It will fail if the semaphore is in use. 
-// Destroy is used to clean up the semaphore entry. It is not necessary -// to invoke it; it will fail if the semaphore is in use. -func (s *Semaphore) Destroy() error { - // Hold the lock as we try to destroy - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return ErrSemaphoreHeld - } - - // List the prefix for the semaphore - kv := s.c.KV() - pairs, _, err := kv.List(s.opts.Prefix, nil) - if err != nil { - return fmt.Errorf("failed to read prefix: %v", err) - } - - // Find the lock pair, bail if it doesn't exist - lockPair := s.findLock(pairs) - if lockPair.ModifyIndex == 0 { - return nil - } - if lockPair.Flags != SemaphoreFlagValue { - return ErrSemaphoreConflict - } - - // Decode the lock - lock, err := s.decodeLock(lockPair) - if err != nil { - return err - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if there are any holders - if len(lock.Holders) > 0 { - return ErrSemaphoreInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(lockPair, nil) - if err != nil { - return fmt.Errorf("failed to remove semaphore: %v", err) - } - if !didRemove { - return ErrSemaphoreInUse - } - return nil -} - -// createSession is used to create a new managed session -func (s *Semaphore) createSession() (string, error) { - session := s.c.Session() - se := &SessionEntry{ - Name: s.opts.SessionName, - TTL: s.opts.SessionTTL, - Behavior: SessionBehaviorDelete, - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// contenderEntry returns a formatted KVPair for the contender -func (s *Semaphore) contenderEntry(session string) *KVPair { - return &KVPair{ - Key: path.Join(s.opts.Prefix, session), - Value: s.opts.Value, - Session: session, - Flags: SemaphoreFlagValue, - } -} - -// findLock is used to find the KV Pair which is used for coordination -func (s *Semaphore) findLock(pairs KVPairs) *KVPair { - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) - for _, pair := range pairs { - if pair.Key == key { - return pair - } - } - return &KVPair{Flags: SemaphoreFlagValue} -} - -// decodeLock is used to decode a semaphoreLock from an -// entry in Consul -func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { - // Handle if there is no lock - if pair == nil || pair.Value == nil { - return &semaphoreLock{ - Limit: s.opts.Limit, - Holders: make(map[string]bool), - }, nil - } - - l := &semaphoreLock{} - if err := json.Unmarshal(pair.Value, l); err != nil { - return nil, fmt.Errorf("lock decoding failed: %v", err) - } - return l, nil -} - -// encodeLock is used to encode a semaphoreLock into a KVPair -// that can be PUT -func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { - enc, err := json.Marshal(l) - if err != nil { - return nil, fmt.Errorf("lock encoding failed: %v", err) - } - pair := &KVPair{ - Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), - Value: enc, - Flags: SemaphoreFlagValue, - ModifyIndex: oldIndex, - } - return pair, nil -} - -// pruneDeadHolders is used to remove all the dead lock holders -func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { - // Gather all the live holders - alive := make(map[string]struct{}, len(pairs)) - for _, pair := range pairs { - if pair.Session != "" { - alive[pair.Session] = struct{}{} - } - } - - // Remove any holders that are dead - for holder := range lock.Holders { - if _, ok := alive[holder]; !ok { - delete(lock.Holders, holder) - } - } -}
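Since encodeLock is a plain json.Marshal of semaphoreLock, the coordination key's value is a small JSON document. A sketch of its wire shape, using a stand-in struct because semaphoreLock is unexported (the session ID is a placeholder):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Stand-in with the same shape as the unexported semaphoreLock type.
	lock := struct {
		Limit   int
		Holders map[string]bool
	}{
		Limit:   2,
		Holders: map[string]bool{"<session-id>": true}, // placeholder session ID
	}
	buf, _ := json.Marshal(lock)
	fmt.Println(string(buf)) // {"Limit":2,"Holders":{"<session-id>":true}}
}
```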
-// monitorLock is a long-running routine to monitor semaphore ownership. -// It closes the stopCh if we lose our slot. -func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := s.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - pairs, meta, err := kv.List(s.opts.Prefix, opts) - if err != nil { - return - } - lockPair := s.findLock(pairs) - lock, err := s.decodeLock(lockPair) - if err != nil { - return - } - s.pruneDeadHolders(lock, pairs) - if _, ok := lock.Holders[session]; ok { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go deleted file mode 100644 index 5e5e53588c..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go +++ /dev/null @@ -1,313 +0,0 @@ -package api - -import ( - "log" - "sync" - "testing" - "time" -) - -func TestSemaphore_AcquireRelease(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Initial release should fail - err = sema.Release() - if err != ErrSemaphoreNotHeld { - t.Fatalf("err: %v", err) - } - - // Should work - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not held") - } - - // Double lock should fail - _, err = sema.Acquire(nil) - if err != ErrSemaphoreHeld { - t.Fatalf("err: %v", err) - } - - // Should be held - select { - case <-lockCh: - t.Fatalf("should be held") - default: - } - - // Initial release should work - err = sema.Release() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Double unlock should fail - err = sema.Release() - if err != ErrSemaphoreNotHeld { - t.Fatalf("err: %v", err) - } - - // Should lose resource - select { - case <-lockCh: - case <-time.After(time.Second): - t.Fatalf("should not be held") - } -} - -func TestSemaphore_ForceInvalidate(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not acquired") - } - defer sema.Release() - - go func() { - // Nuke the session, simulate an operator invalidation - // or a health check failure - session := c.Session() - session.Destroy(sema.lockSession, nil) - }() - - // Should lose slot - select { - case <-lockCh: - case <-time.After(time.Second): - t.Fatalf("should not be locked") - } -} - -func TestSemaphore_DeleteKey(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not locked") - } - defer sema.Release() - - go func() { - // Nuke the key, simulate an operator intervention - kv := c.KV() - kv.DeleteTree("test/semaphore", nil) - }() - - // Should lose leadership - select { - case <-lockCh: - case <-time.After(time.Second): - t.Fatalf("should not be locked") - } -} - -func TestSemaphore_Contend(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - wg := &sync.WaitGroup{} - acquired := make([]bool, 4) - for idx := range acquired { - wg.Add(1) - go func(idx int) { - defer wg.Done() - sema, err := 
c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work eventually, will contend - lockCh, err := sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if lockCh == nil { - t.Fatalf("not locked") - } - defer sema.Release() - log.Printf("Contender %d acquired", idx) - - // Set acquired and then leave - acquired[idx] = true - }(idx) - } - - // Wait for termination - doneCh := make(chan struct{}) - go func() { - wg.Wait() - close(doneCh) - }() - - // Wait for everybody to get a turn - select { - case <-doneCh: - case <-time.After(3 * DefaultLockRetryTime): - t.Fatalf("timeout") - } - - for idx, did := range acquired { - if !did { - t.Fatalf("contender %d never acquired", idx) - } - } -} - -func TestSemaphore_BadLimit(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 0) - if err == nil { - t.Fatalf("should error") - } - - sema, err = c.SemaphorePrefix("test/semaphore", 1) - if err != nil { - t.Fatalf("err: %v", err) - } - - _, err = sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - sema2, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - _, err = sema2.Acquire(nil) - if err.Error() != "semaphore limit conflict (lock: 1, local: 2)" { - t.Fatalf("err: %v", err) - } -} - -func TestSemaphore_Destroy(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - sema, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - sema2, err := c.SemaphorePrefix("test/semaphore", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - _, err = sema.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - _, err = sema2.Acquire(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should fail, still held - if err := sema.Destroy(); err != ErrSemaphoreHeld { - t.Fatalf("err: %v", err) - } - - err = sema.Release() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should fail, still in use - if err := sema.Destroy(); err != ErrSemaphoreInUse { - t.Fatalf("err: %v", err) - } - - err = sema2.Release() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should work - if err := sema.Destroy(); err != nil { - t.Fatalf("err: %v", err) - } - - // Destroy should work - if err := sema2.Destroy(); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestSemaphore_Conflict(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - lock, err := c.LockKey("test/sema/.lock") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should work - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if leaderCh == nil { - t.Fatalf("not leader") - } - defer lock.Unlock() - - sema, err := c.SemaphorePrefix("test/sema/", 2) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should conflict with lock - _, err = sema.Acquire(nil) - if err != ErrSemaphoreConflict { - t.Fatalf("err: %v", err) - } - - // Should conflict with lock - err = sema.Destroy() - if err != ErrSemaphoreConflict { - t.Fatalf("err: %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go deleted file mode 100644 index a99da511d6..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go +++ /dev/null @@ -1,201 +0,0 @@ -package api - -import ( - "fmt" - "time" -) - -const ( 
- // SessionBehaviorRelease is the default behavior and causes - // all associated locks to be released on session invalidation. - SessionBehaviorRelease = "release" - - // SessionBehaviorDelete is new in Consul 0.5 and changes the - // behavior to delete all associated locks on session invalidation. - // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. - SessionBehaviorDelete = "delete" -) - -// SessionEntry represents a session in Consul -type SessionEntry struct { - CreateIndex uint64 - ID string - Name string - Node string - Checks []string - LockDelay time.Duration - Behavior string - TTL string -} - -// Session can be used to query the Session endpoints -type Session struct { - c *Client -} - -// Session returns a handle to the session endpoints -func (c *Client) Session() *Session { - return &Session{c} -} - -// CreateNoChecks is like Create but is used specifically to create -// a session with no associated health checks. -func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - body := make(map[string]interface{}) - body["Checks"] = []string{} - if se != nil { - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(body, q) - -} - -// Create makes a new session. Providing a session entry can -// customize the session. It can also be nil to use defaults. -func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - var obj interface{} - if se != nil { - body := make(map[string]interface{}) - obj = body - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if len(se.Checks) > 0 { - body["Checks"] = se.Checks - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(obj, q) -} - -func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { - var out struct{ ID string } - wm, err := s.c.write("/v1/session/create", obj, &out, q) - if err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Destroy invalidates a given session -func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Renew renews the TTL on a given session -func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { - var entries []*SessionEntry - wm, err := s.c.write("/v1/session/renew/"+id, nil, &entries, q) - if err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], wm, nil - } - return nil, wm, nil -}
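A typical pattern is to create a TTL session and keep it renewed in the background; a sketch, with the TTL value illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	session := client.Session()
	id, _, err := session.Create(&api.SessionEntry{TTL: "15s"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	doneCh := make(chan struct{})
	// Renew in the background (roughly every TTL/2) until doneCh is closed.
	go session.RenewPeriodic("15s", id, nil, doneCh)

	// ... use the session for locks, semaphores, or ephemeral keys ...

	close(doneCh) // stops renewal; RenewPeriodic then destroys the session
}
```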
-// RenewPeriodic is used to periodically invoke Session.Renew on a -// session until a doneCh is closed. This is meant to be used in a long-running -// goroutine to ensure a session stays valid. -func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error { - ttl, err := time.ParseDuration(initialTTL) - if err != nil { - return err - } - - waitDur := ttl / 2 - lastRenewTime := time.Now() - var lastErr error - for { - if time.Since(lastRenewTime) > ttl { - return lastErr - } - select { - case <-time.After(waitDur): - entry, _, err := s.Renew(id, q) - if err != nil { - waitDur = time.Second - lastErr = err - continue - } - if entry == nil { - waitDur = time.Second - lastErr = fmt.Errorf("No SessionEntry returned") - continue - } - - // Handle the server updating the TTL - ttl, _ = time.ParseDuration(entry.TTL) - waitDur = ttl / 2 - lastRenewTime = time.Now() - - case <-doneCh: - // Attempt a session destroy - s.Destroy(id, q) - return nil - } - } -} - -// Info looks up a single session -func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/info/"+id, &entries, q) - if err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// Node gets sessions for a node -func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/node/"+node, &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// List gets all active sessions -func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/list", &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go deleted file mode 100644 index c503c21a07..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package api - -import ( - "testing" -) - -func TestSession_CreateDestroy(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - session := c.Session() - - id, meta, err := session.Create(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - meta, err = session.Destroy(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } -} - -func TestSession_CreateRenewDestroy(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - session := c.Session() - - se := &SessionEntry{ - TTL: "10s", - } - - id, meta, err := session.Create(se, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, nil) - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - if id == "" { - t.Fatalf("invalid: %v", id) - } - - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - renew, meta, err := session.Renew(id, nil) - - if err != nil { - t.Fatalf("err: %v", err) - } - if meta.RequestTime == 0 { - t.Fatalf("bad: %v", meta) - } - - if renew == nil { - t.Fatalf("should get session") - } - - if renew.ID != id { - t.Fatalf("should have matching id") - } - - if renew.TTL != "10s" { - t.Fatalf("should get session with TTL") - } -} - -func TestSession_Info(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - session := c.Session() 
- - id, _, err := session.Create(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, nil) - - info, qm, err := session.Info(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } - - if info == nil { - t.Fatalf("should get session") - } - if info.CreateIndex == 0 { - t.Fatalf("bad: %v", info) - } - if info.ID != id { - t.Fatalf("bad: %v", info) - } - if info.Name != "" { - t.Fatalf("bad: %v", info) - } - if info.Node == "" { - t.Fatalf("bad: %v", info) - } - if len(info.Checks) == 0 { - t.Fatalf("bad: %v", info) - } - if info.LockDelay == 0 { - t.Fatalf("bad: %v", info) - } - if info.Behavior != "release" { - t.Fatalf("bad: %v", info) - } - if info.TTL != "" { - t.Fatalf("bad: %v", info) - } -} - -func TestSession_Node(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - session := c.Session() - - id, _, err := session.Create(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, nil) - - info, qm, err := session.Info(id, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - sessions, qm, err := session.Node(info.Node, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(sessions) != 1 { - t.Fatalf("bad: %v", sessions) - } - - if qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } -} - -func TestSession_List(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - session := c.Session() - - id, _, err := session.Create(nil, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer session.Destroy(id, nil) - - sessions, qm, err := session.List(nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - if len(sessions) != 1 { - t.Fatalf("bad: %v", sessions) - } - - if qm.LastIndex == 0 { - t.Fatalf("bad: %v", qm) - } - if !qm.KnownLeader { - t.Fatalf("bad: %v", qm) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go deleted file mode 100644 index 74ef61a678..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go +++ /dev/null @@ -1,43 +0,0 @@ -package api - -// Status can be used to query the Status endpoints -type Status struct { - c *Client -} - -// Status returns a handle to the status endpoints -func (c *Client) Status() *Status { - return &Status{c} -} - -// Leader is used to query for a known leader -func (s *Status) Leader() (string, error) { - r := s.c.newRequest("GET", "/v1/status/leader") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - var leader string - if err := decodeBody(resp, &leader); err != nil { - return "", err - } - return leader, nil -} - -// Peers is used to query for the known raft peers -func (s *Status) Peers() ([]string, error) { - r := s.c.newRequest("GET", "/v1/status/peers") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var peers []string - if err := decodeBody(resp, &peers); err != nil { - return nil, err - } - return peers, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go deleted file mode 100644 index 62dc1550ff..0000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go +++ 
/dev/null @@ -1,37 +0,0 @@ -package api - -import ( - "testing" -) - -func TestStatusLeader(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - status := c.Status() - - leader, err := status.Leader() - if err != nil { - t.Fatalf("err: %v", err) - } - if leader == "" { - t.Fatalf("Expected leader") - } -} - -func TestStatusPeers(t *testing.T) { - t.Parallel() - c, s := makeClient(t) - defer s.Stop() - - status := c.Status() - - peers, err := status.Peers() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(peers) == 0 { - t.Fatalf("Expected peers ") - } -} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/.travis.yml b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/.travis.yml deleted file mode 100644 index a4d6cc5326..0000000000 --- a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -sudo: false -language: go -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - tip diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/LICENSE b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/LICENSE deleted file mode 100644 index b829abc8a1..0000000000 --- a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 Julien Schmidt. All rights reserved. - - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * The names of the contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL JULIEN SCHMIDT BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/README.md b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/README.md deleted file mode 100644 index 785a108cae..0000000000 --- a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/README.md +++ /dev/null @@ -1,323 +0,0 @@ -# HttpRouter [![Build Status](https://travis-ci.org/julienschmidt/httprouter.png?branch=master)](https://travis-ci.org/julienschmidt/httprouter) [![Coverage](http://gocover.io/_badge/github.com/julienschmidt/httprouter?0)](http://gocover.io/github.com/julienschmidt/httprouter) [![GoDoc](http://godoc.org/github.com/julienschmidt/httprouter?status.png)](http://godoc.org/github.com/julienschmidt/httprouter) - -HttpRouter is a lightweight, high-performance HTTP request router -(also called *multiplexer* or just *mux* for short) for [Go](http://golang.org/). - -In contrast to the [default mux](http://golang.org/pkg/net/http/#ServeMux) of Go's net/http package, this router supports -variables in the routing pattern and matches against the request method. -It also scales better. - -The router is optimized for high performance and a small memory footprint. -It scales well even with very long paths and a large number of routes. -A compressing dynamic trie (radix tree) structure is used for efficient matching. - -## Features -**Only explicit matches:** With other routers, like [http.ServeMux](http://golang.org/pkg/net/http/#ServeMux), -a requested URL path could match multiple patterns. Therefore they have some -awkward pattern priority rules, like *longest match* or *first registered, -first matched*. By design of this router, a request can only match exactly one -or no route. As a result, there are also no unintended matches, which makes it -great for SEO and improves the user experience. - -**Stop caring about trailing slashes:** Choose the URL style you like, the -router automatically redirects the client if a trailing slash is missing or if -there is one extra. Of course it only does so if the new path has a handler. -If you don't like it, you can [turn off this behavior](http://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash). - -**Path auto-correction:** Besides detecting the missing or additional trailing -slash at no extra cost, the router can also fix wrong cases and remove -superfluous path elements (like `../` or `//`). -Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users? -HttpRouter can help him by making a case-insensitive look-up and redirecting him -to the correct URL. - -**Parameters in your routing pattern:** Stop parsing the requested URL path, -just give the path segment a name and the router delivers the dynamic value to -you. Because of the design of the router, path parameters are very cheap. - -**Zero Garbage:** The matching and dispatching process generates zero bytes of -garbage. In fact, the only heap allocations that are made are for building the -slice of the key-value pairs for path parameters. If the request path contains -no parameters, not a single heap allocation is necessary. - -**Best Performance:** [Benchmarks speak for themselves](https://github.com/julienschmidt/go-http-routing-benchmark). -See below for technical details of the implementation. - -**No more server crashes:** You can set a [Panic handler](http://godoc.org/github.com/julienschmidt/httprouter#Router.PanicHandler) to deal with panics -that occur while handling an HTTP request. 
The router then recovers and lets the -PanicHandler log what happened and deliver a nice error page. - -Of course you can also set **custom [NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) and [MethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.MethodNotAllowed) handlers** and [**serve static files**](http://godoc.org/github.com/julienschmidt/httprouter#Router.ServeFiles). - -## Usage -This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details. - -Let's start with a trivial example: -```go -package main - -import ( - "fmt" - "github.com/julienschmidt/httprouter" - "net/http" - "log" -) - -func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - fmt.Fprint(w, "Welcome!\n") -} - -func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name")) -} - -func main() { - router := httprouter.New() - router.GET("/", Index) - router.GET("/hello/:name", Hello) - - log.Fatal(http.ListenAndServe(":8080", router)) -} -``` - -### Named parameters -As you can see, `:name` is a *named parameter*. -The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s. -You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method: -`:name` can be retrieved by `ByName("name")`. - -Named parameters only match a single path segment: -``` -Pattern: /user/:user - - /user/gordon match - /user/you match - /user/gordon/profile no match - /user/ no match -``` - -**Note:** Since this router has only explicit matches, you cannot register static routes and parameters for the same path segment. For example, you cannot register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent of each other. - -### Catch-All parameters -The second type are *catch-all* parameters and have the form `*name`. -As the name suggests, they match everything. -Therefore they must always be at the **end** of the pattern: -``` -Pattern: /src/*filepath - - /src/ match - /src/somefile.go match - /src/subdir/somefile.go match -``` - -## How does it work? -The router relies on a tree structure which makes heavy use of *common prefixes*; -it is basically a *compact* [*prefix tree*](http://en.wikipedia.org/wiki/Trie) -(or just [*Radix tree*](http://en.wikipedia.org/wiki/Radix_tree)). -Nodes with a common prefix also share a common parent. Here is a short example -of what the routing tree for the `GET` request method could look like: - -``` -Priority Path Handle -9 \ *<1> -3 ├s nil -2 |├earch\ *<2> -1 |└upport\ *<3> -2 ├blog\ *<4> -1 | └:post nil -1 | └\ *<5> -2 ├about-us\ *<6> -1 | └team\ *<7> -1 └contact\ *<8> -``` -Every `*` represents the memory address of a handler function (a pointer). -If you follow a path through the tree from the root to the leaf, you get the -complete route path, e.g. `\blog\:post\`, where `:post` is just a placeholder -([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a -tree structure also allows us to use dynamic parts like the `:post` parameter, -since we actually match against the routing patterns instead of just comparing -hashes. [As benchmarks show](https://github.com/julienschmidt/go-http-routing-benchmark), -this works very well and efficiently. 
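As an illustration only (this is not httprouter's actual node type or algorithm), a toy radix lookup that consumes common prefixes might look like this:

```go
package main

import (
	"fmt"
	"strings"
)

// node is a toy radix-tree node; httprouter's real node type is more involved.
type node struct {
	prefix   string
	children []*node
	handle   string // stands in for the Handle pointer
}

// lookup consumes the node's prefix from the path, then descends into children.
func lookup(n *node, path string) string {
	if !strings.HasPrefix(path, n.prefix) {
		return ""
	}
	rest := strings.TrimPrefix(path, n.prefix)
	if rest == "" {
		return n.handle
	}
	for _, c := range n.children {
		if h := lookup(c, rest); h != "" {
			return h
		}
	}
	return ""
}

func main() {
	// Mirrors the "s" -> "earch"/"upport" sharing from the diagram above.
	tree := &node{prefix: "/", children: []*node{
		{prefix: "s", children: []*node{
			{prefix: "earch", handle: "search handler"},
			{prefix: "upport", handle: "support handler"},
		}},
		{prefix: "blog", handle: "blog handler"},
	}}
	fmt.Println(lookup(tree, "/search"))  // search handler
	fmt.Println(lookup(tree, "/support")) // support handler
}
```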
- -Since URL paths have a hierarchical structure and make use only of a limited set -of characters (byte values), it is very likely that there are a lot of common -prefixes. This allows us to easily reduce the routing problem into ever smaller problems. -Moreover the router manages a separate tree for every request method. -For one thing it is more space efficient than holding a method->handle map in -every single node, for another it also allows us to greatly reduce the -routing problem before even starting the look-up in the prefix-tree. - -For even better scalability, the child nodes on each tree level are ordered by -priority, where the priority is just the number of handles registered in sub -nodes (children, grandchildren, and so on..). -This helps in two ways: - -1. Nodes which are part of the most routing paths are evaluated first. This -helps to make as many routes as possible reachable as fast as possible. -2. It is some sort of cost compensation. The longest reachable path (highest -cost) can always be evaluated first. The following scheme visualizes the tree -structure. Nodes are evaluated from top to bottom and from left to right. - -``` -├------------ -├--------- -├----- -├---- -├-- -├-- -└- -``` - - -## Why doesn't this work with http.Handler? -**It does!** The router itself implements the http.Handler interface. -Moreover the router provides convenient [adapters for http.Handler](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handler)s and [http.HandlerFunc](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandlerFunc)s -which allow them to be used as a [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) when registering a route. -The only disadvantage is that no parameter values can be retrieved when a -http.Handler or http.HandlerFunc is used, since there is no efficient way to -pass the values with the existing function parameters. -Therefore [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) has a third function parameter. - -Just try it out for yourself, the usage of HttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up. - - -## Where can I find Middleware *X*? -This package just provides a very efficient request router with a few extra -features. The router is just a [http.Handler](http://golang.org/pkg/net/http/#Handler), -you can chain any http.Handler compatible middleware before the router, -for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers). -Or you could [just write your own](http://justinas.org/writing-http-middleware-in-go/), -it's very easy! - -Alternatively, you could try [a web framework based on HttpRouter](#web-frameworks-based-on-httprouter). - -### Multi-domain / Sub-domains -Here is a quick example: Does your server serve multiple domains / hosts? -You want to use sub-domains? -Define a router per host! -```go -// We need an object that implements the http.Handler interface. -// Therefore we need a type for which we implement the ServeHTTP method. -// We just use a map here, in which we map host names (with port) to http.Handlers -type HostSwitch map[string]http.Handler - -// Implement the ServeHTTP method on our new type -func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Check if a http.Handler is registered for the given host. - // If yes, use it to handle the request. 
- if handler := hs[r.Host]; handler != nil { - handler.ServeHTTP(w, r) - } else { - // Handle host names for which no handler is registered - http.Error(w, "Forbidden", 403) // Or Redirect? - } -} - -func main() { - // Initialize a router as usual - router := httprouter.New() - router.GET("/", Index) - router.GET("/hello/:name", Hello) - - // Make a new HostSwitch and insert the router (our http handler) - // for example.com and port 12345 - hs := make(HostSwitch) - hs["example.com:12345"] = router - - // Use the HostSwitch to listen and serve on port 12345 - log.Fatal(http.ListenAndServe(":12345", hs)) -} -``` - -### Basic Authentication -Another quick example: Basic Authentication (RFC 2617) for handles: - -```go -package main - -import ( - "bytes" - "encoding/base64" - "fmt" - "github.com/julienschmidt/httprouter" - "net/http" - "log" - "strings" -) - -func BasicAuth(h httprouter.Handle, user, pass []byte) httprouter.Handle { - return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - const basicAuthPrefix string = "Basic " - - // Get the Basic Authentication credentials - auth := r.Header.Get("Authorization") - if strings.HasPrefix(auth, basicAuthPrefix) { - // Check credentials - payload, err := base64.StdEncoding.DecodeString(auth[len(basicAuthPrefix):]) - if err == nil { - pair := bytes.SplitN(payload, []byte(":"), 2) - if len(pair) == 2 && - bytes.Equal(pair[0], user) && - bytes.Equal(pair[1], pass) { - - // Delegate request to the given handle - h(w, r, ps) - return - } - } - } - - // Request Basic Authentication otherwise - w.Header().Set("WWW-Authenticate", "Basic realm=Restricted") - http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) - } -} - -func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - fmt.Fprint(w, "Not protected!\n") -} - -func Protected(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - fmt.Fprint(w, "Protected!\n") -} - -func main() { - user := []byte("gordon") - pass := []byte("secret!") - - router := httprouter.New() - router.GET("/", Index) - router.GET("/protected/", BasicAuth(Protected, user, pass)) - - log.Fatal(http.ListenAndServe(":8080", router)) -} -``` - -## Chaining with the NotFound handler - -**NOTE: It might be required to set [Router.HandleMethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.** - -You can use another [http.Handler](http://golang.org/pkg/net/http/#Handler), for example another router, to handle requests which could not be matched by this router by using the [Router.NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining. - -### Static files -The `NotFound` handler can for example be used to serve static files from the root path `/` (like an index.html file along with other assets): -```go -// Serve static files from the ./public directory -router.NotFound = http.FileServer(http.Dir("public")).ServeHTTP -``` - -But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`. 
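A sketch of that cleaner approach with the router's `ServeFiles` helper and a catch-all route (the directory name is illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

func main() {
	router := httprouter.New()
	// Files under ./public are served at /static/..., so the router's
	// strict matching rules still apply everywhere else.
	router.ServeFiles("/static/*filepath", http.Dir("public"))

	log.Fatal(http.ListenAndServe(":8080", router))
}
```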
- -## Web Frameworks based on HttpRouter -If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package: -* [Ace](https://github.com/plimble/ace): Blazing fast Go Web Framework -* [api2go](https://github.com/univedo/api2go): A JSON API Implementation for Go -* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance -* [Goat](https://github.com/bahlo/goat): A minimalistic REST API server in Go -* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine -* [Hitch](https://github.com/nbio/hitch): Hitch ties httprouter, [httpcontext](https://github.com/nbio/httpcontext), and middleware up in a bow -* [kami](https://github.com/guregu/kami): A tiny web framework using x/net/context -* [Medeina](https://github.com/imdario/medeina): Inspired by Ruby's Roda and Cuba -* [Neko](https://github.com/rocwong/neko): A lightweight web application framework for Golang -* [Roxanna](https://github.com/iamthemuffinman/Roxanna): An amalgamation of httprouter, better logging, and hot reload -* [siesta](https://github.com/VividCortex/siesta): Composable HTTP handlers with contexts diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path.go deleted file mode 100644 index 486134db37..0000000000 --- a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Based on the path package, Copyright 2009 The Go Authors. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package httprouter - -// CleanPath is the URL version of path.Clean, it returns a canonical URL path -// for p, eliminating . and .. elements. -// -// The following rules are applied iteratively until no further processing can -// be done: -// 1. Replace multiple slashes with a single slash. -// 2. Eliminate each . path name element (the current directory). -// 3. Eliminate each inner .. path name element (the parent directory) -// along with the non-.. element that precedes it. -// 4. Eliminate .. elements that begin a rooted path: -// that is, replace "/.." by "/" at the beginning of a path. -// -// If the result of this process is an empty string, "/" is returned -func CleanPath(p string) string { - // Turn empty string into "/" - if p == "" { - return "/" - } - - n := len(p) - var buf []byte - - // Invariants: - // reading from path; r is index of next byte to process. - // writing to buf; w is index of next byte to write. - - // path must start with '/' - r := 1 - w := 1 - - if p[0] != '/' { - r = 0 - buf = make([]byte, n+1) - buf[0] = '/' - } - - trailing := n > 2 && p[n-1] == '/' - - // A bit more clunky without a 'lazybuf' like the path package, but the loop - // gets completely inlined (bufApp). So in contrast to the path package this - // loop has no expensive function calls (except 1x make) - - for r < n { - switch { - case p[r] == '/': - // empty path element, trailing slash is added after the end - r++ - - case p[r] == '.' && r+1 == n: - trailing = true - r++ - - case p[r] == '.' && p[r+1] == '/': - // . element - r++ - - case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): - // .. 
element: remove to last / - r += 2 - - if w > 1 { - // can backtrack - w-- - - if buf == nil { - for w > 1 && p[w] != '/' { - w-- - } - } else { - for w > 1 && buf[w] != '/' { - w-- - } - } - } - - default: - // real path element. - // add slash if needed - if w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - // copy element - for r < n && p[r] != '/' { - bufApp(&buf, p, w, p[r]) - w++ - r++ - } - } - } - - // re-append trailing slash - if trailing && w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - if buf == nil { - return p[:w] - } - return string(buf[:w]) -} - -// internal helper to lazily create a buffer if necessary -func bufApp(buf *[]byte, s string, w int, c byte) { - if *buf == nil { - if s[w] == c { - return - } - - *buf = make([]byte, len(s)) - copy(*buf, s[:w]) - } - (*buf)[w] = c -} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path_test.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path_test.go deleted file mode 100644 index c4ceda5da2..0000000000 --- a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Based on the path package, Copyright 2009 The Go Authors. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package httprouter - -import ( - "runtime" - "testing" -) - -var cleanTests = []struct { - path, result string -}{ - // Already clean - {"/", "/"}, - {"/abc", "/abc"}, - {"/a/b/c", "/a/b/c"}, - {"/abc/", "/abc/"}, - {"/a/b/c/", "/a/b/c/"}, - - // missing root - {"", "/"}, - {"abc", "/abc"}, - {"abc/def", "/abc/def"}, - {"a/b/c", "/a/b/c"}, - - // Remove doubled slash - {"//", "/"}, - {"/abc//", "/abc/"}, - {"/abc/def//", "/abc/def/"}, - {"/a/b/c//", "/a/b/c/"}, - {"/abc//def//ghi", "/abc/def/ghi"}, - {"//abc", "/abc"}, - {"///abc", "/abc"}, - {"//abc//", "/abc/"}, - - // Remove . elements - {".", "/"}, - {"./", "/"}, - {"/abc/./def", "/abc/def"}, - {"/./abc/def", "/abc/def"}, - {"/abc/.", "/abc/"}, - - // Remove .. 
elements - {"..", "/"}, - {"../", "/"}, - {"../../", "/"}, - {"../..", "/"}, - {"../../abc", "/abc"}, - {"/abc/def/ghi/../jkl", "/abc/def/jkl"}, - {"/abc/def/../ghi/../jkl", "/abc/jkl"}, - {"/abc/def/..", "/abc"}, - {"/abc/def/../..", "/"}, - {"/abc/def/../../..", "/"}, - {"/abc/def/../../..", "/"}, - {"/abc/def/../../../ghi/jkl/../../../mno", "/mno"}, - - // Combinations - {"abc/./../def", "/def"}, - {"abc//./../def", "/def"}, - {"abc/../../././../def", "/def"}, -} - -func TestPathClean(t *testing.T) { - for _, test := range cleanTests { - if s := CleanPath(test.path); s != test.result { - t.Errorf("CleanPath(%q) = %q, want %q", test.path, s, test.result) - } - if s := CleanPath(test.result); s != test.result { - t.Errorf("CleanPath(%q) = %q, want %q", test.result, s, test.result) - } - } -} - -func TestPathCleanMallocs(t *testing.T) { - if testing.Short() { - t.Skip("skipping malloc count in short mode") - } - if runtime.GOMAXPROCS(0) > 1 { - t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1") - return - } - - for _, test := range cleanTests { - allocs := testing.AllocsPerRun(100, func() { CleanPath(test.result) }) - if allocs > 0 { - t.Errorf("CleanPath(%q): %v allocs, want zero", test.result, allocs) - } - } -} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router.go deleted file mode 100644 index 8b5ff34687..0000000000 --- a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router.go +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -// Package httprouter is a trie based high performance HTTP request router. -// -// A trivial example is: -// -// package main -// -// import ( -// "fmt" -// "github.com/julienschmidt/httprouter" -// "net/http" -// "log" -// ) -// -// func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { -// fmt.Fprint(w, "Welcome!\n") -// } -// -// func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { -// fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name")) -// } -// -// func main() { -// router := httprouter.New() -// router.GET("/", Index) -// router.GET("/hello/:name", Hello) -// -// log.Fatal(http.ListenAndServe(":8080", router)) -// } -// -// The router matches incoming requests by the request method and the path. -// If a handle is registered for this path and method, the router delegates the -// request to that function. -// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to -// register handles, for all other methods router.Handle can be used. -// -// The registered path, against which the router matches incoming requests, can -// contain two types of parameters: -// Syntax Type -// :name named parameter -// *name catch-all parameter -// -// Named parameters are dynamic path segments. They match anything until the -// next '/' or the path end: -// Path: /blog/:category/:post -// -// Requests: -// /blog/go/request-routers match: category="go", post="request-routers" -// /blog/go/request-routers/ no match, but the router would redirect -// /blog/go/ no match -// /blog/go/request-routers/comments no match -// -// Catch-all parameters match anything until the path end, including the -// directory index (the '/' before the catch-all). Since they match anything -// until the end, catch-all parameters must always be the final path element. 
-// Path: /files/*filepath -// -// Requests: -// /files/ match: filepath="/" -// /files/LICENSE match: filepath="/LICENSE" -// /files/templates/article.html match: filepath="/templates/article.html" -// /files no match, but the router would redirect -// -// The value of parameters is saved as a slice of the Param struct, consisting -// each of a key and a value. The slice is passed to the Handle func as a third -// parameter. -// There are two ways to retrieve the value of a parameter: -// // by the name of the parameter -// user := ps.ByName("user") // defined by :user or *user -// -// // by the index of the parameter. This way you can also get the name (key) -// thirdKey := ps[2].Key // the name of the 3rd parameter -// thirdValue := ps[2].Value // the value of the 3rd parameter -package httprouter - -import ( - "net/http" -) - -// Handle is a function that can be registered to a route to handle HTTP -// requests. Like http.HandlerFunc, but has a third parameter for the values of -// wildcards (variables). -type Handle func(http.ResponseWriter, *http.Request, Params) - -// Param is a single URL parameter, consisting of a key and a value. -type Param struct { - Key string - Value string -} - -// Params is a Param-slice, as returned by the router. -// The slice is ordered, the first URL parameter is also the first slice value. -// It is therefore safe to read values by the index. -type Params []Param - -// ByName returns the value of the first Param which key matches the given name. -// If no matching Param is found, an empty string is returned. -func (ps Params) ByName(name string) string { - for i := range ps { - if ps[i].Key == name { - return ps[i].Value - } - } - return "" -} - -// Router is a http.Handler which can be used to dispatch requests to different -// handler functions via configurable routes -type Router struct { - trees map[string]*node - - // Enables automatic redirection if the current route can't be matched but a - // handler for the path with (without) the trailing slash exists. - // For example if /foo/ is requested but a route only exists for /foo, the - // client is redirected to /foo with http status code 301 for GET requests - // and 307 for all other request methods. - RedirectTrailingSlash bool - - // If enabled, the router tries to fix the current request path, if no - // handle is registered for it. - // First superfluous path elements like ../ or // are removed. - // Afterwards the router does a case-insensitive lookup of the cleaned path. - // If a handle can be found for this route, the router makes a redirection - // to the corrected path with status code 301 for GET requests and 307 for - // all other request methods. - // For example /FOO and /..//Foo could be redirected to /foo. - // RedirectTrailingSlash is independent of this option. - RedirectFixedPath bool - - // If enabled, the router checks if another method is allowed for the - // current route, if the current request can not be routed. - // If this is the case, the request is answered with 'Method Not Allowed' - // and HTTP status code 405. - // If no other Method is allowed, the request is delegated to the NotFound - // handler. - HandleMethodNotAllowed bool - - // Configurable http.Handler which is called when no matching route is - // found. If it is not set, http.NotFound is used. - NotFound http.Handler - - // Configurable http.Handler which is called when a request - // cannot be routed and HandleMethodNotAllowed is true. 
- // If it is not set, http.Error with http.StatusMethodNotAllowed is used. - MethodNotAllowed http.Handler - - // Function to handle panics recovered from http handlers. - // It should be used to generate an error page and return the http error code - // 500 (Internal Server Error). - // The handler can be used to keep your server from crashing because of - // unrecovered panics. - PanicHandler func(http.ResponseWriter, *http.Request, interface{}) -} - -// Make sure the Router conforms with the http.Handler interface -var _ http.Handler = New() - -// New returns a new initialized Router. -// Path auto-correction, including trailing slashes, is enabled by default. -func New() *Router { - return &Router{ - RedirectTrailingSlash: true, - RedirectFixedPath: true, - HandleMethodNotAllowed: true, - } -} - -// GET is a shortcut for router.Handle("GET", path, handle) -func (r *Router) GET(path string, handle Handle) { - r.Handle("GET", path, handle) -} - -// HEAD is a shortcut for router.Handle("HEAD", path, handle) -func (r *Router) HEAD(path string, handle Handle) { - r.Handle("HEAD", path, handle) -} - -// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle) -func (r *Router) OPTIONS(path string, handle Handle) { - r.Handle("OPTIONS", path, handle) -} - -// POST is a shortcut for router.Handle("POST", path, handle) -func (r *Router) POST(path string, handle Handle) { - r.Handle("POST", path, handle) -} - -// PUT is a shortcut for router.Handle("PUT", path, handle) -func (r *Router) PUT(path string, handle Handle) { - r.Handle("PUT", path, handle) -} - -// PATCH is a shortcut for router.Handle("PATCH", path, handle) -func (r *Router) PATCH(path string, handle Handle) { - r.Handle("PATCH", path, handle) -} - -// DELETE is a shortcut for router.Handle("DELETE", path, handle) -func (r *Router) DELETE(path string, handle Handle) { - r.Handle("DELETE", path, handle) -} - -// Handle registers a new request handle with the given path and method. -// -// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut -// functions can be used. -// -// This function is intended for bulk loading and to allow the usage of less -// frequently used, non-standardized or custom methods (e.g. for internal -// communication with a proxy). -func (r *Router) Handle(method, path string, handle Handle) { - if path[0] != '/' { - panic("path must begin with '/' in path '" + path + "'") - } - - if r.trees == nil { - r.trees = make(map[string]*node) - } - - root := r.trees[method] - if root == nil { - root = new(node) - r.trees[method] = root - } - - root.addRoute(path, handle) -} - -// Handler is an adapter which allows the usage of an http.Handler as a -// request handle. -func (r *Router) Handler(method, path string, handler http.Handler) { - r.Handle(method, path, - func(w http.ResponseWriter, req *http.Request, _ Params) { - handler.ServeHTTP(w, req) - }, - ) -} - -// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a -// request handle. -func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) { - r.Handler(method, path, handler) -} - -// ServeFiles serves files from the given file system root. -// The path must end with "/*filepath", files are then served from the local -// path /defined/root/dir/*filepath. -// For example if root is "/etc" and *filepath is "passwd", the local file -// "/etc/passwd" would be served. -// Internally a http.FileServer is used, therefore http.NotFound is used instead -// of the Router's NotFound handler.
-// To use the operating system's file system implementation, -// use http.Dir: -// router.ServeFiles("/src/*filepath", http.Dir("/var/www")) -func (r *Router) ServeFiles(path string, root http.FileSystem) { - if len(path) < 10 || path[len(path)-10:] != "/*filepath" { - panic("path must end with /*filepath in path '" + path + "'") - } - - fileServer := http.FileServer(root) - - r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) { - req.URL.Path = ps.ByName("filepath") - fileServer.ServeHTTP(w, req) - }) -} - -func (r *Router) recv(w http.ResponseWriter, req *http.Request) { - if rcv := recover(); rcv != nil { - r.PanicHandler(w, req, rcv) - } -} - -// Lookup allows the manual lookup of a method + path combo. -// This is e.g. useful to build a framework around this router. -// If the path was found, it returns the handle function and the path parameter -// values. Otherwise the third return value indicates whether a redirection to -// the same path with an extra / without the trailing slash should be performed. -func (r *Router) Lookup(method, path string) (Handle, Params, bool) { - if root := r.trees[method]; root != nil { - return root.getValue(path) - } - return nil, nil, false -} - -// ServeHTTP makes the router implement the http.Handler interface. -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if r.PanicHandler != nil { - defer r.recv(w, req) - } - - if root := r.trees[req.Method]; root != nil { - path := req.URL.Path - - if handle, ps, tsr := root.getValue(path); handle != nil { - handle(w, req, ps) - return - } else if req.Method != "CONNECT" && path != "/" { - code := 301 // Permanent redirect, request with GET method - if req.Method != "GET" { - // Temporary redirect, request with same method - // As of Go 1.3, Go does not support status code 308. - code = 307 - } - - if tsr && r.RedirectTrailingSlash { - if len(path) > 1 && path[len(path)-1] == '/' { - req.URL.Path = path[:len(path)-1] - } else { - req.URL.Path = path + "/" - } - http.Redirect(w, req, req.URL.String(), code) - return - } - - // Try to fix the request path - if r.RedirectFixedPath { - fixedPath, found := root.findCaseInsensitivePath( - CleanPath(path), - r.RedirectTrailingSlash, - ) - if found { - req.URL.Path = string(fixedPath) - http.Redirect(w, req, req.URL.String(), code) - return - } - } - } - } - - // Handle 405 - if r.HandleMethodNotAllowed { - for method := range r.trees { - // Skip the requested method - we already tried this one - if method == req.Method { - continue - } - - handle, _, _ := r.trees[method].getValue(req.URL.Path) - if handle != nil { - if r.MethodNotAllowed != nil { - r.MethodNotAllowed.ServeHTTP(w, req) - } else { - http.Error(w, - http.StatusText(http.StatusMethodNotAllowed), - http.StatusMethodNotAllowed, - ) - } - return - } - } - } - - // Handle 404 - if r.NotFound != nil { - r.NotFound.ServeHTTP(w, req) - } else { - http.NotFound(w, req) - } -} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router_test.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router_test.go deleted file mode 100644 index e3141bd281..0000000000 --- a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router_test.go +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. 
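// Editor's note: a minimal sketch, not part of the deleted sources, of the
// redirect behavior that Router.ServeHTTP (above) implements. With the
// defaults set by New() (RedirectTrailingSlash and RedirectFixedPath both
// true), a GET request for "/foo/" when only "/foo" is registered is
// answered with a 301 redirect to "/foo"; a non-GET method would get a 307.
// All names below other than the httprouter API are hypothetical:
//
//	router := httprouter.New()
//	router.GET("/foo", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
//		fmt.Fprint(w, "foo")
//	})
//
//	r, _ := http.NewRequest("GET", "/foo/", nil)
//	w := httptest.NewRecorder()
//	router.ServeHTTP(w, r)
//	fmt.Println(w.Code, w.Header().Get("Location")) // prints: 301 /foo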
- -package httprouter - -import ( - "errors" - "fmt" - "net/http" - "net/http/httptest" - "reflect" - "testing" -) - -type mockResponseWriter struct{} - -func (m *mockResponseWriter) Header() (h http.Header) { - return http.Header{} -} - -func (m *mockResponseWriter) Write(p []byte) (n int, err error) { - return len(p), nil -} - -func (m *mockResponseWriter) WriteString(s string) (n int, err error) { - return len(s), nil -} - -func (m *mockResponseWriter) WriteHeader(int) {} - -func TestParams(t *testing.T) { - ps := Params{ - Param{"param1", "value1"}, - Param{"param2", "value2"}, - Param{"param3", "value3"}, - } - for i := range ps { - if val := ps.ByName(ps[i].Key); val != ps[i].Value { - t.Errorf("Wrong value for %s: Got %s; Want %s", ps[i].Key, val, ps[i].Value) - } - } - if val := ps.ByName("noKey"); val != "" { - t.Errorf("Expected empty string for not found key; got: %s", val) - } -} - -func TestRouter(t *testing.T) { - router := New() - - routed := false - router.Handle("GET", "/user/:name", func(w http.ResponseWriter, r *http.Request, ps Params) { - routed = true - want := Params{Param{"name", "gopher"}} - if !reflect.DeepEqual(ps, want) { - t.Fatalf("wrong wildcard values: want %v, got %v", want, ps) - } - }) - - w := new(mockResponseWriter) - - req, _ := http.NewRequest("GET", "/user/gopher", nil) - router.ServeHTTP(w, req) - - if !routed { - t.Fatal("routing failed") - } -} - -type handlerStruct struct { - handled *bool -} - -func (h handlerStruct) ServeHTTP(w http.ResponseWriter, r *http.Request) { - *h.handled = true -} - -func TestRouterAPI(t *testing.T) { - var get, head, options, post, put, patch, delete, handler, handlerFunc bool - - httpHandler := handlerStruct{&handler} - - router := New() - router.GET("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { - get = true - }) - router.HEAD("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { - head = true - }) - router.OPTIONS("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { - options = true - }) - router.POST("/POST", func(w http.ResponseWriter, r *http.Request, _ Params) { - post = true - }) - router.PUT("/PUT", func(w http.ResponseWriter, r *http.Request, _ Params) { - put = true - }) - router.PATCH("/PATCH", func(w http.ResponseWriter, r *http.Request, _ Params) { - patch = true - }) - router.DELETE("/DELETE", func(w http.ResponseWriter, r *http.Request, _ Params) { - delete = true - }) - router.Handler("GET", "/Handler", httpHandler) - router.HandlerFunc("GET", "/HandlerFunc", func(w http.ResponseWriter, r *http.Request) { - handlerFunc = true - }) - - w := new(mockResponseWriter) - - r, _ := http.NewRequest("GET", "/GET", nil) - router.ServeHTTP(w, r) - if !get { - t.Error("routing GET failed") - } - - r, _ = http.NewRequest("HEAD", "/GET", nil) - router.ServeHTTP(w, r) - if !head { - t.Error("routing HEAD failed") - } - - r, _ = http.NewRequest("OPTIONS", "/GET", nil) - router.ServeHTTP(w, r) - if !options { - t.Error("routing OPTIONS failed") - } - - r, _ = http.NewRequest("POST", "/POST", nil) - router.ServeHTTP(w, r) - if !post { - t.Error("routing POST failed") - } - - r, _ = http.NewRequest("PUT", "/PUT", nil) - router.ServeHTTP(w, r) - if !put { - t.Error("routing PUT failed") - } - - r, _ = http.NewRequest("PATCH", "/PATCH", nil) - router.ServeHTTP(w, r) - if !patch { - t.Error("routing PATCH failed") - } - - r, _ = http.NewRequest("DELETE", "/DELETE", nil) - router.ServeHTTP(w, r) - if !delete { - t.Error("routing DELETE failed") - } - - r, _ = http.NewRequest("GET",
"/Handler", nil) - router.ServeHTTP(w, r) - if !handler { - t.Error("routing Handler failed") - } - - r, _ = http.NewRequest("GET", "/HandlerFunc", nil) - router.ServeHTTP(w, r) - if !handlerFunc { - t.Error("routing HandlerFunc failed") - } -} - -func TestRouterRoot(t *testing.T) { - router := New() - recv := catchPanic(func() { - router.GET("noSlashRoot", nil) - }) - if recv == nil { - t.Fatal("registering path not beginning with '/' did not panic") - } -} - -func TestRouterChaining(t *testing.T) { - router1 := New() - router2 := New() - router1.NotFound = router2 - - fooHit := false - router1.POST("/foo", func(w http.ResponseWriter, req *http.Request, _ Params) { - fooHit = true - w.WriteHeader(http.StatusOK) - }) - - barHit := false - router2.POST("/bar", func(w http.ResponseWriter, req *http.Request, _ Params) { - barHit = true - w.WriteHeader(http.StatusOK) - }) - - r, _ := http.NewRequest("POST", "/foo", nil) - w := httptest.NewRecorder() - router1.ServeHTTP(w, r) - if !(w.Code == http.StatusOK && fooHit) { - t.Errorf("Regular routing failed with router chaining.") - t.FailNow() - } - - r, _ = http.NewRequest("POST", "/bar", nil) - w = httptest.NewRecorder() - router1.ServeHTTP(w, r) - if !(w.Code == http.StatusOK && barHit) { - t.Errorf("Chained routing failed with router chaining.") - t.FailNow() - } - - r, _ = http.NewRequest("POST", "/qax", nil) - w = httptest.NewRecorder() - router1.ServeHTTP(w, r) - if !(w.Code == http.StatusNotFound) { - t.Errorf("NotFound behavior failed with router chaining.") - t.FailNow() - } -} - -func TestRouterNotAllowed(t *testing.T) { - handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {} - - router := New() - router.POST("/path", handlerFunc) - - // Test not allowed - r, _ := http.NewRequest("GET", "/path", nil) - w := httptest.NewRecorder() - router.ServeHTTP(w, r) - if !(w.Code == http.StatusMethodNotAllowed) { - t.Errorf("NotAllowed handling failed: Code=%d, Header=%v", w.Code, w.Header()) - } - - w = httptest.NewRecorder() - responseText := "custom method" - router.MethodNotAllowed = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(http.StatusTeapot) - w.Write([]byte(responseText)) - }) - router.ServeHTTP(w, r) - if got := w.Body.String(); !(got == responseText) { - t.Errorf("unexpected response got %q want %q", got, responseText) - } - if w.Code != http.StatusTeapot { - t.Errorf("unexpected response code %d want %d", w.Code, http.StatusTeapot) - } -} - -func TestRouterNotFound(t *testing.T) { - handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {} - - router := New() - router.GET("/path", handlerFunc) - router.GET("/dir/", handlerFunc) - router.GET("/", handlerFunc) - - testRoutes := []struct { - route string - code int - header string - }{ - {"/path/", 301, "map[Location:[/path]]"}, // TSR -/ - {"/dir", 301, "map[Location:[/dir/]]"}, // TSR +/ - {"", 301, "map[Location:[/]]"}, // TSR +/ - {"/PATH", 301, "map[Location:[/path]]"}, // Fixed Case - {"/DIR/", 301, "map[Location:[/dir/]]"}, // Fixed Case - {"/PATH/", 301, "map[Location:[/path]]"}, // Fixed Case -/ - {"/DIR", 301, "map[Location:[/dir/]]"}, // Fixed Case +/ - {"/../path", 301, "map[Location:[/path]]"}, // CleanPath - {"/nope", 404, ""}, // NotFound - } - for _, tr := range testRoutes { - r, _ := http.NewRequest("GET", tr.route, nil) - w := httptest.NewRecorder() - router.ServeHTTP(w, r) - if !(w.Code == tr.code && (w.Code == 404 || fmt.Sprint(w.Header()) == tr.header)) { - t.Errorf("NotFound handling route %s 
failed: Code=%d, Header=%v", tr.route, w.Code, w.Header()) - } - } - - // Test custom not found handler - var notFound bool - router.NotFound = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(404) - notFound = true - }) - r, _ := http.NewRequest("GET", "/nope", nil) - w := httptest.NewRecorder() - router.ServeHTTP(w, r) - if !(w.Code == 404 && notFound == true) { - t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", w.Code, w.Header()) - } - - // Test other method than GET (want 307 instead of 301) - router.PATCH("/path", handlerFunc) - r, _ = http.NewRequest("PATCH", "/path/", nil) - w = httptest.NewRecorder() - router.ServeHTTP(w, r) - if !(w.Code == 307 && fmt.Sprint(w.Header()) == "map[Location:[/path]]") { - t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", w.Code, w.Header()) - } - - // Test special case where no node for the prefix "/" exists - router = New() - router.GET("/a", handlerFunc) - r, _ = http.NewRequest("GET", "/", nil) - w = httptest.NewRecorder() - router.ServeHTTP(w, r) - if !(w.Code == 404) { - t.Errorf("NotFound handling route / failed: Code=%d", w.Code) - } -} - -func TestRouterPanicHandler(t *testing.T) { - router := New() - panicHandled := false - - router.PanicHandler = func(rw http.ResponseWriter, r *http.Request, p interface{}) { - panicHandled = true - } - - router.Handle("PUT", "/user/:name", func(_ http.ResponseWriter, _ *http.Request, _ Params) { - panic("oops!") - }) - - w := new(mockResponseWriter) - req, _ := http.NewRequest("PUT", "/user/gopher", nil) - - defer func() { - if rcv := recover(); rcv != nil { - t.Fatal("handling panic failed") - } - }() - - router.ServeHTTP(w, req) - - if !panicHandled { - t.Fatal("simulating failed") - } -} - -func TestRouterLookup(t *testing.T) { - routed := false - wantHandle := func(_ http.ResponseWriter, _ *http.Request, _ Params) { - routed = true - } - wantParams := Params{Param{"name", "gopher"}} - - router := New() - - // try empty router first - handle, _, tsr := router.Lookup("GET", "/nope") - if handle != nil { - t.Fatalf("Got handle for unregistered pattern: %v", handle) - } - if tsr { - t.Error("Got wrong TSR recommendation!") - } - - // insert route and try again - router.GET("/user/:name", wantHandle) - - handle, params, tsr := router.Lookup("GET", "/user/gopher") - if handle == nil { - t.Fatal("Got no handle!") - } else { - handle(nil, nil, nil) - if !routed { - t.Fatal("Routing failed!") - } - } - - if !reflect.DeepEqual(params, wantParams) { - t.Fatalf("Wrong parameter values: want %v, got %v", wantParams, params) - } - - handle, _, tsr = router.Lookup("GET", "/user/gopher/") - if handle != nil { - t.Fatalf("Got handle for unregistered pattern: %v", handle) - } - if !tsr { - t.Error("Got no TSR recommendation!") - } - - handle, _, tsr = router.Lookup("GET", "/nope") - if handle != nil { - t.Fatalf("Got handle for unregistered pattern: %v", handle) - } - if tsr { - t.Error("Got wrong TSR recommendation!") - } -} - -type mockFileSystem struct { - opened bool -} - -func (mfs *mockFileSystem) Open(name string) (http.File, error) { - mfs.opened = true - return nil, errors.New("this is just a mock") -} - -func TestRouterServeFiles(t *testing.T) { - router := New() - mfs := &mockFileSystem{} - - recv := catchPanic(func() { - router.ServeFiles("/noFilepath", mfs) - }) - if recv == nil { - t.Fatal("registering path not ending with '*filepath' did not panic") - } - - router.ServeFiles("/*filepath", mfs) - w := new(mockResponseWriter) - r, _ := 
http.NewRequest("GET", "/favicon.ico", nil) - router.ServeHTTP(w, r) - if !mfs.opened { - t.Error("serving file failed") - } -} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree.go deleted file mode 100644 index a15bc2cbc3..0000000000 --- a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree.go +++ /dev/null @@ -1,555 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package httprouter - -import ( - "strings" - "unicode" -) - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -func countParams(path string) uint8 { - var n uint - for i := 0; i < len(path); i++ { - if path[i] != ':' && path[i] != '*' { - continue - } - n++ - } - if n >= 255 { - return 255 - } - return uint8(n) -} - -type nodeType uint8 - -const ( - static nodeType = 0 - param nodeType = 1 - catchAll nodeType = 2 -) - -type node struct { - path string - wildChild bool - nType nodeType - maxParams uint8 - indices string - children []*node - handle Handle - priority uint32 -} - -// increments priority of the given child and reorders if necessary -func (n *node) incrementChildPrio(pos int) int { - n.children[pos].priority++ - prio := n.children[pos].priority - - // adjust position (move to front) - newPos := pos - for newPos > 0 && n.children[newPos-1].priority < prio { - // swap node positions - tmpN := n.children[newPos-1] - n.children[newPos-1] = n.children[newPos] - n.children[newPos] = tmpN - - newPos-- - } - - // build new index char string - if newPos != pos { - n.indices = n.indices[:newPos] + // unchanged prefix, might be empty - n.indices[pos:pos+1] + // the index char we move - n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' - } - - return newPos -} - -// addRoute adds a node with the given handle to the path. -// Not concurrency-safe! -func (n *node) addRoute(path string, handle Handle) { - fullPath := path - n.priority++ - numParams := countParams(path) - - // non-empty tree - if len(n.path) > 0 || len(n.children) > 0 { - walk: - for { - // Update maxParams of the current node - if numParams > n.maxParams { - n.maxParams = numParams - } - - // Find the longest common prefix. - // This also implies that the common prefix contains no ':' or '*' - // since the existing key can't contain those chars. - i := 0 - max := min(len(path), len(n.path)) - for i < max && path[i] == n.path[i] { - i++ - } - - // Split edge - if i < len(n.path) { - child := node{ - path: n.path[i:], - wildChild: n.wildChild, - indices: n.indices, - children: n.children, - handle: n.handle, - priority: n.priority - 1, - } - - // Update maxParams (max of all children) - for i := range child.children { - if child.children[i].maxParams > child.maxParams { - child.maxParams = child.children[i].maxParams - } - } - - n.children = []*node{&child} - // []byte for proper unicode char conversion, see #65 - n.indices = string([]byte{n.path[i]}) - n.path = path[:i] - n.handle = nil - n.wildChild = false - } - - // Make new node a child of this node - if i < len(path) { - path = path[i:] - - if n.wildChild { - n = n.children[0] - n.priority++ - - // Update maxParams of the child node - if numParams > n.maxParams { - n.maxParams = numParams - } - numParams-- - - // Check if the wildcard matches - if len(path) >= len(n.path) && n.path == path[:len(n.path)] { - // check for longer wildcard, e.g. 
:name and :names - if len(n.path) >= len(path) || path[len(n.path)] == '/' { - continue walk - } - } - - panic("path segment '" + path + - "' conflicts with existing wildcard '" + n.path + - "' in path '" + fullPath + "'") - } - - c := path[0] - - // slash after param - if n.nType == param && c == '/' && len(n.children) == 1 { - n = n.children[0] - n.priority++ - continue walk - } - - // Check if a child with the next path byte exists - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - i = n.incrementChildPrio(i) - n = n.children[i] - continue walk - } - } - - // Otherwise insert it - if c != ':' && c != '*' { - // []byte for proper unicode char conversion, see #65 - n.indices += string([]byte{c}) - child := &node{ - maxParams: numParams, - } - n.children = append(n.children, child) - n.incrementChildPrio(len(n.indices) - 1) - n = child - } - n.insertChild(numParams, path, fullPath, handle) - return - - } else if i == len(path) { // Make node a (in-path) leaf - if n.handle != nil { - panic("a handle is already registered for path '" + fullPath + "'") - } - n.handle = handle - } - return - } - } else { // Empty tree - n.insertChild(numParams, path, fullPath, handle) - } -} - -func (n *node) insertChild(numParams uint8, path, fullPath string, handle Handle) { - var offset int // already handled bytes of the path - - // find prefix until first wildcard (beginning with ':' or '*') - for i, max := 0, len(path); numParams > 0; i++ { - c := path[i] - if c != ':' && c != '*' { - continue - } - - // find wildcard end (either '/' or path end) - end := i + 1 - for end < max && path[end] != '/' { - switch path[end] { - // the wildcard name must not contain ':' and '*' - case ':', '*': - panic("only one wildcard per path segment is allowed, has: '" + - path[i:] + "' in path '" + fullPath + "'") - default: - end++ - } - } - - // check if this node has existing children which would be - // unreachable if we insert the wildcard here - if len(n.children) > 0 { - panic("wildcard route '" + path[i:end] + - "' conflicts with existing children in path '" + fullPath + "'") - } - - // check if the wildcard has a name - if end-i < 2 { - panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") - } - - if c == ':' { // param - // split path at the beginning of the wildcard - if i > 0 { - n.path = path[offset:i] - offset = i - } - - child := &node{ - nType: param, - maxParams: numParams, - } - n.children = []*node{child} - n.wildChild = true - n = child - n.priority++ - numParams-- - - // if the path doesn't end with the wildcard, then there - // will be another non-wildcard subpath starting with '/' - if end < max { - n.path = path[offset:end] - offset = end - - child := &node{ - maxParams: numParams, - priority: 1, - } - n.children = []*node{child} - n = child - } - - } else { // catchAll - if end != max || numParams > 1 { - panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") - } - - if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { - panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") - } - - // currently fixed width 1 for '/' - i-- - if path[i] != '/' { - panic("no / before catch-all in path '" + fullPath + "'") - } - - n.path = path[offset:i] - - // first node: catchAll node with empty path - child := &node{ - wildChild: true, - nType: catchAll, - maxParams: 1, - } - n.children = []*node{child} - n.indices = string(path[i]) - n = child - n.priority++ - - // second node: node
holding the variable - child = &node{ - path: path[i:], - nType: catchAll, - maxParams: 1, - handle: handle, - priority: 1, - } - n.children = []*node{child} - - return - } - } - - // insert remaining path part and handle to the leaf - n.path = path[offset:] - n.handle = handle -} - -// Returns the handle registered with the given path (key). The values of -// wildcards are saved to a map. -// If no handle can be found, a TSR (trailing slash redirect) recommendation is -// made if a handle exists with an extra (without the) trailing slash for the -// given path. -func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) { -walk: // Outer loop for walking the tree - for { - if len(path) > len(n.path) { - if path[:len(n.path)] == n.path { - path = path[len(n.path):] - // If this node does not have a wildcard (param or catchAll) - // child, we can just look up the next child node and continue - // to walk down the tree - if !n.wildChild { - c := path[0] - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - n = n.children[i] - continue walk - } - } - - // Nothing found. - // We can recommend to redirect to the same URL without a - // trailing slash if a leaf exists for that path. - tsr = (path == "/" && n.handle != nil) - return - - } - - // handle wildcard child - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - end := 0 - for end < len(path) && path[end] != '/' { - end++ - } - - // save param value - if p == nil { - // lazy allocation - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[1:] - p[i].Value = path[:end] - - // we need to go deeper! - if end < len(path) { - if len(n.children) > 0 { - path = path[end:] - n = n.children[0] - continue walk - } - - // ... but we can't - tsr = (len(path) == end+1) - return - } - - if handle = n.handle; handle != nil { - return - } else if len(n.children) == 1 { - // No handle found. Check if a handle for this path + a - // trailing slash exists for TSR recommendation - n = n.children[0] - tsr = (n.path == "/" && n.handle != nil) - } - - return - - case catchAll: - // save param value - if p == nil { - // lazy allocation - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[2:] - p[i].Value = path - - handle = n.handle - return - - default: - panic("invalid node type") - } - } - } else if path == n.path { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if handle = n.handle; handle != nil { - return - } - - // No handle found. Check if a handle for this path + a - // trailing slash exists for trailing slash recommendation - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - tsr = (len(n.path) == 1 && n.handle != nil) || - (n.nType == catchAll && n.children[0].handle != nil) - return - } - } - - return - } - - // Nothing found. We can recommend to redirect to the same URL with an - // extra trailing slash if a leaf exists for that path - tsr = (path == "/") || - (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && - path == n.path[:len(n.path)-1] && n.handle != nil) - return - } -} - -// Makes a case-insensitive lookup of the given path and tries to find a handler. -// It can optionally also fix trailing slashes. -// It returns the case-corrected path and a bool indicating whether the lookup -// was successful. 
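// Editor's illustration (not in the original source), consistent with the
// test table in tree_test.go below: with the route "/ABC/" registered and
// fixTrailingSlash enabled, findCaseInsensitivePath("/abc") returns
// ("/ABC/", true), correcting the case and appending the missing trailing
// slash in one step; with fixTrailingSlash disabled the same lookup fails.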
-func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { - ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory - - // Outer loop for walking the tree - for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) { - path = path[len(n.path):] - ciPath = append(ciPath, n.path...) - - if len(path) > 0 { - // If this node does not have a wildcard (param or catchAll) child, - // we can just look up the next child node and continue to walk down - // the tree - if !n.wildChild { - r := unicode.ToLower(rune(path[0])) - for i, index := range n.indices { - // must use recursive approach since both index and - // ToLower(index) could exist. We must check both. - if r == unicode.ToLower(index) { - out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) - if found { - return append(ciPath, out...), true - } - } - } - - // Nothing found. We can recommend to redirect to the same URL - // without a trailing slash if a leaf exists for that path - found = (fixTrailingSlash && path == "/" && n.handle != nil) - return - } - - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - k := 0 - for k < len(path) && path[k] != '/' { - k++ - } - - // add param value to case insensitive path - ciPath = append(ciPath, path[:k]...) - - // we need to go deeper! - if k < len(path) { - if len(n.children) > 0 { - path = path[k:] - n = n.children[0] - continue - } - - // ... but we can't - if fixTrailingSlash && len(path) == k+1 { - return ciPath, true - } - return - } - - if n.handle != nil { - return ciPath, true - } else if fixTrailingSlash && len(n.children) == 1 { - // No handle found. Check if a handle for this path + a - // trailing slash exists - n = n.children[0] - if n.path == "/" && n.handle != nil { - return append(ciPath, '/'), true - } - } - return - - case catchAll: - return append(ciPath, path...), true - - default: - panic("invalid node type") - } - } else { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if n.handle != nil { - return ciPath, true - } - - // No handle found. - // Try to fix the path by adding a trailing slash - if fixTrailingSlash { - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - if (len(n.path) == 1 && n.handle != nil) || - (n.nType == catchAll && n.children[0].handle != nil) { - return append(ciPath, '/'), true - } - return - } - } - } - return - } - } - - // Nothing found. - // Try to fix the path by adding / removing a trailing slash - if fixTrailingSlash { - if path == "/" { - return ciPath, true - } - if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && - strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) && - n.handle != nil { - return append(ciPath, n.path...), true - } - } - return -} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree_test.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree_test.go deleted file mode 100644 index 64f26d1a38..0000000000 --- a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree_test.go +++ /dev/null @@ -1,611 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. 
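// Editor's aside: a hypothetical walk-through, not part of the deleted
// sources, of how addRoute in tree.go above decomposes a route. Inserting
// "/blog/:category/:post" into an empty tree produces a chain of four nodes:
//
//	"/blog/"    static
//	":category" param (wildChild of "/blog/")
//	"/"         static
//	":post"     param, carrying the Handle
//
// A getValue lookup of "/blog/go/request-routers" walks this chain and
// collects Params{Param{"category", "go"}, Param{"post", "request-routers"}}.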
- -package httprouter - -import ( - "fmt" - "net/http" - "reflect" - "strings" - "testing" -) - -func printChildren(n *node, prefix string) { - fmt.Printf(" %02d:%02d %s%s[%d] %v %t %d \r\n", n.priority, n.maxParams, prefix, n.path, len(n.children), n.handle, n.wildChild, n.nType) - for l := len(n.path); l > 0; l-- { - prefix += " " - } - for _, child := range n.children { - printChildren(child, prefix) - } -} - -// Used as a workaround since we can't compare functions or their addresses -var fakeHandlerValue string - -func fakeHandler(val string) Handle { - return func(http.ResponseWriter, *http.Request, Params) { - fakeHandlerValue = val - } -} - -type testRequests []struct { - path string - nilHandler bool - route string - ps Params -} - -func checkRequests(t *testing.T, tree *node, requests testRequests) { - for _, request := range requests { - handler, ps, _ := tree.getValue(request.path) - - if handler == nil { - if !request.nilHandler { - t.Errorf("handle mismatch for route '%s': Expected non-nil handle", request.path) - } - } else if request.nilHandler { - t.Errorf("handle mismatch for route '%s': Expected nil handle", request.path) - } else { - handler(nil, nil, nil) - if fakeHandlerValue != request.route { - t.Errorf("handle mismatch for route '%s': Wrong handle (%s != %s)", request.path, fakeHandlerValue, request.route) - } - } - - if !reflect.DeepEqual(ps, request.ps) { - t.Errorf("Params mismatch for route '%s'", request.path) - } - } -} - -func checkPriorities(t *testing.T, n *node) uint32 { - var prio uint32 - for i := range n.children { - prio += checkPriorities(t, n.children[i]) - } - - if n.handle != nil { - prio++ - } - - if n.priority != prio { - t.Errorf( - "priority mismatch for node '%s': is %d, should be %d", - n.path, n.priority, prio, - ) - } - - return prio -} - -func checkMaxParams(t *testing.T, n *node) uint8 { - var maxParams uint8 - for i := range n.children { - params := checkMaxParams(t, n.children[i]) - if params > maxParams { - maxParams = params - } - } - if n.nType != static && !n.wildChild { - maxParams++ - } - - if n.maxParams != maxParams { - t.Errorf( - "maxParams mismatch for node '%s': is %d, should be %d", - n.path, n.maxParams, maxParams, - ) - } - - return maxParams -} - -func TestCountParams(t *testing.T) { - if countParams("/path/:param1/static/*catch-all") != 2 { - t.Fail() - } - if countParams(strings.Repeat("/:param", 256)) != 255 { - t.Fail() - } -} - -func TestTreeAddAndGet(t *testing.T) { - tree := &node{} - - routes := [...]string{ - "/hi", - "/contact", - "/co", - "/c", - "/a", - "/ab", - "/doc/", - "/doc/go_faq.html", - "/doc/go1.html", - "/α", - "/β", - } - for _, route := range routes { - tree.addRoute(route, fakeHandler(route)) - } - - //printChildren(tree, "") - - checkRequests(t, tree, testRequests{ - {"/a", false, "/a", nil}, - {"/", true, "", nil}, - {"/hi", false, "/hi", nil}, - {"/contact", false, "/contact", nil}, - {"/co", false, "/co", nil}, - {"/con", true, "", nil}, // key mismatch - {"/cona", true, "", nil}, // key mismatch - {"/no", true, "", nil}, // no matching child - {"/ab", false, "/ab", nil}, - {"/α", false, "/α", nil}, - {"/β", false, "/β", nil}, - }) - - checkPriorities(t, tree) - checkMaxParams(t, tree) -} - -func TestTreeWildcard(t *testing.T) { - tree := &node{} - - routes := [...]string{ - "/", - "/cmd/:tool/:sub", - "/cmd/:tool/", - "/src/*filepath", - "/search/", - "/search/:query", - "/user_:name", - "/user_:name/about", - "/files/:dir/*filepath", - "/doc/", - "/doc/go_faq.html", - "/doc/go1.html", -
"/info/:user/public", - "/info/:user/project/:project", - } - for _, route := range routes { - tree.addRoute(route, fakeHandler(route)) - } - - //printChildren(tree, "") - - checkRequests(t, tree, testRequests{ - {"/", false, "/", nil}, - {"/cmd/test/", false, "/cmd/:tool/", Params{Param{"tool", "test"}}}, - {"/cmd/test", true, "", Params{Param{"tool", "test"}}}, - {"/cmd/test/3", false, "/cmd/:tool/:sub", Params{Param{"tool", "test"}, Param{"sub", "3"}}}, - {"/src/", false, "/src/*filepath", Params{Param{"filepath", "/"}}}, - {"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, - {"/search/", false, "/search/", nil}, - {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, - {"/search/someth!ng+in+ünìcodé/", true, "", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, - {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, - {"/user_gopher/about", false, "/user_:name/about", Params{Param{"name", "gopher"}}}, - {"/files/js/inc/framework.js", false, "/files/:dir/*filepath", Params{Param{"dir", "js"}, Param{"filepath", "/inc/framework.js"}}}, - {"/info/gordon/public", false, "/info/:user/public", Params{Param{"user", "gordon"}}}, - {"/info/gordon/project/go", false, "/info/:user/project/:project", Params{Param{"user", "gordon"}, Param{"project", "go"}}}, - }) - - checkPriorities(t, tree) - checkMaxParams(t, tree) -} - -func catchPanic(testFunc func()) (recv interface{}) { - defer func() { - recv = recover() - }() - - testFunc() - return -} - -type testRoute struct { - path string - conflict bool -} - -func testRoutes(t *testing.T, routes []testRoute) { - tree := &node{} - - for _, route := range routes { - recv := catchPanic(func() { - tree.addRoute(route.path, nil) - }) - - if route.conflict { - if recv == nil { - t.Errorf("no panic for conflicting route '%s'", route.path) - } - } else if recv != nil { - t.Errorf("unexpected panic for route '%s': %v", route.path, recv) - } - } - - //printChildren(tree, "") -} - -func TestTreeWildcardConflict(t *testing.T) { - routes := []testRoute{ - {"/cmd/:tool/:sub", false}, - {"/cmd/vet", true}, - {"/src/*filepath", false}, - {"/src/*filepathx", true}, - {"/src/", true}, - {"/src1/", false}, - {"/src1/*filepath", true}, - {"/src2*filepath", true}, - {"/search/:query", false}, - {"/search/invalid", true}, - {"/user_:name", false}, - {"/user_x", true}, - {"/user_:name", false}, - {"/id:id", false}, - {"/id/:id", true}, - } - testRoutes(t, routes) -} - -func TestTreeChildConflict(t *testing.T) { - routes := []testRoute{ - {"/cmd/vet", false}, - {"/cmd/:tool/:sub", true}, - {"/src/AUTHORS", false}, - {"/src/*filepath", true}, - {"/user_x", false}, - {"/user_:name", true}, - {"/id/:id", false}, - {"/id:id", true}, - {"/:id", true}, - {"/*filepath", true}, - } - testRoutes(t, routes) -} - -func TestTreeDupliatePath(t *testing.T) { - tree := &node{} - - routes := [...]string{ - "/", - "/doc/", - "/src/*filepath", - "/search/:query", - "/user_:name", - } - for _, route := range routes { - recv := catchPanic(func() { - tree.addRoute(route, fakeHandler(route)) - }) - if recv != nil { - t.Fatalf("panic inserting route '%s': %v", route, recv) - } - - // Add again - recv = catchPanic(func() { - tree.addRoute(route, nil) - }) - if recv == nil { - t.Fatalf("no panic while inserting duplicate route '%s", route) - } - } - - //printChildren(tree, "") - - checkRequests(t, tree, testRequests{ - {"/", false, "/", nil}, - {"/doc/", false, "/doc/", nil}, - 
{"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, - {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, - {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, - }) -} - -func TestEmptyWildcardName(t *testing.T) { - tree := &node{} - - routes := [...]string{ - "/user:", - "/user:/", - "/cmd/:/", - "/src/*", - } - for _, route := range routes { - recv := catchPanic(func() { - tree.addRoute(route, nil) - }) - if recv == nil { - t.Fatalf("no panic while inserting route with empty wildcard name '%s", route) - } - } -} - -func TestTreeCatchAllConflict(t *testing.T) { - routes := []testRoute{ - {"/src/*filepath/x", true}, - {"/src2/", false}, - {"/src2/*filepath/x", true}, - } - testRoutes(t, routes) -} - -func TestTreeCatchAllConflictRoot(t *testing.T) { - routes := []testRoute{ - {"/", false}, - {"/*filepath", true}, - } - testRoutes(t, routes) -} - -func TestTreeDoubleWildcard(t *testing.T) { - const panicMsg = "only one wildcard per path segment is allowed" - - routes := [...]string{ - "/:foo:bar", - "/:foo:bar/", - "/:foo*bar", - } - - for _, route := range routes { - tree := &node{} - recv := catchPanic(func() { - tree.addRoute(route, nil) - }) - - if rs, ok := recv.(string); !ok || !strings.HasPrefix(rs, panicMsg) { - t.Fatalf(`"Expected panic "%s" for route '%s', got "%v"`, panicMsg, route, recv) - } - } -} - -/*func TestTreeDuplicateWildcard(t *testing.T) { - tree := &node{} - - routes := [...]string{ - "/:id/:name/:id", - } - for _, route := range routes { - ... - } -}*/ - -func TestTreeTrailingSlashRedirect(t *testing.T) { - tree := &node{} - - routes := [...]string{ - "/hi", - "/b/", - "/search/:query", - "/cmd/:tool/", - "/src/*filepath", - "/x", - "/x/y", - "/y/", - "/y/z", - "/0/:id", - "/0/:id/1", - "/1/:id/", - "/1/:id/2", - "/aa", - "/a/", - "/doc", - "/doc/go_faq.html", - "/doc/go1.html", - "/no/a", - "/no/b", - "/api/hello/:name", - } - for _, route := range routes { - recv := catchPanic(func() { - tree.addRoute(route, fakeHandler(route)) - }) - if recv != nil { - t.Fatalf("panic inserting route '%s': %v", route, recv) - } - } - - //printChildren(tree, "") - - tsrRoutes := [...]string{ - "/hi/", - "/b", - "/search/gopher/", - "/cmd/vet", - "/src", - "/x/", - "/y", - "/0/go/", - "/1/go", - "/a", - "/doc/", - } - for _, route := range tsrRoutes { - handler, _, tsr := tree.getValue(route) - if handler != nil { - t.Fatalf("non-nil handler for TSR route '%s", route) - } else if !tsr { - t.Errorf("expected TSR recommendation for route '%s'", route) - } - } - - noTsrRoutes := [...]string{ - "/", - "/no", - "/no/", - "/_", - "/_/", - "/api/world/abc", - } - for _, route := range noTsrRoutes { - handler, _, tsr := tree.getValue(route) - if handler != nil { - t.Fatalf("non-nil handler for No-TSR route '%s", route) - } else if tsr { - t.Errorf("expected no TSR recommendation for route '%s'", route) - } - } -} - -func TestTreeFindCaseInsensitivePath(t *testing.T) { - tree := &node{} - - routes := [...]string{ - "/hi", - "/b/", - "/ABC/", - "/search/:query", - "/cmd/:tool/", - "/src/*filepath", - "/x", - "/x/y", - "/y/", - "/y/z", - "/0/:id", - "/0/:id/1", - "/1/:id/", - "/1/:id/2", - "/aa", - "/a/", - "/doc", - "/doc/go_faq.html", - "/doc/go1.html", - "/doc/go/away", - "/no/a", - "/no/b", - } - - for _, route := range routes { - recv := catchPanic(func() { - tree.addRoute(route, fakeHandler(route)) - }) - if recv != nil { - t.Fatalf("panic inserting route '%s': %v", 
route, recv) - } - } - - // Check out == in for all registered routes - // With fixTrailingSlash = true - for _, route := range routes { - out, found := tree.findCaseInsensitivePath(route, true) - if !found { - t.Errorf("Route '%s' not found!", route) - } else if string(out) != route { - t.Errorf("Wrong result for route '%s': %s", route, string(out)) - } - } - // With fixTrailingSlash = false - for _, route := range routes { - out, found := tree.findCaseInsensitivePath(route, false) - if !found { - t.Errorf("Route '%s' not found!", route) - } else if string(out) != route { - t.Errorf("Wrong result for route '%s': %s", route, string(out)) - } - } - - tests := []struct { - in string - out string - found bool - slash bool - }{ - {"/HI", "/hi", true, false}, - {"/HI/", "/hi", true, true}, - {"/B", "/b/", true, true}, - {"/B/", "/b/", true, false}, - {"/abc", "/ABC/", true, true}, - {"/abc/", "/ABC/", true, false}, - {"/aBc", "/ABC/", true, true}, - {"/aBc/", "/ABC/", true, false}, - {"/abC", "/ABC/", true, true}, - {"/abC/", "/ABC/", true, false}, - {"/SEARCH/QUERY", "/search/QUERY", true, false}, - {"/SEARCH/QUERY/", "/search/QUERY", true, true}, - {"/CMD/TOOL/", "/cmd/TOOL/", true, false}, - {"/CMD/TOOL", "/cmd/TOOL/", true, true}, - {"/SRC/FILE/PATH", "/src/FILE/PATH", true, false}, - {"/x/Y", "/x/y", true, false}, - {"/x/Y/", "/x/y", true, true}, - {"/X/y", "/x/y", true, false}, - {"/X/y/", "/x/y", true, true}, - {"/X/Y", "/x/y", true, false}, - {"/X/Y/", "/x/y", true, true}, - {"/Y/", "/y/", true, false}, - {"/Y", "/y/", true, true}, - {"/Y/z", "/y/z", true, false}, - {"/Y/z/", "/y/z", true, true}, - {"/Y/Z", "/y/z", true, false}, - {"/Y/Z/", "/y/z", true, true}, - {"/y/Z", "/y/z", true, false}, - {"/y/Z/", "/y/z", true, true}, - {"/Aa", "/aa", true, false}, - {"/Aa/", "/aa", true, true}, - {"/AA", "/aa", true, false}, - {"/AA/", "/aa", true, true}, - {"/aA", "/aa", true, false}, - {"/aA/", "/aa", true, true}, - {"/A/", "/a/", true, false}, - {"/A", "/a/", true, true}, - {"/DOC", "/doc", true, false}, - {"/DOC/", "/doc", true, true}, - {"/NO", "", false, true}, - {"/DOC/GO", "", false, true}, - } - // With fixTrailingSlash = true - for _, test := range tests { - out, found := tree.findCaseInsensitivePath(test.in, true) - if found != test.found || (found && (string(out) != test.out)) { - t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", - test.in, string(out), found, test.out, test.found) - return - } - } - // With fixTrailingSlash = false - for _, test := range tests { - out, found := tree.findCaseInsensitivePath(test.in, false) - if test.slash { - if found { // test needs a trailingSlash fix. It must not be found! 
- t.Errorf("Found without fixTrailingSlash: %s; got %s", test.in, string(out)) - } - } else { - if found != test.found || (found && (string(out) != test.out)) { - t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", - test.in, string(out), found, test.out, test.found) - return - } - } - } -} - -func TestTreeInvalidNodeType(t *testing.T) { - const panicMsg = "invalid node type" - - tree := &node{} - tree.addRoute("/", fakeHandler("/")) - tree.addRoute("/:page", fakeHandler("/:page")) - - // set invalid node type - tree.children[0].nType = 42 - - // normal lookup - recv := catchPanic(func() { - tree.getValue("/test") - }) - if rs, ok := recv.(string); !ok || rs != panicMsg { - t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) - } - - // case-insensitive lookup - recv = catchPanic(func() { - tree.findCaseInsensitivePath("/test", true) - }) - if rs, ok := recv.(string); !ok || rs != panicMsg { - t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) - } -} diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/LICENSE b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 13f15dfce0..0000000000 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2013 Matt T. Proud - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go deleted file mode 100644 index 094156e66c..0000000000 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pbutil - -import ( - "bytes" - "math/rand" - "reflect" - "testing" - "testing/quick" - - "github.com/matttproud/golang_protobuf_extensions/pbtest" - - . "github.com/golang/protobuf/proto" - . "github.com/golang/protobuf/proto/testdata" -) - -func TestWriteDelimited(t *testing.T) { - for _, test := range []struct { - msg Message - buf []byte - n int - err error - }{ - { - msg: &Empty{}, - n: 1, - buf: []byte{0}, - }, - { - msg: &GoEnum{Foo: FOO_FOO1.Enum()}, - n: 3, - buf: []byte{2, 8, 1}, - }, - { - msg: &Strings{ - StringField: String(`This is my gigantic, unhappy string. It exceeds -the encoding size of a single byte varint. We are using it to fuzz test the -correctness of the header decoding mechanisms, which may prove problematic. -I expect it may. Let's hope you enjoy testing as much as we do.`), - }, - n: 271, - buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, - 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, - 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, - 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, - 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, - 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, - 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, - 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, - 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, - 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, - 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103, - 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, - 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, - 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, - 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, - 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, - 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, - 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, - }, - } { - var buf bytes.Buffer - if n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err { - t.Fatalf("WriteDelimited(buf, %#v) = %v, %v; want %v, %v", test.msg, n, err, test.n, test.err) - } - if out := buf.Bytes(); !bytes.Equal(out, test.buf) { - t.Fatalf("WriteDelimited(buf, %#v); buf = %v; want %v", test.msg, out, test.buf) - } - } -} - -func TestReadDelimited(t *testing.T) { - for _, test := range []struct { - buf []byte - msg Message - n int - err error - }{ - { - buf: []byte{0}, - msg: &Empty{}, - n: 1, - }, - { - n: 3, - buf: []byte{2, 8, 1}, - msg: &GoEnum{Foo: FOO_FOO1.Enum()}, - }, - { - buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, - 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, - 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, - 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, - 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, - 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, - 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, - 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, - 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, - 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, - 104, 101, 97, 100, 101, 114, 32, 100, 101, 
99, 111, 100, 105, 110, 103, - 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, - 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, - 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, - 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, - 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, - 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, - 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, - msg: &Strings{ - StringField: String(`This is my gigantic, unhappy string. It exceeds -the encoding size of a single byte varint. We are using it to fuzz test the -correctness of the header decoding mechanisms, which may prove problematic. -I expect it may. Let's hope you enjoy testing as much as we do.`), - }, - n: 271, - }, - } { - msg := Clone(test.msg) - msg.Reset() - if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err { - t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err) - } - if !Equal(msg, test.msg) { - t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg) - } - } -} - -func TestEndToEndValid(t *testing.T) { - for _, test := range [][]Message{ - {&Empty{}}, - {&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}}, - {&GoEnum{Foo: FOO_FOO1.Enum()}}, - {&Strings{ - StringField: String(`This is my gigantic, unhappy string. It exceeds -the encoding size of a single byte varint. We are using it to fuzz test the -correctness of the header decoding mechanisms, which may prove problematic. -I expect it may. Let's hope you enjoy testing as much as we do.`), - }}, - } { - var buf bytes.Buffer - var written int - for i, msg := range test { - n, err := WriteDelimited(&buf, msg) - if err != nil { - // Assumption: TestReadDelimited and TestWriteDelimited are sufficient - // and inputs for this test are explicitly exercised there. - t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", test, i, err) - } - written += n - } - var read int - for i, msg := range test { - out := Clone(msg) - out.Reset() - n, _ := ReadDelimited(&buf, out) - // Decide to do EOF checking? - read += n - if !Equal(out, msg) { - t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg) - } - } - if read != written { - t.Fatalf("%v read = %d; want %d", test, read, written) - } - } -} - -// rndMessage generates a random valid Protocol Buffer message. -func rndMessage(r *rand.Rand) Message { - var t reflect.Type - switch v := rand.Intn(23); v { - // TODO(br): Uncomment the elements below once fix is incorporated, except - // for the elements marked as patently incompatible. 
- // case 0: - // t = reflect.TypeOf(&GoEnum{}) - // break - // case 1: - // t = reflect.TypeOf(&GoTestField{}) - // break - case 2: - t = reflect.TypeOf(&GoTest{}) - break - // case 3: - // t = reflect.TypeOf(&GoSkipTest{}) - // break - // case 4: - // t = reflect.TypeOf(&NonPackedTest{}) - // break - // case 5: - // t = reflect.TypeOf(&PackedTest{}) - // break - case 6: - t = reflect.TypeOf(&MaxTag{}) - break - case 7: - t = reflect.TypeOf(&OldMessage{}) - break - case 8: - t = reflect.TypeOf(&NewMessage{}) - break - case 9: - t = reflect.TypeOf(&InnerMessage{}) - break - case 10: - t = reflect.TypeOf(&OtherMessage{}) - break - case 11: - // PATENTLY INVALID FOR FUZZ GENERATION - // t = reflect.TypeOf(&MyMessage{}) - break - // case 12: - // t = reflect.TypeOf(&Ext{}) - // break - case 13: - // PATENTLY INVALID FOR FUZZ GENERATION - // t = reflect.TypeOf(&MyMessageSet{}) - break - // case 14: - // t = reflect.TypeOf(&Empty{}) - // break - // case 15: - // t = reflect.TypeOf(&MessageList{}) - // break - // case 16: - // t = reflect.TypeOf(&Strings{}) - // break - // case 17: - // t = reflect.TypeOf(&Defaults{}) - // break - // case 17: - // t = reflect.TypeOf(&SubDefaults{}) - // break - // case 18: - // t = reflect.TypeOf(&RepeatedEnum{}) - // break - case 19: - t = reflect.TypeOf(&MoreRepeated{}) - break - // case 20: - // t = reflect.TypeOf(&GroupOld{}) - // break - // case 21: - // t = reflect.TypeOf(&GroupNew{}) - // break - case 22: - t = reflect.TypeOf(&FloatingPoint{}) - break - default: - // TODO(br): Replace with an unreachable once fixed. - t = reflect.TypeOf(&GoTest{}) - break - } - if t == nil { - t = reflect.TypeOf(&GoTest{}) - } - v, ok := quick.Value(t, r) - if !ok { - panic("attempt to generate illegal item; consult item 11") - } - if err := pbtest.SanitizeGenerated(v.Interface().(Message)); err != nil { - panic(err) - } - return v.Interface().(Message) -} - -// rndMessages generates several random Protocol Buffer messages. -func rndMessages(r *rand.Rand) []Message { - n := r.Intn(128) - out := make([]Message, 0, n) - for i := 0; i < n; i++ { - out = append(out, rndMessage(r)) - } - return out -} - -func TestFuzz(t *testing.T) { - rnd := rand.New(rand.NewSource(42)) - check := func() bool { - messages := rndMessages(rnd) - var buf bytes.Buffer - var written int - for i, msg := range messages { - n, err := WriteDelimited(&buf, msg) - if err != nil { - t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", messages, i, err) - } - written += n - } - var read int - for i, msg := range messages { - out := Clone(msg) - out.Reset() - n, _ := ReadDelimited(&buf, out) - read += n - if !Equal(out, msg) { - t.Fatalf("out = %v; want %v[%d] = %#v", out, messages, i, msg) - } - } - if read != written { - t.Fatalf("%v read = %d; want %d", messages, read, written) - } - return true - } - if err := quick.Check(check, nil); err != nil { - t.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 66d9b5458f..0000000000 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - headerBuf := make([]byte, binary.MaxVarintLen32) - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... - continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go deleted file mode 100644 index c318385cbe..0000000000 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 4b76ea9a1d..0000000000 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and writes a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. -func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - buf := make([]byte, binary.MaxVarintLen32) - encodedLength := binary.PutUvarint(buf, uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go deleted file mode 100644 index d6d9b25594..0000000000 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// http://github.com/golang/protobuf/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package pbutil - -import ( - . "github.com/golang/protobuf/proto" - . "github.com/golang/protobuf/proto/testdata" -) - -// FROM https://github.com/golang/protobuf/blob/master/proto/all_test.go. - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) -func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = []byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/.gitignore b/Godeps/_workspace/src/github.com/miekg/dns/.gitignore deleted file mode 100644 index 
776cd950c2..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.6 -tags -test.out -a.out diff --git a/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml b/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml deleted file mode 100644 index f0a9d223c3..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test -short -bench=. diff --git a/Godeps/_workspace/src/github.com/miekg/dns/AUTHORS b/Godeps/_workspace/src/github.com/miekg/dns/AUTHORS deleted file mode 100644 index 1965683525..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Miek Gieben diff --git a/Godeps/_workspace/src/github.com/miekg/dns/CONTRIBUTORS b/Godeps/_workspace/src/github.com/miekg/dns/CONTRIBUTORS deleted file mode 100644 index f77e8a895f..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Alex A. Skinner -Andrew Tunnell-Jones -Ask Bjørn Hansen -Dave Cheney -Dusty Wilson -Marek Majkowski -Peter van Dijk -Omri Bahumi -Alex Sergeyev diff --git a/Godeps/_workspace/src/github.com/miekg/dns/COPYRIGHT b/Godeps/_workspace/src/github.com/miekg/dns/COPYRIGHT deleted file mode 100644 index 35702b10e8..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/COPYRIGHT +++ /dev/null @@ -1,9 +0,0 @@ -Copyright 2009 The Go Authors. All rights reserved. Use of this source code -is governed by a BSD-style license that can be found in the LICENSE file. -Extensions of the original work are copyright (c) 2011 Miek Gieben - -Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. - -Copyright 2014 CloudFlare. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. diff --git a/Godeps/_workspace/src/github.com/miekg/dns/LICENSE b/Godeps/_workspace/src/github.com/miekg/dns/LICENSE deleted file mode 100644 index 5763fa7fe5..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/LICENSE +++ /dev/null @@ -1,32 +0,0 @@ -Extensions of the original work are copyright (c) 2011 Miek Gieben - -As this is fork of the official Go code the same license applies: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/Godeps/_workspace/src/github.com/miekg/dns/README.md b/Godeps/_workspace/src/github.com/miekg/dns/README.md deleted file mode 100644 index 73d6270985..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/README.md +++ /dev/null @@ -1,144 +0,0 @@ -[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) - -# Alternative (more granular) approach to a DNS library - -> Less is more. - -Complete and usable DNS library. All widely used Resource Records are -supported, including the DNSSEC types. It follows a lean and mean philosophy. -If there is stuff you should know as a DNS programmer there isn't a convenience -function for it. Server side and client side programming is supported, i.e. you -can build servers and resolvers with it. - -If you like this, you may also be interested in: - -* https://github.com/miekg/unbound -- Go wrapper for the Unbound resolver. - -# Goals - -* KISS; -* Fast; -* Small API; if it's easy to code in Go, don't make a function for it. - -# Users - -A not-so-up-to-date-list-that-may-be-actually-current: - -* https://github.com/abh/geodns -* http://www.statdns.com/ -* http://www.dnsinspect.com/ -* https://github.com/chuangbo/jianbing-dictionary-dns -* http://www.dns-lg.com/ -* https://github.com/fcambus/rrda -* https://github.com/kenshinx/godns -* https://github.com/skynetservices/skydns -* https://github.com/DevelopersPL/godnsagent -* https://github.com/duedil-ltd/discodns -* https://github.com/StalkR/dns-reverse-proxy -* https://github.com/tianon/rawdns -* https://mesosphere.github.io/mesos-dns/ -* https://pulse.turbobytes.com/ - -Send a pull request if you want to be listed here. - -# Features - -* UDP/TCP queries, IPv4 and IPv6; -* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported); -* Fast: - * Reply speed around ~ 80K qps (faster hardware results in more qps); - * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds; -* Server side programming (mimicking the net/http package); -* Client side programming; -* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA; -* EDNS0, NSID; -* AXFR/IXFR; -* TSIG, SIG(0); -* DNS name compression; -* Depends only on the standard library. - -Have fun! - -Miek Gieben - 2010-2012 - - -# Building - -Building is done with the `go` tool. If you have set up your GOPATH -correctly, the following should work: - - go get github.com/miekg/dns - go build github.com/miekg/dns - -## Examples - -A short "how to use the API" is at the beginning of doc.go (this will also show -when you call `godoc github.com/miekg/dns`). - -Example programs can be found in the `github.com/miekg/exdns` repository. 
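-For a first taste of the client side, a minimal synchronous query looks
-roughly like this (a sketch: the queried name and the resolver address
-127.0.0.1:53 are placeholders, and real code should handle the error):
-
-    package main
-
-    import (
-        "fmt"
-
-        "github.com/miekg/dns"
-    )
-
-    func main() {
-        m := new(dns.Msg)
-        m.SetQuestion(dns.Fqdn("example.org"), dns.TypeSOA)
-
-        c := new(dns.Client)
-        // Exchange neither retries nor falls back to TCP on truncation.
-        in, rtt, err := c.Exchange(m, "127.0.0.1:53")
-        if err != nil {
-            fmt.Println(err)
-            return
-        }
-        fmt.Println(rtt, in)
-    }
-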
- -## Supported RFCs - -*all of them* - -* 103{4,5} - DNS standard -* 1348 - NSAP record -* 1982 - Serial Arithmetic -* 1876 - LOC record -* 1995 - IXFR -* 1996 - DNS notify -* 2136 - DNS Update (dynamic updates) -* 2181 - RRset definition - there is no RRset type though, just []RR -* 2537 - RSAMD5 DNS keys -* 2065 - DNSSEC (updated in later RFCs) -* 2671 - EDNS record -* 2782 - SRV record -* 2845 - TSIG record -* 2915 - NAPTR record -* 2929 - DNS IANA Considerations -* 3110 - RSASHA1 DNS keys -* 3225 - DO bit (DNSSEC OK) -* 340{1,2,3} - NAPTR record -* 3445 - Limiting the scope of (DNS)KEY -* 3597 - Unknown RRs -* 4025 - IPSECKEY -* 403{3,4,5} - DNSSEC + validation functions -* 4255 - SSHFP record -* 4343 - Case insensitivity -* 4408 - SPF record -* 4509 - SHA256 Hash in DS -* 4592 - Wildcards in the DNS -* 4635 - HMAC SHA TSIG -* 4701 - DHCID -* 4892 - id.server -* 5001 - NSID -* 5155 - NSEC3 record -* 5205 - HIP record -* 5702 - SHA2 in the DNS -* 5936 - AXFR -* 5966 - TCP implementation recommendations -* 6605 - ECDSA -* 6725 - IANA Registry Update -* 6742 - ILNP DNS -* 6844 - CAA record -* 6891 - EDNS0 update -* 6895 - DNS IANA considerations -* 6975 - Algorithm Understanding in DNSSEC -* 7043 - EUI48/EUI64 records -* 7314 - DNS (EDNS) EXPIRE Option -* xxxx - URI record (draft) -* xxxx - EDNS0 DNS Update Lease (draft) - -## Loosely based upon - -* `ldns` -* `NSD` -* `Net::DNS` -* `GRONG` - -## TODO - -* privatekey.Precompute() when signing? -* Last remaining RRs: APL, ATMA, A6, NXT and IPSECKEY; -* Missing in parsing: ISDN, UNSPEC, ATMA; -* NSEC(3) cover/match/closest enclose; -* Replies with TC bit are not parsed to the end; diff --git a/Godeps/_workspace/src/github.com/miekg/dns/client.go b/Godeps/_workspace/src/github.com/miekg/dns/client.go deleted file mode 100644 index 140a7add7b..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/client.go +++ /dev/null @@ -1,378 +0,0 @@ -package dns - -// A client implementation. - -import ( - "bytes" - "io" - "net" - "time" -) - -const dnsTimeout time.Duration = 2 * time.Second -const tcpIdleTimeout time.Duration = 8 * time.Second - -// A Conn represents a connection to a DNS server. -type Conn struct { - net.Conn // a net.Conn holding the connection - UDPSize uint16 // minimum receive buffer for UDP messages - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified - rtt time.Duration - t time.Time - tsigRequestMAC string -} - -// A Client defines parameters for a DNS client. -type Client struct { - Net string // if "tcp" a TCP query will be initiated, otherwise a UDP one (default is "" for UDP) - UDPSize uint16 // minimum receive buffer for UDP messages - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified - SingleInflight bool // if true, suppress multiple outstanding queries for the same Qname, Qtype and Qclass - group singleflight -} - -// Exchange performs a synchronous UDP query. It sends the message m to the address -// contained in a and waits for a reply. Exchange does not retry a failed query, nor -// will it fall back to TCP in case of truncation. 
-// If you need to send a DNS message on an already existing connection, you can use the -// following: -// -// co := &dns.Conn{Conn: c} // c is your net.Conn -// co.WriteMsg(m) -// in, err := co.ReadMsg() -// co.Close() -// -func Exchange(m *Msg, a string) (r *Msg, err error) { - var co *Conn - co, err = DialTimeout("udp", a, dnsTimeout) - if err != nil { - return nil, err - } - - defer co.Close() - co.SetReadDeadline(time.Now().Add(dnsTimeout)) - co.SetWriteDeadline(time.Now().Add(dnsTimeout)) - - opt := m.IsEdns0() - // If EDNS0 is used, use that for size. - if opt != nil && opt.UDPSize() >= MinMsgSize { - co.UDPSize = opt.UDPSize() - } - - if err = co.WriteMsg(m); err != nil { - return nil, err - } - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, err -} - -// ExchangeConn performs a synchronous query. It sends the message m via the connection -// c and waits for a reply. The connection c is not closed by ExchangeConn. -// This function is going away, but can easily be mimicked: -// -// co := &dns.Conn{Conn: c} // c is your net.Conn -// co.WriteMsg(m) -// in, _ := co.ReadMsg() -// co.Close() -// -func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { - println("dns: this function is deprecated") - co := new(Conn) - co.Conn = c - if err = co.WriteMsg(m); err != nil { - return nil, err - } - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, err -} - -// Exchange performs a synchronous query. It sends the message m to the address -// contained in a and waits for a reply. Basic use pattern with a *dns.Client: -// -// c := new(dns.Client) -// in, rtt, err := c.Exchange(message, "127.0.0.1:53") -// -// Exchange does not retry a failed query, nor will it fall back to TCP in -// case of truncation. -func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight { - return c.exchange(m, a) - } - // This adds a bunch of garbage, TODO(miek). - t := "nop" - if t1, ok := TypeToString[m.Question[0].Qtype]; ok { - t = t1 - } - cl := "nop" - if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { - cl = cl1 - } - r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { - return c.exchange(m, a) - }) - if err != nil { - return r, rtt, err - } - if shared { - return r.Copy(), rtt, nil - } - return r, rtt, nil -} - -func (c *Client) dialTimeout() time.Duration { - if c.DialTimeout != 0 { - return c.DialTimeout - } - return dnsTimeout -} - -func (c *Client) readTimeout() time.Duration { - if c.ReadTimeout != 0 { - return c.ReadTimeout - } - return dnsTimeout -} - -func (c *Client) writeTimeout() time.Duration { - if c.WriteTimeout != 0 { - return c.WriteTimeout - } - return dnsTimeout -} - -func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - var co *Conn - if c.Net == "" { - co, err = DialTimeout("udp", a, c.dialTimeout()) - } else { - co, err = DialTimeout(c.Net, a, c.dialTimeout()) - } - if err != nil { - return nil, 0, err - } - defer co.Close() - - opt := m.IsEdns0() - // If EDNS0 is used, use that for size. - if opt != nil && opt.UDPSize() >= MinMsgSize { - co.UDPSize = opt.UDPSize() - } - // Otherwise use the client's configured UDP size. 
- if opt == nil && c.UDPSize >= MinMsgSize { - co.UDPSize = c.UDPSize - } - - co.SetReadDeadline(time.Now().Add(c.readTimeout())) - co.SetWriteDeadline(time.Now().Add(c.writeTimeout())) - - co.TsigSecret = c.TsigSecret - if err = co.WriteMsg(m); err != nil { - return nil, 0, err - } - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, co.rtt, err -} - -// ReadMsg reads a message from the connection co. -// If the received message contains a TSIG record the transaction -// signature is verified. -func (co *Conn) ReadMsg() (*Msg, error) { - p, err := co.ReadMsgHeader(nil) - if err != nil { - return nil, err - } - - m := new(Msg) - if err := m.Unpack(p); err != nil { - return nil, err - } - if t := m.IsTsig(); t != nil { - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - } - return m, err -} - -// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). -// Returns the message as a byte slice to be parsed with Msg.Unpack later on. -// Note that error handling on the message body is not possible as only the header is parsed. -func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { - var ( - p []byte - n int - err error - ) - - if t, ok := co.Conn.(*net.TCPConn); ok { - // First two bytes specify the length of the entire message. - l, err := tcpMsgLen(t) - if err != nil { - return nil, err - } - p = make([]byte, l) - n, err = tcpRead(t, p) - } else { - if co.UDPSize > MinMsgSize { - p = make([]byte, co.UDPSize) - } else { - p = make([]byte, MinMsgSize) - } - n, err = co.Read(p) - } - - if err != nil { - return nil, err - } else if n < headerSize { - return nil, ErrShortRead - } - - p = p[:n] - if hdr != nil { - if _, err = UnpackStruct(hdr, p, 0); err != nil { - return nil, err - } - } - return p, err -} - -// tcpMsgLen is a helper func to read the first two bytes of a stream as a uint16 packet length. -func tcpMsgLen(t *net.TCPConn) (int, error) { - p := []byte{0, 0} - n, err := t.Read(p) - if err != nil { - return 0, err - } - if n != 2 { - return 0, ErrShortRead - } - l, _ := unpackUint16(p, 0) - if l == 0 { - return 0, ErrShortRead - } - return int(l), nil -} - -// tcpRead calls TCPConn.Read enough times to fill the allocated buffer. -func tcpRead(t *net.TCPConn, p []byte) (int, error) { - n, err := t.Read(p) - if err != nil { - return n, err - } - for n < len(p) { - j, err := t.Read(p[n:]) - if err != nil { - return n, err - } - n += j - } - return n, err -} - -// Read implements the net.Conn read method. -func (co *Conn) Read(p []byte) (n int, err error) { - if co.Conn == nil { - return 0, ErrConnEmpty - } - if len(p) < 2 { - return 0, io.ErrShortBuffer - } - if t, ok := co.Conn.(*net.TCPConn); ok { - l, err := tcpMsgLen(t) - if err != nil { - return 0, err - } - if l > len(p) { - return int(l), io.ErrShortBuffer - } - return tcpRead(t, p[:l]) - } - // UDP connection - n, err = co.Conn.Read(p) - if err != nil { - return n, err - } - - co.rtt = time.Since(co.t) - return n, err -} - -// WriteMsg sends a message through the connection co. -// If the message m contains a TSIG record the transaction -// signature is calculated. 
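-//
-// A full round trip over an existing connection can be sketched as follows
-// (c is assumed to be an already dialed net.Conn, and error handling is
-// elided):
-//
-// co := &Conn{Conn: c}
-// co.WriteMsg(m)
-// in, err := co.ReadMsg()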
-func (co *Conn) WriteMsg(m *Msg) (err error) { - var out []byte - if t := m.IsTsig(); t != nil { - mac := "" - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return ErrSecret - } - out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - // Set for the next read, although only used in zone transfers - co.tsigRequestMAC = mac - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - co.t = time.Now() - if _, err = co.Write(out); err != nil { - return err - } - return nil -} - -// Write implements the net.Conn Write method. -func (co *Conn) Write(p []byte) (n int, err error) { - if t, ok := co.Conn.(*net.TCPConn); ok { - lp := len(p) - if lp < 2 { - return 0, io.ErrShortBuffer - } - if lp > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - l := make([]byte, 2, lp+2) - l[0], l[1] = packUint16(uint16(lp)) - p = append(l, p...) - n, err := io.Copy(t, bytes.NewReader(p)) - return int(n), err - } - n, err = co.Conn.(*net.UDPConn).Write(p) - return n, err -} - -// Dial connects to the address on the named network. -func Dial(network, address string) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = net.Dial(network, address) - if err != nil { - return nil, err - } - return conn, nil -} - -// DialTimeout acts like Dial but takes a timeout. -func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = net.DialTimeout(network, address, timeout) - if err != nil { - return nil, err - } - return conn, nil -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/client_test.go b/Godeps/_workspace/src/github.com/miekg/dns/client_test.go deleted file mode 100644 index f2ade1485a..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/client_test.go +++ /dev/null @@ -1,286 +0,0 @@ -package dns - -import ( - "strconv" - "testing" - "time" -) - -func TestClientSync(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get a valid answer\n%v", r) - } - // And now with plain Exchange(). - r, err = Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - if r == nil || r.Rcode != RcodeSuccess { - t.Errorf("failed to get a valid answer\n%v", r) - } -} - -func TestClientSyncBadId(t *testing.T) { - HandleFunc("miek.nl.", HelloServerBadId) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - c := new(Client) - if _, _, err := c.Exchange(m, addrstr); err != ErrId { - t.Errorf("did not find a bad Id") - } - // And now with plain Exchange(). 
- if _, err := Exchange(m, addrstr); err != ErrId { - t.Errorf("did not find a bad Id") - } -} - -func TestClientEDNS0(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeDNSKEY) - - m.SetEdns0(2048, true) - - c := new(Client) - r, _, err := c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - - if r != nil && r.Rcode != RcodeSuccess { - t.Errorf("failed to get a valid answer\n%v", r) - } -} - -// Validates the transmission and parsing of local EDNS0 options. -func TestClientEDNS0Local(t *testing.T) { - - optStr1 := "1979:0x0707" - optStr2 := strconv.Itoa(EDNS0LOCALSTART) + ":0x0601" - - handler := func(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - m.Extra = make([]RR, 1, 2) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello local edns"}} - - // If the local options are what we expect, then reflect them back. - ec1 := req.Extra[0].(*OPT).Option[0].(*EDNS0_LOCAL).String() - ec2 := req.Extra[0].(*OPT).Option[1].(*EDNS0_LOCAL).String() - if ec1 == optStr1 && ec2 == optStr2 { - m.Extra = append(m.Extra, req.Extra[0]) - } - - w.WriteMsg(m) - } - - HandleFunc("miek.nl.", handler) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %s", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - - // Add two local edns options to the query. - ec1 := &EDNS0_LOCAL{Code: 1979, Data: []byte{7, 7}} - ec2 := &EDNS0_LOCAL{Code: EDNS0LOCALSTART, Data: []byte{6, 1}} - o := &OPT{Hdr: RR_Header{Name: ".", Rrtype: TypeOPT}, Option: []EDNS0{ec1, ec2}} - m.Extra = append(m.Extra, o) - - c := new(Client) - r, _, e := c.Exchange(m, addrstr) - if e != nil { - t.Logf("failed to exchange: %s", e.Error()) - t.Fail() - } - - if r != nil && r.Rcode != RcodeSuccess { - t.Log("failed to get a valid answer") - t.Fail() - t.Logf("%v\n", r) - } - - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello local edns" { - t.Log("Unexpected result for miek.nl", txt, "!= Hello local edns") - t.Fail() - } - - // Validate the local options in the reply. - got := r.Extra[1].(*OPT).Option[0].(*EDNS0_LOCAL).String() - if got != optStr1 { - t.Logf("failed to get local edns0 answer; got %s, expected %s", got, optStr1) - t.Fail() - t.Logf("%v\n", r) - } - - got = r.Extra[1].(*OPT).Option[1].(*EDNS0_LOCAL).String() - if got != optStr2 { - t.Logf("failed to get local edns0 answer; got %s, expected %s", got, optStr2) - t.Fail() - t.Logf("%v\n", r) - } -} - -func TestSingleSingleInflight(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeDNSKEY) - - c := new(Client) - c.SingleInflight = true - nr := 10 - ch := make(chan time.Duration) - for i := 0; i < nr; i++ { - go func() { - _, rtt, _ := c.Exchange(m, addrstr) - ch <- rtt - }() - } - i := 0 - var first time.Duration - // With SingleInflight *all* rtts are identical, and by doing actual lookups - // the chance that this is a coincidence is small. 
-Loop: - for { - select { - case rtt := <-ch: - if i == 0 { - first = rtt - } else { - if first != rtt { - t.Errorf("all rtts should be equal. got %d want %d", rtt, first) - } - } - i++ - if i == 10 { - break Loop - } - } - } -} - -// ExampleUpdateLeaseTSIG shows how to update a lease signed with TSIG. -func ExampleUpdateLeaseTSIG(t *testing.T) { - m := new(Msg) - m.SetUpdate("t.local.ip6.io.") - rr, _ := NewRR("t.local.ip6.io. 30 A 127.0.0.1") - rrs := make([]RR, 1) - rrs[0] = rr - m.Insert(rrs) - - leaseRr := new(OPT) - leaseRr.Hdr.Name = "." - leaseRr.Hdr.Rrtype = TypeOPT - e := new(EDNS0_UL) - e.Code = EDNS0UL - e.Lease = 120 - leaseRr.Option = append(leaseRr.Option, e) - m.Extra = append(m.Extra, leaseRr) - - c := new(Client) - m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix()) - c.TsigSecret = map[string]string{"polvi.": "pRZgBrBvI4NAHZYhxmhs/Q=="} - - _, _, err := c.Exchange(m, "127.0.0.1:53") - if err != nil { - t.Error(err) - } -} - -func TestClientConn(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - - // This uses TCP just to make it slightly different from TestClientSync - s, addrstr, err := RunLocalTCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - m := new(Msg) - m.SetQuestion("miek.nl.", TypeSOA) - - cn, err := Dial("tcp", addrstr) - if err != nil { - t.Errorf("failed to dial %s: %v", addrstr, err) - } - - err = cn.WriteMsg(m) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - r, err := cn.ReadMsg() - if r == nil || r.Rcode != RcodeSuccess { - t.Errorf("failed to get a valid answer\n%v", r) - } - - err = cn.WriteMsg(m) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - h := new(Header) - buf, err := cn.ReadMsgHeader(h) - if buf == nil { - t.Errorf("failed to get a valid answer\n%v", r) - } - if int(h.Bits&0xF) != RcodeSuccess { - t.Errorf("failed to get a valid answer in ReadMsgHeader\n%v", r) - } - if h.Ancount != 0 || h.Qdcount != 1 || h.Nscount != 0 || h.Arcount != 1 { - t.Errorf("expected to have question and additional in response; got something else: %+v", h) - } - if err = r.Unpack(buf); err != nil { - t.Errorf("unable to unpack message fully: %v", err) - } - -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go b/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go deleted file mode 100644 index cfa9ad0b22..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go +++ /dev/null @@ -1,99 +0,0 @@ -package dns - -import ( - "bufio" - "os" - "strconv" - "strings" -) - -// ClientConfig wraps the contents of the /etc/resolv.conf file. -type ClientConfig struct { - Servers []string // servers to use - Search []string // suffixes to append to local name - Port string // what port to use - Ndots int // number of dots in name to trigger absolute lookup - Timeout int // seconds before giving up on packet - Attempts int // lost packets before giving up on server, not used in the package dns -} - -// ClientConfigFromFile parses a resolv.conf(5)-like file and returns -// a *ClientConfig. 
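-//
-// A minimal use might look like this (a sketch: the conventional path
-// /etc/resolv.conf is an assumption and error handling is elided):
-//
-// conf, _ := ClientConfigFromFile("/etc/resolv.conf")
-// server := net.JoinHostPort(conf.Servers[0], conf.Port)
-//
-// The resulting server string can then be passed as the address to
-// (*Client).Exchange.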
-func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { - file, err := os.Open(resolvconf) - if err != nil { - return nil, err - } - defer file.Close() - c := new(ClientConfig) - scanner := bufio.NewScanner(file) - c.Servers = make([]string, 0) - c.Search = make([]string, 0) - c.Port = "53" - c.Ndots = 1 - c.Timeout = 5 - c.Attempts = 2 - - for scanner.Scan() { - if err := scanner.Err(); err != nil { - return nil, err - } - line := scanner.Text() - f := strings.Fields(line) - if len(f) < 1 { - continue - } - switch f[0] { - case "nameserver": // add one name server - if len(f) > 1 { - // One more check: make sure server name is - // just an IP address. Otherwise we need DNS - // to look it up. - name := f[1] - c.Servers = append(c.Servers, name) - } - - case "domain": // set search path to just this domain - if len(f) > 1 { - c.Search = make([]string, 1) - c.Search[0] = f[1] - } else { - c.Search = make([]string, 0) - } - - case "search": // set search path to given domains - c.Search = make([]string, len(f)-1) - for i := 0; i < len(c.Search); i++ { - c.Search[i] = f[i+1] - } - - case "options": // magic options - for i := 1; i < len(f); i++ { - s := f[i] - switch { - case len(s) >= 6 && s[:6] == "ndots:": - n, _ := strconv.Atoi(s[6:]) - if n < 1 { - n = 1 - } - c.Ndots = n - case len(s) >= 8 && s[:8] == "timeout:": - n, _ := strconv.Atoi(s[8:]) - if n < 1 { - n = 1 - } - c.Timeout = n - case len(s) >= 9 && s[:9] == "attempts:": - n, _ := strconv.Atoi(s[9:]) - if n < 1 { - n = 1 - } - c.Attempts = n - case s == "rotate": - /* not imp */ - } - } - } - } - return c, nil -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/clientconfig_test.go b/Godeps/_workspace/src/github.com/miekg/dns/clientconfig_test.go deleted file mode 100644 index f01a59be6a..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/clientconfig_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package dns - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -const normal string = ` -# Comment -domain somedomain.com -nameserver 10.28.10.2 -nameserver 11.28.10.1 -` - -const missingNewline string = ` -domain somedomain.com -nameserver 10.28.10.2 -nameserver 11.28.10.1` // <- NOTE: NO newline. - -func testConfig(t *testing.T, data string) { - tempDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("TempDir: %v", err) - } - defer os.RemoveAll(tempDir) - - path := filepath.Join(tempDir, "resolv.conf") - if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil { - t.Fatalf("WriteFile: %v", err) - } - cc, err := ClientConfigFromFile(path) - if err != nil { - t.Errorf("error parsing resolv.conf: %v", err) - } - if l := len(cc.Servers); l != 2 { - t.Errorf("incorrect number of nameservers detected: %d", l) - } - if l := len(cc.Search); l != 1 { - t.Errorf("domain directive not parsed correctly: %v", cc.Search) - } else { - if cc.Search[0] != "somedomain.com" { - t.Errorf("domain is unexpected: %v", cc.Search[0]) - } - } -} - -func TestNameserver(t *testing.T) { - testConfig(t, normal) -} - -func TestMissingFinalNewLine(t *testing.T) { - testConfig(t, missingNewline) -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/defaults.go b/Godeps/_workspace/src/github.com/miekg/dns/defaults.go deleted file mode 100644 index 09331c9f60..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/defaults.go +++ /dev/null @@ -1,248 +0,0 @@ -package dns - -import ( - "errors" - "net" - "strconv" -) - -const hexDigit = "0123456789abcdef" - -// Everything is assumed in ClassINET. 
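-//
-// As an illustration of the Set* helpers below, a typical server handler
-// builds its reply from the request roughly like this (a sketch; HandleFunc
-// and ResponseWriter are defined elsewhere in this package):
-//
-// func handler(w ResponseWriter, req *Msg) {
-//     m := new(Msg)
-//     m.SetReply(req)
-//     w.WriteMsg(m)
-// }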
-
-// SetReply creates a reply message from a request message.
-func (dns *Msg) SetReply(request *Msg) *Msg {
-	dns.Id = request.Id
-	dns.RecursionDesired = request.RecursionDesired // Copy rd bit
-	dns.Response = true
-	dns.Opcode = OpcodeQuery
-	dns.Rcode = RcodeSuccess
-	if len(request.Question) > 0 {
-		dns.Question = make([]Question, 1)
-		dns.Question[0] = request.Question[0]
-	}
-	return dns
-}
-
-// SetQuestion creates a question message; it sets the Question
-// section, generates an Id and sets the RecursionDesired (RD)
-// bit to true.
-func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
-	dns.Id = Id()
-	dns.RecursionDesired = true
-	dns.Question = make([]Question, 1)
-	dns.Question[0] = Question{z, t, ClassINET}
-	return dns
-}
-
-// SetNotify creates a notify message; it sets the Question
-// section, generates an Id and sets the Authoritative (AA)
-// bit to true.
-func (dns *Msg) SetNotify(z string) *Msg {
-	dns.Opcode = OpcodeNotify
-	dns.Authoritative = true
-	dns.Id = Id()
-	dns.Question = make([]Question, 1)
-	dns.Question[0] = Question{z, TypeSOA, ClassINET}
-	return dns
-}
-
-// SetRcode creates an error message suitable for the request.
-func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
-	dns.SetReply(request)
-	dns.Rcode = rcode
-	return dns
-}
-
-// SetRcodeFormatError creates a message with FormError set.
-func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
-	dns.Rcode = RcodeFormatError
-	dns.Opcode = OpcodeQuery
-	dns.Response = true
-	dns.Authoritative = false
-	dns.Id = request.Id
-	return dns
-}
-
-// SetUpdate makes the message a dynamic update message. It
-// sets the ZONE section to: z, TypeSOA, ClassINET.
-func (dns *Msg) SetUpdate(z string) *Msg {
-	dns.Id = Id()
-	dns.Response = false
-	dns.Opcode = OpcodeUpdate
-	dns.Compress = false // BIND9 cannot handle compression
-	dns.Question = make([]Question, 1)
-	dns.Question[0] = Question{z, TypeSOA, ClassINET}
-	return dns
-}
-
-// SetIxfr creates a message for requesting an IXFR.
-func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
-	dns.Id = Id()
-	dns.Question = make([]Question, 1)
-	dns.Ns = make([]RR, 1)
-	s := new(SOA)
-	s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
-	s.Serial = serial
-	s.Ns = ns
-	s.Mbox = mbox
-	dns.Question[0] = Question{z, TypeIXFR, ClassINET}
-	dns.Ns[0] = s
-	return dns
-}
-
-// SetAxfr creates a message for requesting an AXFR.
-func (dns *Msg) SetAxfr(z string) *Msg {
-	dns.Id = Id()
-	dns.Question = make([]Question, 1)
-	dns.Question[0] = Question{z, TypeAXFR, ClassINET}
-	return dns
-}
-
-// SetTsig appends a TSIG RR to the message.
-// This is only a skeleton TSIG RR that is added as the last RR in the
-// additional section. The TSIG is calculated when the message is being sent.
-func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg {
-	t := new(TSIG)
-	t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
-	t.Algorithm = algo
-	t.Fudge = uint16(fudge) // use the caller-supplied fudge (seconds)
-	t.TimeSigned = uint64(timesigned)
-	t.OrigId = dns.Id
-	dns.Extra = append(dns.Extra, t)
-	return dns
-}
-
-// SetEdns0 appends an EDNS0 OPT RR to the message.
-// TSIG should always be the last RR in a message.
-func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
-	e := new(OPT)
-	e.Hdr.Name = "."
-	e.Hdr.Rrtype = TypeOPT
-	e.SetUDPSize(udpsize)
-	if do {
-		e.SetDo()
-	}
-	dns.Extra = append(dns.Extra, e)
-	return dns
-}
-
-// IsTsig checks if the message has a TSIG record as the last record
-// in the additional section. It returns the TSIG record found or nil.
-func (dns *Msg) IsTsig() *TSIG {
-	if len(dns.Extra) > 0 {
-		if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
-			return dns.Extra[len(dns.Extra)-1].(*TSIG)
-		}
-	}
-	return nil
-}
-
-// IsEdns0 checks if the message has an EDNS0 (OPT) record; any EDNS0
-// record in the additional section will do. It returns the OPT record
-// found or nil.
-func (dns *Msg) IsEdns0() *OPT {
-	for _, r := range dns.Extra {
-		if r.Header().Rrtype == TypeOPT {
-			return r.(*OPT)
-		}
-	}
-	return nil
-}
-
-// IsDomainName checks if s is a valid domain name; it returns
-// the number of labels and true when the name is valid.
-// Note that a non-fully-qualified domain name is considered valid; in this
-// case the last label is counted in the number of labels.
-// When false is returned, the number of labels is undefined.
-func IsDomainName(s string) (labels int, ok bool) {
-	_, labels, err := packDomainName(s, nil, 0, nil, false)
-	return labels, err == nil
-}
-
-// IsSubDomain checks if child is indeed a child of the parent. Both child and
-// parent are *not* downcased before doing the comparison.
-func IsSubDomain(parent, child string) bool {
-	// Entire child is contained in parent
-	return CompareDomainName(parent, child) == CountLabel(parent)
-}
-
-// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
-// The checking is performed on the binary payload.
-func IsMsg(buf []byte) error {
-	// Header
-	if len(buf) < 12 {
-		return errors.New("dns: bad message header")
-	}
-	// Header: Opcode
-	// TODO(miek): more checks here, e.g. check all header bits.
-	return nil
-}
-
-// IsFqdn checks if a domain name is fully qualified.
-func IsFqdn(s string) bool {
-	l := len(s)
-	if l == 0 {
-		return false
-	}
-	return s[l-1] == '.'
-}
-
-// Fqdn returns the fully qualified domain name from s.
-// If s is already fully qualified, it behaves as the identity function.
-func Fqdn(s string) string {
-	if IsFqdn(s) {
-		return s
-	}
-	return s + "."
-}
-
-// Copied from the official Go code.
-
-// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
-// address suitable for reverse DNS (PTR) record lookups, or an error if it
-// fails to parse the IP address.
-func ReverseAddr(addr string) (arpa string, err error) {
-	ip := net.ParseIP(addr)
-	if ip == nil {
-		return "", &Error{err: "unrecognized address: " + addr}
-	}
-	if ip.To4() != nil {
-		return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
-			strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
-	}
-	// Must be IPv6
-	buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
-	// Add it, in reverse, to the buffer
-	for i := len(ip) - 1; i >= 0; i-- {
-		v := ip[i]
-		buf = append(buf, hexDigit[v&0xF])
-		buf = append(buf, '.')
-		buf = append(buf, hexDigit[v>>4])
-		buf = append(buf, '.')
-	}
-	// Append "ip6.arpa." and return (buf already has the final .)
-	buf = append(buf, "ip6.arpa."...)
-	return string(buf), nil
-}
-
-// String returns the string representation for the type t.
-func (t Type) String() string {
-	if t1, ok := TypeToString[uint16(t)]; ok {
-		return t1
-	}
-	return "TYPE" + strconv.Itoa(int(t))
-}
-
-// String returns the string representation for the class c.
-func (c Class) String() string {
-	if c1, ok := ClassToString[uint16(c)]; ok {
-		return c1
-	}
-	return "CLASS" + strconv.Itoa(int(c))
-}
-
-// String returns the string representation for the name n.
-func (n Name) String() string {
-	return sprintName(string(n))
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dns.go b/Godeps/_workspace/src/github.com/miekg/dns/dns.go
deleted file mode 100644
index 1c37a09dd2..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/dns.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package dns
-
-import "strconv"
-
-const (
-	year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
-	// DefaultMsgSize is the standard default for messages larger than 512 bytes.
-	DefaultMsgSize = 4096
-	// MinMsgSize is the minimal size of a DNS packet.
-	MinMsgSize = 512
-	// MaxMsgSize is the largest possible DNS packet.
-	MaxMsgSize = 65535
-	defaultTtl = 3600 // Default internal TTL.
-)
-
-// Error represents a DNS error.
-type Error struct{ err string }
-
-func (e *Error) Error() string {
-	if e == nil {
-		return "dns: "
-	}
-	return "dns: " + e.err
-}
-
-// An RR represents a resource record.
-type RR interface {
-	// Header returns the header of a resource record. The header contains
-	// everything up to the rdata.
-	Header() *RR_Header
-	// String returns the text representation of the resource record.
-	String() string
-	// copy returns a copy of the RR
-	copy() RR
-	// len returns the length (in octets) of the uncompressed RR in wire format.
-	len() int
-}
-
-// RR_Header is the header of a DNS resource record. There are many
-// types of RRs, but they all share the same header.
-type RR_Header struct {
-	Name     string `dns:"cdomain-name"`
-	Rrtype   uint16
-	Class    uint16
-	Ttl      uint32
-	Rdlength uint16 // length of data after header
-}
-
-// Header returns itself. This is here to make RR_Header implement the RR interface.
-func (h *RR_Header) Header() *RR_Header { return h }
-
-// Just to implement the RR interface.
-func (h *RR_Header) copy() RR { return nil }
-
-func (h *RR_Header) copyHeader() *RR_Header {
-	r := new(RR_Header)
-	r.Name = h.Name
-	r.Rrtype = h.Rrtype
-	r.Class = h.Class
-	r.Ttl = h.Ttl
-	r.Rdlength = h.Rdlength
-	return r
-}
-
-func (h *RR_Header) String() string {
-	var s string
-
-	if h.Rrtype == TypeOPT {
-		s = ";"
-		// and maybe other things
-	}
-
-	s += sprintName(h.Name) + "\t"
-	s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
-	s += Class(h.Class).String() + "\t"
-	s += Type(h.Rrtype).String() + "\t"
-	return s
-}
-
-func (h *RR_Header) len() int {
-	l := len(h.Name) + 1
-	l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
-	return l
-}
-
-// ToRFC3597 converts a known RR to the unknown RR representation
-// from RFC 3597.
-func (rr *RFC3597) ToRFC3597(r RR) error { - buf := make([]byte, r.len()*2) - off, err := PackStruct(r, buf, 0) - if err != nil { - return err - } - buf = buf[:off] - rawSetRdlength(buf, 0, off) - _, err = UnpackStruct(rr, buf, 0) - if err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go b/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go deleted file mode 100644 index 7cf810eac7..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go +++ /dev/null @@ -1,578 +0,0 @@ -package dns - -import ( - "encoding/hex" - "net" - "testing" -) - -func TestPackUnpack(t *testing.T) { - out := new(Msg) - out.Answer = make([]RR, 1) - key := new(DNSKEY) - key = &DNSKEY{Flags: 257, Protocol: 3, Algorithm: RSASHA1} - key.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600} - key.PublicKey = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" - - out.Answer[0] = key - msg, err := out.Pack() - if err != nil { - t.Error("failed to pack msg with DNSKEY") - } - in := new(Msg) - if in.Unpack(msg) != nil { - t.Error("failed to unpack msg with DNSKEY") - } - - sig := new(RRSIG) - sig = &RRSIG{TypeCovered: TypeDNSKEY, Algorithm: RSASHA1, Labels: 2, - OrigTtl: 3600, Expiration: 4000, Inception: 4000, KeyTag: 34641, SignerName: "miek.nl.", - Signature: "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"} - sig.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeRRSIG, Class: ClassINET, Ttl: 3600} - - out.Answer[0] = sig - msg, err = out.Pack() - if err != nil { - t.Error("failed to pack msg with RRSIG") - } - - if in.Unpack(msg) != nil { - t.Error("failed to unpack msg with RRSIG") - } -} - -func TestPackUnpack2(t *testing.T) { - m := new(Msg) - m.Extra = make([]RR, 1) - m.Answer = make([]RR, 1) - dom := "miek.nl." - rr := new(A) - rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} - rr.A = net.IPv4(127, 0, 0, 1) - - x := new(TXT) - x.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - x.Txt = []string{"heelalaollo"} - - m.Extra[0] = x - m.Answer[0] = rr - _, err := m.Pack() - if err != nil { - t.Error("Packing failed: ", err) - return - } -} - -func TestPackUnpack3(t *testing.T) { - m := new(Msg) - m.Extra = make([]RR, 2) - m.Answer = make([]RR, 1) - dom := "miek.nl." 
- rr := new(A) - rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} - rr.A = net.IPv4(127, 0, 0, 1) - - x1 := new(TXT) - x1.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - x1.Txt = []string{} - - x2 := new(TXT) - x2.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - x2.Txt = []string{"heelalaollo"} - - m.Extra[0] = x1 - m.Extra[1] = x2 - m.Answer[0] = rr - b, err := m.Pack() - if err != nil { - t.Error("packing failed: ", err) - return - } - - var unpackMsg Msg - err = unpackMsg.Unpack(b) - if err != nil { - t.Error("unpacking failed") - return - } -} - -func TestBailiwick(t *testing.T) { - yes := map[string]string{ - "miek.nl": "ns.miek.nl", - ".": "miek.nl", - } - for parent, child := range yes { - if !IsSubDomain(parent, child) { - t.Errorf("%s should be child of %s", child, parent) - t.Errorf("comparelabels %d", CompareDomainName(parent, child)) - t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) - } - } - no := map[string]string{ - "www.miek.nl": "ns.miek.nl", - "m\\.iek.nl": "ns.miek.nl", - "w\\.iek.nl": "w.iek.nl", - "p\\\\.iek.nl": "ns.p.iek.nl", // p\\.iek.nl , literal \ in domain name - "miek.nl": ".", - } - for parent, child := range no { - if IsSubDomain(parent, child) { - t.Errorf("%s should not be child of %s", child, parent) - t.Errorf("comparelabels %d", CompareDomainName(parent, child)) - t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) - } - } -} - -func TestPack(t *testing.T) { - rr := []string{"US. 86400 IN NSEC 0-.us. NS SOA RRSIG NSEC DNSKEY TYPE65534"} - m := new(Msg) - var err error - m.Answer = make([]RR, 1) - for _, r := range rr { - m.Answer[0], err = NewRR(r) - if err != nil { - t.Errorf("failed to create RR: %v", err) - continue - } - if _, err := m.Pack(); err != nil { - t.Errorf("packing failed: %v", err) - } - } - x := new(Msg) - ns, _ := NewRR("pool.ntp.org. 390 IN NS a.ntpns.org") - ns.(*NS).Ns = "a.ntpns.org" - x.Ns = append(m.Ns, ns) - x.Ns = append(m.Ns, ns) - x.Ns = append(m.Ns, ns) - // This crashes due to the fact the a.ntpns.org isn't a FQDN - // How to recover() from a remove panic()? - if _, err := x.Pack(); err == nil { - t.Error("packing should fail") - } - x.Answer = make([]RR, 1) - x.Answer[0], err = NewRR(rr[0]) - if _, err := x.Pack(); err == nil { - t.Error("packing should fail") - } - x.Question = make([]Question, 1) - x.Question[0] = Question{";sd#edddds鍛↙赏‘℅∥↙xzztsestxssweewwsssstx@s@Z嵌e@cn.pool.ntp.org.", TypeA, ClassINET} - if _, err := x.Pack(); err == nil { - t.Error("packing should fail") - } -} - -func TestPackNAPTR(t *testing.T) { - for _, n := range []string{ - `apple.com. IN NAPTR 100 50 "se" "SIP+D2U" "" _sip._udp.apple.com.`, - `apple.com. IN NAPTR 90 50 "se" "SIP+D2T" "" _sip._tcp.apple.com.`, - `apple.com. IN NAPTR 50 50 "se" "SIPS+D2T" "" _sips._tcp.apple.com.`, - } { - rr, _ := NewRR(n) - msg := make([]byte, rr.len()) - if off, err := PackRR(rr, msg, 0, nil, false); err != nil { - t.Errorf("packing failed: %v", err) - t.Errorf("length %d, need more than %d", rr.len(), off) - } else { - t.Logf("buf size needed: %d", off) - } - } -} - -func TestCompressLength(t *testing.T) { - m := new(Msg) - m.SetQuestion("miek.nl", TypeMX) - ul := m.Len() - m.Compress = true - if ul != m.Len() { - t.Fatalf("should be equal") - } -} - -// Does the predicted length match final packed length? 
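-// (Msg.Len is expected to be an upper bound when compression is enabled:
-// the checks below only fail when the prediction is smaller than the
-// actual packed size.)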
-func TestMsgCompressLength(t *testing.T) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - - name1 := "12345678901234567890123456789012345.12345678.123." - rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - tests := []*Msg{ - makeMsg(name1, []RR{rrA}, nil, nil), - makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} - - for _, msg := range tests { - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", - msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) - } - } -} - -func TestMsgLength(t *testing.T) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - return msg - } - - name1 := "12345678901234567890123456789012345.12345678.123." - rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - tests := []*Msg{ - makeMsg(name1, []RR{rrA}, nil, nil), - makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} - - for _, msg := range tests { - predicted := msg.Len() - buf, err := msg.Pack() - if err != nil { - t.Error(err) - } - if predicted < len(buf) { - t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d", - msg.Question[0].Name, predicted, len(buf)) - } - } -} - -func TestMsgLength2(t *testing.T) { - // Serialized replies - var testMessages = []string{ - // google.com. IN A? - "064e81800001000b0004000506676f6f676c6503636f6d0000010001c00c00010001000000050004adc22986c00c00010001000000050004adc22987c00c00010001000000050004adc22988c00c00010001000000050004adc22989c00c00010001000000050004adc2298ec00c00010001000000050004adc22980c00c00010001000000050004adc22981c00c00010001000000050004adc22982c00c00010001000000050004adc22983c00c00010001000000050004adc22984c00c00010001000000050004adc22985c00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc0d800010001000000050004d8ef200ac0ea00010001000000050004d8ef220ac0fc00010001000000050004d8ef240ac10e00010001000000050004d8ef260a0000290500000000050000", - // amazon.com. IN A? 
(reply has no EDNS0 record) - // TODO(miek): this one is off-by-one, need to find out why - //"6de1818000010004000a000806616d617a6f6e03636f6d0000010001c00c000100010000000500044815c2d4c00c000100010000000500044815d7e8c00c00010001000000050004b02062a6c00c00010001000000050004cdfbf236c00c000200010000000500140570646e733408756c747261646e73036f726700c00c000200010000000500150570646e733508756c747261646e7304696e666f00c00c000200010000000500160570646e733608756c747261646e7302636f02756b00c00c00020001000000050014036e7331037033310664796e656374036e657400c00c00020001000000050006036e7332c0cfc00c00020001000000050006036e7333c0cfc00c00020001000000050006036e7334c0cfc00c000200010000000500110570646e733108756c747261646e73c0dac00c000200010000000500080570646e7332c127c00c000200010000000500080570646e7333c06ec0cb00010001000000050004d04e461fc0eb00010001000000050004cc0dfa1fc0fd00010001000000050004d04e471fc10f00010001000000050004cc0dfb1fc12100010001000000050004cc4a6c01c121001c000100000005001020010502f3ff00000000000000000001c13e00010001000000050004cc4a6d01c13e001c0001000000050010261000a1101400000000000000000001", - // yahoo.com. IN A? - "fc2d81800001000300070008057961686f6f03636f6d0000010001c00c00010001000000050004628afd6dc00c00010001000000050004628bb718c00c00010001000000050004cebe242dc00c00020001000000050006036e7336c00cc00c00020001000000050006036e7338c00cc00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7335c00cc07b0001000100000005000444b48310c08d00010001000000050004448eff10c09f00010001000000050004cb54dd35c0b100010001000000050004628a0b9dc0c30001000100000005000477a0f77cc05700010001000000050004ca2bdfaac06900010001000000050004caa568160000290500000000050000", - // microsoft.com. IN A? - "f4368180000100020005000b096d6963726f736f667403636f6d0000010001c00c0001000100000005000440040b25c00c0001000100000005000441373ac9c00c0002000100000005000e036e7331046d736674036e657400c00c00020001000000050006036e7332c04fc00c00020001000000050006036e7333c04fc00c00020001000000050006036e7334c04fc00c00020001000000050006036e7335c04fc04b000100010000000500044137253ec04b001c00010000000500102a010111200500000000000000010001c0650001000100000005000440043badc065001c00010000000500102a010111200600060000000000010001c07700010001000000050004d5c7b435c077001c00010000000500102a010111202000000000000000010001c08900010001000000050004cf2e4bfec089001c00010000000500102404f800200300000000000000010001c09b000100010000000500044137e28cc09b001c00010000000500102a010111200f000100000000000100010000290500000000050000", - // google.com. IN MX? - "724b8180000100050004000b06676f6f676c6503636f6d00000f0001c00c000f000100000005000c000a056173706d78016cc00cc00c000f0001000000050009001404616c7431c02ac00c000f0001000000050009001e04616c7432c02ac00c000f0001000000050009002804616c7433c02ac00c000f0001000000050009003204616c7434c02ac00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7331c00cc02a00010001000000050004adc2421bc02a001c00010000000500102a00145040080c01000000000000001bc04200010001000000050004adc2461bc05700010001000000050004adc2451bc06c000100010000000500044a7d8f1bc081000100010000000500044a7d191bc0ca00010001000000050004d8ef200ac09400010001000000050004d8ef220ac0a600010001000000050004d8ef240ac0b800010001000000050004d8ef260a0000290500000000050000", - // reddit.com. IN A? 
- "12b98180000100080000000c0672656464697403636f6d0000020001c00c0002000100000005000f046175733204616b616d036e657400c00c000200010000000500070475736534c02dc00c000200010000000500070475737733c02dc00c000200010000000500070475737735c02dc00c00020001000000050008056173696131c02dc00c00020001000000050008056173696139c02dc00c00020001000000050008056e73312d31c02dc00c0002000100000005000a076e73312d313935c02dc02800010001000000050004c30a242ec04300010001000000050004451f1d39c05600010001000000050004451f3bc7c0690001000100000005000460073240c07c000100010000000500046007fb81c090000100010000000500047c283484c090001c00010000000500102a0226f0006700000000000000000064c0a400010001000000050004c16c5b01c0a4001c000100000005001026001401000200000000000000000001c0b800010001000000050004c16c5bc3c0b8001c0001000000050010260014010002000000000000000000c30000290500000000050000", - } - - for i, hexData := range testMessages { - // we won't fail the decoding of the hex - input, _ := hex.DecodeString(hexData) - m := new(Msg) - m.Unpack(input) - //println(m.String()) - m.Compress = true - lenComp := m.Len() - b, _ := m.Pack() - pacComp := len(b) - m.Compress = false - lenUnComp := m.Len() - b, _ = m.Pack() - pacUnComp := len(b) - if pacComp+1 != lenComp { - t.Errorf("msg.Len(compressed)=%d actual=%d for test %d", lenComp, pacComp, i) - } - if pacUnComp+1 != lenUnComp { - t.Errorf("msg.Len(uncompressed)=%d actual=%d for test %d", lenUnComp, pacUnComp, i) - } - } -} - -func TestMsgLengthCompressionMalformed(t *testing.T) { - // SOA with empty hostmaster, which is illegal - soa := &SOA{Hdr: RR_Header{Name: ".", Rrtype: TypeSOA, Class: ClassINET, Ttl: 12345}, - Ns: ".", - Mbox: "", - Serial: 0, - Refresh: 28800, - Retry: 7200, - Expire: 604800, - Minttl: 60} - m := new(Msg) - m.Compress = true - m.Ns = []RR{soa} - m.Len() // Should not crash. -} - -func BenchmarkMsgLength(b *testing.B) { - b.StopTimer() - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - b.StartTimer() - for i := 0; i < b.N; i++ { - msg.Len() - } -} - -func BenchmarkMsgLengthPack(b *testing.B) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." - rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) - msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = msg.Pack() - } -} - -func BenchmarkMsgPackBuffer(b *testing.B) { - makeMsg := func(question string, ans, ns, e []RR) *Msg { - msg := new(Msg) - msg.SetQuestion(Fqdn(question), TypeANY) - msg.Answer = append(msg.Answer, ans...) - msg.Ns = append(msg.Ns, ns...) - msg.Extra = append(msg.Extra, e...) - msg.Compress = true - return msg - } - name1 := "12345678901234567890123456789012345.12345678.123." 
-	rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
-	msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
-	buf := make([]byte, 512)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		_, _ = msg.PackBuffer(buf)
-	}
-}
-
-func BenchmarkMsgUnpack(b *testing.B) {
-	makeMsg := func(question string, ans, ns, e []RR) *Msg {
-		msg := new(Msg)
-		msg.SetQuestion(Fqdn(question), TypeANY)
-		msg.Answer = append(msg.Answer, ans...)
-		msg.Ns = append(msg.Ns, ns...)
-		msg.Extra = append(msg.Extra, e...)
-		msg.Compress = true
-		return msg
-	}
-	name1 := "12345678901234567890123456789012345.12345678.123."
-	rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
-	msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
-	msgBuf, _ := msg.Pack()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		_ = msg.Unpack(msgBuf)
-	}
-}
-
-func BenchmarkPackDomainName(b *testing.B) {
-	name1 := "12345678901234567890123456789012345.12345678.123."
-	buf := make([]byte, len(name1)+1)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		_, _ = PackDomainName(name1, buf, 0, nil, false)
-	}
-}
-
-func BenchmarkUnpackDomainName(b *testing.B) {
-	name1 := "12345678901234567890123456789012345.12345678.123."
-	buf := make([]byte, len(name1)+1)
-	_, _ = PackDomainName(name1, buf, 0, nil, false)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		_, _, _ = UnpackDomainName(buf, 0)
-	}
-}
-
-func BenchmarkUnpackDomainNameUnprintable(b *testing.B) {
-	name1 := "\x02\x02\x02\x025\x02\x02\x02\x02.12345678.123."
-	buf := make([]byte, len(name1)+1)
-	_, _ = PackDomainName(name1, buf, 0, nil, false)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		_, _, _ = UnpackDomainName(buf, 0)
-	}
-}
-
-func TestToRFC3597(t *testing.T) {
-	a, _ := NewRR("miek.nl. IN A 10.0.1.1")
-	x := new(RFC3597)
-	x.ToRFC3597(a)
-	if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` {
-		t.Error("string mismatch")
-	}
-}
-
-func TestNoRdataPack(t *testing.T) {
-	data := make([]byte, 1024)
-	for typ, fn := range typeToRR {
-		r := fn()
-		*r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 3600}
-		_, err := PackRR(r, data, 0, nil, false)
-		if err != nil {
-			t.Errorf("failed to pack RR with zero rdata: %s: %v", TypeToString[typ], err)
-		}
-	}
-}
-
-// TODO(miek): fix the "dns: buffer too small" errors this throws
-func TestNoRdataUnpack(t *testing.T) {
-	data := make([]byte, 1024)
-	for typ, fn := range typeToRR {
-		if typ == TypeSOA || typ == TypeTSIG || typ == TypeWKS {
-			// SOA, TSIG will not be seen (like this) in dyn. updates?
-			// WKS is a bug, but...a deprecated record.
-			continue
-		}
-		r := fn()
-		*r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 3600}
-		off, err := PackRR(r, data, 0, nil, false)
-		if err != nil {
-			// Should always work; TestNoRdataPack should have caught this
-			t.Errorf("failed to pack RR: %v", err)
-			continue
-		}
-		rr, _, err := UnpackRR(data[:off], 0)
-		if err != nil {
-			t.Errorf("failed to unpack RR with zero rdata: %s: %v", TypeToString[typ], err)
-		}
-		t.Log(rr)
-	}
-}
-
-func TestRdataOverflow(t *testing.T) {
-	rr := new(RFC3597)
-	rr.Hdr.Name = "."
- rr.Hdr.Class = ClassINET - rr.Hdr.Rrtype = 65280 - rr.Rdata = hex.EncodeToString(make([]byte, 0xFFFF)) - buf := make([]byte, 0xFFFF*2) - if _, err := PackRR(rr, buf, 0, nil, false); err != nil { - t.Fatalf("maximum size rrdata pack failed: %v", err) - } - rr.Rdata += "00" - if _, err := PackRR(rr, buf, 0, nil, false); err != ErrRdata { - t.Fatalf("oversize rrdata pack didn't return ErrRdata - instead: %v", err) - } -} - -func TestCopy(t *testing.T) { - rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") // Weird TTL to avoid catching TTL - rr1 := Copy(rr) - if rr.String() != rr1.String() { - t.Fatalf("Copy() failed %s != %s", rr.String(), rr1.String()) - } -} - -func TestMsgCopy(t *testing.T) { - m := new(Msg) - m.SetQuestion("miek.nl.", TypeA) - rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") - m.Answer = []RR{rr} - rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") - m.Ns = []RR{rr} - - m1 := m.Copy() - if m.String() != m1.String() { - t.Fatalf("Msg.Copy() failed %s != %s", m.String(), m1.String()) - } - - m1.Answer[0], _ = NewRR("somethingelse.nl. 2311 IN A 127.0.0.1") - if m.String() == m1.String() { - t.Fatalf("Msg.Copy() failed; change to copy changed template %s", m.String()) - } - - rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.2") - m1.Answer = append(m1.Answer, rr) - if m1.Ns[0].String() == m1.Answer[1].String() { - t.Fatalf("Msg.Copy() failed; append changed underlying array %s", m1.Ns[0].String()) - } -} - -func BenchmarkCopy(b *testing.B) { - b.ReportAllocs() - m := new(Msg) - m.SetQuestion("miek.nl.", TypeA) - rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") - m.Answer = []RR{rr} - rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") - m.Ns = []RR{rr} - rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.1") - m.Extra = []RR{rr} - - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Copy() - } -} - -func TestPackIPSECKEY(t *testing.T) { - tests := []string{ - "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.3 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - "38.1.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - "0.d.4.0.3.0.e.f.f.f.3.f.0.1.2.0 7200 IN IPSECKEY ( 10 2 2 2001:0DB8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - } - buf := make([]byte, 1024) - for _, t1 := range tests { - rr, _ := NewRR(t1) - off, err := PackRR(rr, buf, 0, nil, false) - if err != nil { - t.Errorf("failed to pack IPSECKEY %v: %s", err, t1) - continue - } - - rr, _, err = UnpackRR(buf[:off], 0) - if err != nil { - t.Errorf("failed to unpack IPSECKEY %v: %s", err, t1) - } - t.Log(rr) - } -} - -func TestMsgPackBuffer(t *testing.T) { - var testMessages = []string{ - // news.ycombinator.com.in.escapemg.com. IN A, response - "586285830001000000010000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001c0210006000100000e10002c036e7332c02103646e730b67726f6f7665736861726bc02d77ed50e600002a3000000e1000093a8000000e10", - - // news.ycombinator.com.in.escapemg.com. 
IN A, question - "586201000001000000000000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001", - - "398781020001000000000000046e6577730b79636f6d62696e61746f7203636f6d0000010001", - } - - for i, hexData := range testMessages { - // we won't fail the decoding of the hex - input, _ := hex.DecodeString(hexData) - m := new(Msg) - if err := m.Unpack(input); err != nil { - t.Errorf("packet %d failed to unpack", i) - continue - } - t.Logf("packet %d %s", i, m.String()) - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go deleted file mode 100644 index 21ef3775a3..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go +++ /dev/null @@ -1,634 +0,0 @@ -package dns - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "hash" - "io" - "math/big" - "sort" - "strings" - "time" -) - -// DNSSEC encryption algorithm codes. -const ( - _ uint8 = iota - RSAMD5 - DH - DSA - _ // Skip 4, RFC 6725, section 2.1 - RSASHA1 - DSANSEC3SHA1 - RSASHA1NSEC3SHA1 - RSASHA256 - _ // Skip 9, RFC 6725, section 2.1 - RSASHA512 - _ // Skip 11, RFC 6725, section 2.1 - ECCGOST - ECDSAP256SHA256 - ECDSAP384SHA384 - INDIRECT uint8 = 252 - PRIVATEDNS uint8 = 253 // Private (experimental keys) - PRIVATEOID uint8 = 254 -) - -// DNSSEC hashing algorithm codes. -const ( - _ uint8 = iota - SHA1 // RFC 4034 - SHA256 // RFC 4509 - GOST94 // RFC 5933 - SHA384 // Experimental - SHA512 // Experimental -) - -// DNSKEY flag values. -const ( - SEP = 1 - REVOKE = 1 << 7 - ZONE = 1 << 8 -) - -// The RRSIG needs to be converted to wireformat with some of -// the rdata (the signature) missing. Use this struct to easy -// the conversion (and re-use the pack/unpack functions). -type rrsigWireFmt struct { - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - /* No Signature */ -} - -// Used for converting DNSKEY's rdata to wirefmt. -type dnskeyWireFmt struct { - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` - /* Nothing is left out */ -} - -func divRoundUp(a, b int) int { - return (a + b - 1) / b -} - -// KeyTag calculates the keytag (or key-id) of the DNSKEY. -func (k *DNSKEY) KeyTag() uint16 { - if k == nil { - return 0 - } - var keytag int - switch k.Algorithm { - case RSAMD5: - // Look at the bottom two bytes of the modules, which the last - // item in the pubkey. We could do this faster by looking directly - // at the base64 values. But I'm lazy. - modulus, _ := fromBase64([]byte(k.PublicKey)) - if len(modulus) > 1 { - x, _ := unpackUint16(modulus, len(modulus)-2) - keytag = int(x) - } - default: - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := PackStruct(keywire, wire, 0) - if err != nil { - return 0 - } - wire = wire[:n] - for i, v := range wire { - if i&1 != 0 { - keytag += int(v) // must be larger than uint32 - } else { - keytag += int(v) << 8 - } - } - keytag += (keytag >> 16) & 0xFFFF - keytag &= 0xFFFF - } - return uint16(keytag) -} - -// ToDS converts a DNSKEY record to a DS record. 
-func (k *DNSKEY) ToDS(h uint8) *DS { - if k == nil { - return nil - } - ds := new(DS) - ds.Hdr.Name = k.Hdr.Name - ds.Hdr.Class = k.Hdr.Class - ds.Hdr.Rrtype = TypeDS - ds.Hdr.Ttl = k.Hdr.Ttl - ds.Algorithm = k.Algorithm - ds.DigestType = h - ds.KeyTag = k.KeyTag() - - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := PackStruct(keywire, wire, 0) - if err != nil { - return nil - } - wire = wire[:n] - - owner := make([]byte, 255) - off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) - if err1 != nil { - return nil - } - owner = owner[:off] - // RFC4034: - // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); - // "|" denotes concatenation - // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. - - // digest buffer - digest := append(owner, wire...) // another copy - - switch h { - case SHA1: - s := sha1.New() - io.WriteString(s, string(digest)) - ds.Digest = hex.EncodeToString(s.Sum(nil)) - case SHA256: - s := sha256.New() - io.WriteString(s, string(digest)) - ds.Digest = hex.EncodeToString(s.Sum(nil)) - case SHA384: - s := sha512.New384() - io.WriteString(s, string(digest)) - ds.Digest = hex.EncodeToString(s.Sum(nil)) - case GOST94: - /* I have no clue */ - default: - return nil - } - return ds -} - -// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. -func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { - c := &CDNSKEY{DNSKEY: *k} - c.Hdr = *k.Hdr.copyHeader() - c.Hdr.Rrtype = TypeCDNSKEY - return c -} - -// ToCDS converts a DS record to a CDS record. -func (d *DS) ToCDS() *CDS { - c := &CDS{DS: *d} - c.Hdr = *d.Hdr.copyHeader() - c.Hdr.Rrtype = TypeCDS - return c -} - -// Sign signs an RRSet. The signature needs to be filled in with -// the values: Inception, Expiration, KeyTag, SignerName and Algorithm. -// The rest is copied from the RRset. Sign returns true when the signing went OK, -// otherwise false. -// There is no check if RRSet is a proper (RFC 2181) RRSet. -// If OrigTTL is non zero, it is used as-is, otherwise the TTL of the RRset -// is used as the OrigTTL. -func (rr *RRSIG) Sign(k PrivateKey, rrset []RR) error { - if k == nil { - return ErrPrivKey - } - // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - rr.Hdr.Rrtype = TypeRRSIG - rr.Hdr.Name = rrset[0].Header().Name - rr.Hdr.Class = rrset[0].Header().Class - if rr.OrigTtl == 0 { // If set don't override - rr.OrigTtl = rrset[0].Header().Ttl - } - rr.TypeCovered = rrset[0].Header().Rrtype - rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) - - if strings.HasPrefix(rrset[0].Header().Name, "*") { - rr.Labels-- // wildcard, remove from label count - } - - sigwire := new(rrsigWireFmt) - sigwire.TypeCovered = rr.TypeCovered - sigwire.Algorithm = rr.Algorithm - sigwire.Labels = rr.Labels - sigwire.OrigTtl = rr.OrigTtl - sigwire.Expiration = rr.Expiration - sigwire.Inception = rr.Inception - sigwire.KeyTag = rr.KeyTag - // For signing, lowercase this name - sigwire.SignerName = strings.ToLower(rr.SignerName) - - // Create the desired binary blob - signdata := make([]byte, DefaultMsgSize) - n, err := PackStruct(sigwire, signdata, 0) - if err != nil { - return err - } - signdata = signdata[:n] - wire, err := rawSignatureData(rrset, rr) - if err != nil { - return err - } - signdata = append(signdata, wire...) 
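-	// (Note: at this point signdata is the RRSIG rdata minus the signature
-	// field, followed by the canonical wire form of the RRset, per RFC 4034,
-	// section 3.1.8.1. It is hashed below and the digest is signed.)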
- - var h hash.Hash - switch rr.Algorithm { - case DSA, DSANSEC3SHA1: - // TODO: this seems bugged, will panic - case RSASHA1, RSASHA1NSEC3SHA1: - h = sha1.New() - case RSASHA256, ECDSAP256SHA256: - h = sha256.New() - case ECDSAP384SHA384: - h = sha512.New384() - case RSASHA512: - h = sha512.New() - case RSAMD5: - fallthrough // Deprecated in RFC 6725 - default: - return ErrAlg - } - - _, err = h.Write(signdata) - if err != nil { - return err - } - sighash := h.Sum(nil) - - signature, err := k.Sign(sighash, rr.Algorithm) - if err != nil { - return err - } - rr.Signature = toBase64(signature) - - return nil -} - -// Verify validates an RRSet with the signature and key. This is only the -// cryptographic test, the signature validity period must be checked separately. -// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. -func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { - // First the easy checks - if len(rrset) == 0 { - return ErrRRset - } - if rr.KeyTag != k.KeyTag() { - return ErrKey - } - if rr.Hdr.Class != k.Hdr.Class { - return ErrKey - } - if rr.Algorithm != k.Algorithm { - return ErrKey - } - if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) { - return ErrKey - } - if k.Protocol != 3 { - return ErrKey - } - for _, r := range rrset { - if r.Header().Class != rr.Hdr.Class { - return ErrRRset - } - if r.Header().Rrtype != rr.TypeCovered { - return ErrRRset - } - } - // RFC 4035 5.3.2. Reconstructing the Signed Data - // Copy the sig, except the rrsig data - sigwire := new(rrsigWireFmt) - sigwire.TypeCovered = rr.TypeCovered - sigwire.Algorithm = rr.Algorithm - sigwire.Labels = rr.Labels - sigwire.OrigTtl = rr.OrigTtl - sigwire.Expiration = rr.Expiration - sigwire.Inception = rr.Inception - sigwire.KeyTag = rr.KeyTag - sigwire.SignerName = strings.ToLower(rr.SignerName) - // Create the desired binary blob - signeddata := make([]byte, DefaultMsgSize) - n, err := PackStruct(sigwire, signeddata, 0) - if err != nil { - return err - } - signeddata = signeddata[:n] - wire, err := rawSignatureData(rrset, rr) - if err != nil { - return err - } - signeddata = append(signeddata, wire...) - - sigbuf := rr.sigBuf() // Get the binary signature data - if rr.Algorithm == PRIVATEDNS { // PRIVATEOID - // TODO(mg) - // remove the domain name and assume its our - } - - switch rr.Algorithm { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: - // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? - pubkey := k.publicKeyRSA() // Get the key - if pubkey == nil { - return ErrKey - } - // Setup the hash as defined for this alg. 
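-	// (RSA verification: hash signeddata with the digest matching the
-	// algorithm, then check the PKCS#1 v1.5 signature below.)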
-		var h hash.Hash
-		var ch crypto.Hash
-		switch rr.Algorithm {
-		case RSAMD5:
-			h = md5.New()
-			ch = crypto.MD5
-		case RSASHA1, RSASHA1NSEC3SHA1:
-			h = sha1.New()
-			ch = crypto.SHA1
-		case RSASHA256:
-			h = sha256.New()
-			ch = crypto.SHA256
-		case RSASHA512:
-			h = sha512.New()
-			ch = crypto.SHA512
-		}
-		io.WriteString(h, string(signeddata))
-		sighash := h.Sum(nil)
-		return rsa.VerifyPKCS1v15(pubkey, ch, sighash, sigbuf)
-	case ECDSAP256SHA256, ECDSAP384SHA384:
-		pubkey := k.publicKeyECDSA()
-		if pubkey == nil {
-			return ErrKey
-		}
-		var h hash.Hash
-		switch rr.Algorithm {
-		case ECDSAP256SHA256:
-			h = sha256.New()
-		case ECDSAP384SHA384:
-			h = sha512.New384()
-		}
-		io.WriteString(h, string(signeddata))
-		sighash := h.Sum(nil)
-		// Split sigbuf into the r and s coordinates
-		r := big.NewInt(0)
-		r.SetBytes(sigbuf[:len(sigbuf)/2])
-		s := big.NewInt(0)
-		s.SetBytes(sigbuf[len(sigbuf)/2:])
-		if ecdsa.Verify(pubkey, sighash, r, s) {
-			return nil
-		}
-		return ErrSig
-	}
-	// Unknown alg
-	return ErrAlg
-}
-
-// ValidityPeriod uses RFC1982 serial arithmetic to calculate
-// whether a signature period is valid. If t is the zero time, the
-// current time is used; otherwise t is.
-func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
-	var utc int64
-	if t.IsZero() {
-		utc = time.Now().UTC().Unix()
-	} else {
-		utc = t.UTC().Unix()
-	}
-	modi := (int64(rr.Inception) - utc) / year68
-	mode := (int64(rr.Expiration) - utc) / year68
-	ti := int64(rr.Inception) + (modi * year68)
-	te := int64(rr.Expiration) + (mode * year68)
-	return ti <= utc && utc <= te
-}
-
-// sigBuf returns the signature's raw, base64-decoded sigdata as a byte slice.
-func (rr *RRSIG) sigBuf() []byte {
-	sigbuf, err := fromBase64([]byte(rr.Signature))
-	if err != nil {
-		return nil
-	}
-	return sigbuf
-}
-
-// publicKeyRSA returns the RSA public key from a DNSKEY record.
-func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
-	keybuf, err := fromBase64([]byte(k.PublicKey))
-	if err != nil {
-		return nil
-	}
-
-	// RFC 2537/3110, section 2: RSA Public KEY Resource Records.
-	// The exponent length is in the 0th byte, unless it is zero, in which
-	// case it is in bytes 1 and 2 as a 16-bit number.
-	explen := uint16(keybuf[0])
-	keyoff := 1
-	if explen == 0 {
-		explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
-		keyoff = 3
-	}
-	pubkey := new(rsa.PublicKey)
-
-	pubkey.N = big.NewInt(0)
-	shift := uint64((explen - 1) * 8)
-	expo := uint64(0)
-	for i := int(explen - 1); i > 0; i-- {
-		expo += uint64(keybuf[keyoff+i]) << shift
-		shift -= 8
-	}
-	// Remainder
-	expo += uint64(keybuf[keyoff])
-	if expo > 2<<31 {
-		// Larger expo than supported.
-		// println("dns: F5 primes (or larger) are not supported")
-		return nil
-	}
-	pubkey.E = int(expo)
-
-	pubkey.N.SetBytes(keybuf[keyoff+int(explen):])
-	return pubkey
-}
-
-// publicKeyECDSA returns the Curve public key from the DNSKEY record.
-func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - pubkey := new(ecdsa.PublicKey) - switch k.Algorithm { - case ECDSAP256SHA256: - pubkey.Curve = elliptic.P256() - if len(keybuf) != 64 { - // wrongly encoded key - return nil - } - case ECDSAP384SHA384: - pubkey.Curve = elliptic.P384() - if len(keybuf) != 96 { - // Wrongly encoded key - return nil - } - } - pubkey.X = big.NewInt(0) - pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) - pubkey.Y = big.NewInt(0) - pubkey.Y.SetBytes(keybuf[len(keybuf)/2:]) - return pubkey -} - -func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - if len(keybuf) < 22 { - return nil - } - t, keybuf := int(keybuf[0]), keybuf[1:] - size := 64 + t*8 - q, keybuf := keybuf[:20], keybuf[20:] - if len(keybuf) != 3*size { - return nil - } - p, keybuf := keybuf[:size], keybuf[size:] - g, y := keybuf[:size], keybuf[size:] - pubkey := new(dsa.PublicKey) - pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) - pubkey.Parameters.P = big.NewInt(0).SetBytes(p) - pubkey.Parameters.G = big.NewInt(0).SetBytes(g) - pubkey.Y = big.NewInt(0).SetBytes(y) - return pubkey -} - -type wireSlice [][]byte - -func (p wireSlice) Len() int { return len(p) } -func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p wireSlice) Less(i, j int) bool { - _, ioff, _ := UnpackDomainName(p[i], 0) - _, joff, _ := UnpackDomainName(p[j], 0) - return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 -} - -// Return the raw signature data. -func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { - wires := make(wireSlice, len(rrset)) - for i, r := range rrset { - r1 := r.copy() - r1.Header().Ttl = s.OrigTtl - labels := SplitDomainName(r1.Header().Name) - // 6.2. Canonical RR Form. (4) - wildcards - if len(labels) > int(s.Labels) { - // Wildcard - r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." - } - // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase - r1.Header().Name = strings.ToLower(r1.Header().Name) - // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. - // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, - // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, - // SRV, DNAME, A6 - switch x := r1.(type) { - case *NS: - x.Ns = strings.ToLower(x.Ns) - case *CNAME: - x.Target = strings.ToLower(x.Target) - case *SOA: - x.Ns = strings.ToLower(x.Ns) - x.Mbox = strings.ToLower(x.Mbox) - case *MB: - x.Mb = strings.ToLower(x.Mb) - case *MG: - x.Mg = strings.ToLower(x.Mg) - case *MR: - x.Mr = strings.ToLower(x.Mr) - case *PTR: - x.Ptr = strings.ToLower(x.Ptr) - case *MINFO: - x.Rmail = strings.ToLower(x.Rmail) - x.Email = strings.ToLower(x.Email) - case *MX: - x.Mx = strings.ToLower(x.Mx) - case *NAPTR: - x.Replacement = strings.ToLower(x.Replacement) - case *KX: - x.Exchanger = strings.ToLower(x.Exchanger) - case *SRV: - x.Target = strings.ToLower(x.Target) - case *DNAME: - x.Target = strings.ToLower(x.Target) - } - // 6.2. Canonical RR Form. (5) - origTTL - wire := make([]byte, r1.len()+1) // +1 to be safe(r) - off, err1 := PackRR(r1, wire, 0, nil, false) - if err1 != nil { - return nil, err1 - } - wire = wire[:off] - wires[i] = wire - } - sort.Sort(wires) - for i, wire := range wires { - if i > 0 && bytes.Equal(wire, wires[i-1]) { - continue - } - buf = append(buf, wire...) - } - return buf, nil -} - -// Map for algorithm names. 
-var AlgorithmToString = map[uint8]string{ - RSAMD5: "RSAMD5", - DH: "DH", - DSA: "DSA", - RSASHA1: "RSASHA1", - DSANSEC3SHA1: "DSA-NSEC3-SHA1", - RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", - RSASHA256: "RSASHA256", - RSASHA512: "RSASHA512", - ECCGOST: "ECC-GOST", - ECDSAP256SHA256: "ECDSAP256SHA256", - ECDSAP384SHA384: "ECDSAP384SHA384", - INDIRECT: "INDIRECT", - PRIVATEDNS: "PRIVATEDNS", - PRIVATEOID: "PRIVATEOID", -} - -// Map of algorithm strings. -var StringToAlgorithm = reverseInt8(AlgorithmToString) - -// Map for hash names. -var HashToString = map[uint8]string{ - SHA1: "SHA1", - SHA256: "SHA256", - GOST94: "GOST94", - SHA384: "SHA384", - SHA512: "SHA512", -} - -// Map of hash strings. -var StringToHash = reverseInt8(HashToString) diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keygen.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keygen.go deleted file mode 100644 index 739beebe06..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keygen.go +++ /dev/null @@ -1,155 +0,0 @@ -package dns - -import ( - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "math/big" -) - -// Generate generates a DNSKEY of the given bit size. -// The public part is put inside the DNSKEY record. -// The Algorithm in the key must be set as this will define -// what kind of DNSKEY will be generated. -// The ECDSA algorithms imply a fixed keysize, in that case -// bits should be set to the size of the algorithm. -func (k *DNSKEY) Generate(bits int) (PrivateKey, error) { - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - if bits != 1024 { - return nil, ErrKeySize - } - case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: - if bits < 512 || bits > 4096 { - return nil, ErrKeySize - } - case RSASHA512: - if bits < 1024 || bits > 4096 { - return nil, ErrKeySize - } - case ECDSAP256SHA256: - if bits != 256 { - return nil, ErrKeySize - } - case ECDSAP384SHA384: - if bits != 384 { - return nil, ErrKeySize - } - } - - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - params := new(dsa.Parameters) - if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { - return nil, err - } - priv := new(dsa.PrivateKey) - priv.PublicKey.Parameters = *params - err := dsa.GenerateKey(priv, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) - return (*DSAPrivateKey)(priv), nil - case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: - priv, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) - return (*RSAPrivateKey)(priv), nil - case ECDSAP256SHA256, ECDSAP384SHA384: - var c elliptic.Curve - switch k.Algorithm { - case ECDSAP256SHA256: - c = elliptic.P256() - case ECDSAP384SHA384: - c = elliptic.P384() - } - priv, err := ecdsa.GenerateKey(c, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) - return (*ECDSAPrivateKey)(priv), nil - default: - return nil, ErrAlg - } -} - -// Set the public key (the value E and N) -func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { - if _E == 0 || _N == nil { - return false - } - buf := exponentToBuf(_E) - buf = append(buf, _N.Bytes()...) 
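-	// buf now holds the RFC 3110 wire encoding: a one- or three-octet
-	// exponent length, the exponent, then the modulus.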
- k.PublicKey = toBase64(buf) - return true -} - -// Set the public key for Elliptic Curves -func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { - if _X == nil || _Y == nil { - return false - } - var intlen int - switch k.Algorithm { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) - return true -} - -// Set the public key for DSA -func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { - if _Q == nil || _P == nil || _G == nil || _Y == nil { - return false - } - buf := dsaToBuf(_Q, _P, _G, _Y) - k.PublicKey = toBase64(buf) - return true -} - -// Set the public key (the values E and N) for RSA -// RFC 3110: Section 2. RSA Public KEY Resource Records -func exponentToBuf(_E int) []byte { - var buf []byte - i := big.NewInt(int64(_E)) - if len(i.Bytes()) < 256 { - buf = make([]byte, 1) - buf[0] = uint8(len(i.Bytes())) - } else { - buf = make([]byte, 3) - buf[0] = 0 - buf[1] = uint8(len(i.Bytes()) >> 8) - buf[2] = uint8(len(i.Bytes())) - } - buf = append(buf, i.Bytes()...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func curveToBuf(_X, _Y *big.Int, intlen int) []byte { - buf := intToBytes(_X, intlen) - buf = append(buf, intToBytes(_Y, intlen)...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { - t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) - buf := []byte{byte(t)} - buf = append(buf, intToBytes(_Q, 20)...) - buf = append(buf, intToBytes(_P, 64+t*8)...) - buf = append(buf, intToBytes(_G, 64+t*8)...) - buf = append(buf, intToBytes(_Y, 64+t*8)...) - return buf -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keyscan.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keyscan.go deleted file mode 100644 index ac84fbcb4f..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keyscan.go +++ /dev/null @@ -1,248 +0,0 @@ -package dns - -import ( - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "io" - "math/big" - "strconv" - "strings" -) - -// NewPrivateKey returns a PrivateKey by parsing the string s. -// s should be in the same form of the BIND private key files. -func (k *DNSKEY) NewPrivateKey(s string) (PrivateKey, error) { - if s[len(s)-1] != '\n' { // We need a closing newline - return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") - } - return k.ReadPrivateKey(strings.NewReader(s), "") -} - -// ReadPrivateKey reads a private key from the io.Reader q. The string file is -// only used in error reporting. -// The public key must be known, because some cryptographic algorithms embed -// the public inside the privatekey. 
-func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (PrivateKey, error) { - m, e := parseKey(q, file) - if m == nil { - return nil, e - } - if _, ok := m["private-key-format"]; !ok { - return nil, ErrPrivKey - } - if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { - return nil, ErrPrivKey - } - // TODO(mg): check if the pubkey matches the private key - algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0]) - if err != nil { - return nil, ErrPrivKey - } - switch uint8(algo) { - case DSA: - priv, e := readPrivateKeyDSA(m) - if e != nil { - return nil, e - } - pub := k.publicKeyDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return (*DSAPrivateKey)(priv), e - case RSAMD5: - fallthrough - case RSASHA1: - fallthrough - case RSASHA1NSEC3SHA1: - fallthrough - case RSASHA256: - fallthrough - case RSASHA512: - priv, e := readPrivateKeyRSA(m) - if e != nil { - return nil, e - } - pub := k.publicKeyRSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return (*RSAPrivateKey)(priv), e - case ECCGOST: - return nil, ErrPrivKey - case ECDSAP256SHA256: - fallthrough - case ECDSAP384SHA384: - priv, e := readPrivateKeyECDSA(m) - if e != nil { - return nil, e - } - pub := k.publicKeyECDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return (*ECDSAPrivateKey)(priv), e - default: - return nil, ErrPrivKey - } -} - -// Read a private key (file) string and create a public key. Return the private key. -func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { - p := new(rsa.PrivateKey) - p.Primes = []*big.Int{nil, nil} - for k, v := range m { - switch k { - case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - switch k { - case "modulus": - p.PublicKey.N = big.NewInt(0) - p.PublicKey.N.SetBytes(v1) - case "publicexponent": - i := big.NewInt(0) - i.SetBytes(v1) - p.PublicKey.E = int(i.Int64()) // int64 should be large enough - case "privateexponent": - p.D = big.NewInt(0) - p.D.SetBytes(v1) - case "prime1": - p.Primes[0] = big.NewInt(0) - p.Primes[0].SetBytes(v1) - case "prime2": - p.Primes[1] = big.NewInt(0) - p.Primes[1].SetBytes(v1) - } - case "exponent1", "exponent2", "coefficient": - // not used in Go (yet) - case "created", "publish", "activate": - // not used in Go (yet) - } - } - return p, nil -} - -func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { - p := new(dsa.PrivateKey) - p.X = big.NewInt(0) - for k, v := range m { - switch k { - case "private_value(x)": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.X.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { - p := new(ecdsa.PrivateKey) - p.D = big.NewInt(0) - // TODO: validate that the required flags are present - for k, v := range m { - switch k { - case "privatekey": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.D.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -// parseKey reads a private key from r. It returns a map[string]string, -// with the key-value pairs, or an error when the file is not correct. 
-func parseKey(r io.Reader, file string) (map[string]string, error) { - s := scanInit(r) - m := make(map[string]string) - c := make(chan lex) - k := "" - // Start the lexer - go klexer(s, c) - for l := range c { - // It should alternate - switch l.value { - case zKey: - k = l.token - case zValue: - if k == "" { - return nil, &ParseError{file, "no private key seen", l} - } - //println("Setting", strings.ToLower(k), "to", l.token, "b") - m[strings.ToLower(k)] = l.token - k = "" - } - } - return m, nil -} - -// klexer scans the sourcefile and returns tokens on the channel c. -func klexer(s *scan, c chan lex) { - var l lex - str := "" // Hold the current read text - commt := false - key := true - x, err := s.tokenText() - defer close(c) - for err == nil { - l.column = s.position.Column - l.line = s.position.Line - switch x { - case ':': - if commt { - break - } - l.token = str - if key { - l.value = zKey - c <- l - // Next token is a space, eat it - s.tokenText() - key = false - str = "" - } else { - l.value = zValue - } - case ';': - commt = true - case '\n': - if commt { - // Reset a comment - commt = false - } - l.value = zValue - l.token = str - c <- l - str = "" - commt = false - key = true - default: - if commt { - break - } - str += string(x) - } - x, err = s.tokenText() - } - if len(str) > 0 { - // Send remainder - l.token = str - l.value = zValue - c <- l - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_privkey.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_privkey.go deleted file mode 100644 index 0b8f282b63..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_privkey.go +++ /dev/null @@ -1,144 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "math/big" - "strconv" -) - -const format = "Private-key-format: v1.3\n" - -// PrivateKey ... TODO(miek) -type PrivateKey interface { - Sign([]byte, uint8) ([]byte, error) - String(uint8) string -} - -// PrivateKeyString converts a PrivateKey to a string. This string has the same -// format as the private-key-file of BIND9 (Private-key-format: v1.3). -// It needs some info from the key (the algorithm), so its a method of the -// DNSKEY and calls PrivateKey.String(alg). 
-func (r *DNSKEY) PrivateKeyString(p PrivateKey) string { - return p.String(r.Algorithm) -} - -type RSAPrivateKey rsa.PrivateKey - -func (p *RSAPrivateKey) Sign(hashed []byte, alg uint8) ([]byte, error) { - var hash crypto.Hash - switch alg { - case RSASHA1, RSASHA1NSEC3SHA1: - hash = crypto.SHA1 - case RSASHA256: - hash = crypto.SHA256 - case RSASHA512: - hash = crypto.SHA512 - default: - return nil, ErrAlg - } - return rsa.SignPKCS1v15(nil, (*rsa.PrivateKey)(p), hash, hashed) -} - -func (p *RSAPrivateKey) String(alg uint8) string { - algorithm := strconv.Itoa(int(alg)) + " (" + AlgorithmToString[alg] + ")" - modulus := toBase64(p.PublicKey.N.Bytes()) - e := big.NewInt(int64(p.PublicKey.E)) - publicExponent := toBase64(e.Bytes()) - privateExponent := toBase64(p.D.Bytes()) - prime1 := toBase64(p.Primes[0].Bytes()) - prime2 := toBase64(p.Primes[1].Bytes()) - // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm - // and from: http://code.google.com/p/go/issues/detail?id=987 - one := big.NewInt(1) - p1 := big.NewInt(0).Sub(p.Primes[0], one) - q1 := big.NewInt(0).Sub(p.Primes[1], one) - exp1 := big.NewInt(0).Mod(p.D, p1) - exp2 := big.NewInt(0).Mod(p.D, q1) - coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) - - exponent1 := toBase64(exp1.Bytes()) - exponent2 := toBase64(exp2.Bytes()) - coefficient := toBase64(coeff.Bytes()) - - return format + - "Algorithm: " + algorithm + "\n" + - "Modulus: " + modulus + "\n" + - "PublicExponent: " + publicExponent + "\n" + - "PrivateExponent: " + privateExponent + "\n" + - "Prime1: " + prime1 + "\n" + - "Prime2: " + prime2 + "\n" + - "Exponent1: " + exponent1 + "\n" + - "Exponent2: " + exponent2 + "\n" + - "Coefficient: " + coefficient + "\n" -} - -type ECDSAPrivateKey ecdsa.PrivateKey - -func (p *ECDSAPrivateKey) Sign(hashed []byte, alg uint8) ([]byte, error) { - var intlen int - switch alg { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - default: - return nil, ErrAlg - } - r1, s1, err := ecdsa.Sign(rand.Reader, (*ecdsa.PrivateKey)(p), hashed) - if err != nil { - return nil, err - } - signature := intToBytes(r1, intlen) - signature = append(signature, intToBytes(s1, intlen)...) - return signature, nil -} - -func (p *ECDSAPrivateKey) String(alg uint8) string { - algorithm := strconv.Itoa(int(alg)) + " (" + AlgorithmToString[alg] + ")" - var intlen int - switch alg { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - private := toBase64(intToBytes(p.D, intlen)) - return format + - "Algorithm: " + algorithm + "\n" + - "PrivateKey: " + private + "\n" -} - -type DSAPrivateKey dsa.PrivateKey - -func (p *DSAPrivateKey) Sign(hashed []byte, alg uint8) ([]byte, error) { - r1, s1, err := dsa.Sign(rand.Reader, (*dsa.PrivateKey)(p), hashed) - if err != nil { - return nil, err - } - t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) - signature := []byte{byte(t)} - signature = append(signature, intToBytes(r1, 20)...) - signature = append(signature, intToBytes(s1, 20)...) 
- return signature, nil -} - -func (p *DSAPrivateKey) String(alg uint8) string { - algorithm := strconv.Itoa(int(alg)) + " (" + AlgorithmToString[alg] + ")" - T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) - prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) - subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) - base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) - priv := toBase64(intToBytes(p.X, 20)) - pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) - return format + - "Algorithm: " + algorithm + "\n" + - "Prime(p): " + prime + "\n" + - "Subprime(q): " + subprime + "\n" + - "Base(g): " + base + "\n" + - "Private_value(x): " + priv + "\n" + - "Public_value(y): " + pub + "\n" -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go deleted file mode 100644 index 48c22362c5..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go +++ /dev/null @@ -1,658 +0,0 @@ -package dns - -import ( - "reflect" - "strings" - "testing" - "time" -) - -func getKey() *DNSKEY { - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - return key -} - -func getSoa() *SOA { - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." - soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - return soa -} - -func TestGenerateEC(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = ECDSAP256SHA256 - privkey, _ := key.Generate(256) - t.Log(key.String()) - t.Log(key.PrivateKeyString(privkey)) -} - -func TestGenerateDSA(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = DSA - privkey, _ := key.Generate(1024) - t.Log(key.String()) - t.Log(key.PrivateKeyString(privkey)) -} - -func TestGenerateRSA(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - privkey, _ := key.Generate(1024) - t.Log(key.String()) - t.Log(key.PrivateKeyString(privkey)) -} - -func TestSecure(t *testing.T) { - soa := getSoa() - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = TypeSOA - sig.Algorithm = RSASHA256 - sig.Labels = 2 - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.OrigTtl = 14400 - sig.KeyTag = 12051 - sig.SignerName = "miek.nl." 
- sig.Signature = "oMCbslaAVIp/8kVtLSms3tDABpcPRUgHLrOR48OOplkYo+8TeEGWwkSwaz/MRo2fB4FxW0qj/hTlIjUGuACSd+b1wKdH5GvzRJc2pFmxtCbm55ygAh4EUL0F6U5cKtGJGSXxxg6UFCQ0doJCmiGFa78LolaUOXImJrk6AFrGa0M=" - - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - - // It should validate. Period is checked separately, so this will keep on working - if sig.Verify(key, []RR{soa}) != nil { - t.Error("failure to validate") - } -} - -func TestSignature(t *testing.T) { - sig := new(RRSIG) - sig.Hdr.Name = "miek.nl." - sig.Hdr.Class = ClassINET - sig.Hdr.Ttl = 3600 - sig.TypeCovered = TypeDNSKEY - sig.Algorithm = RSASHA1 - sig.Labels = 2 - sig.OrigTtl = 4000 - sig.Expiration = 1000 //Thu Jan 1 02:06:40 CET 1970 - sig.Inception = 800 //Thu Jan 1 01:13:20 CET 1970 - sig.KeyTag = 34641 - sig.SignerName = "miek.nl." - sig.Signature = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" - - // Should not be valid - if sig.ValidityPeriod(time.Now()) { - t.Error("should not be valid") - } - - sig.Inception = 315565800 //Tue Jan 1 10:10:00 CET 1980 - sig.Expiration = 4102477800 //Fri Jan 1 10:10:00 CET 2100 - if !sig.ValidityPeriod(time.Now()) { - t.Error("should be valid") - } -} - -func TestSignVerify(t *testing.T) { - // The record we want to sign - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." - soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - - soa1 := new(SOA) - soa1.Hdr = RR_Header{"*.miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa1.Ns = "open.nlnetlabs.nl." - soa1.Mbox = "miekg.atoom.net." - soa1.Serial = 1293945905 - soa1.Refresh = 14400 - soa1.Retry = 3600 - soa1.Expire = 604800 - soa1.Minttl = 86400 - - srv := new(SRV) - srv.Hdr = RR_Header{"srv.miek.nl.", TypeSRV, ClassINET, 14400, 0} - srv.Port = 1000 - srv.Weight = 800 - srv.Target = "web1.miek.nl." - - // With this key - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." 
- key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - privkey, _ := key.Generate(512) - - // Fill in the values of the Sig, before signing - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = soa.Hdr.Rrtype - sig.Labels = uint8(CountLabel(soa.Hdr.Name)) // works for all 3 - sig.OrigTtl = soa.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() // Get the keyfrom the Key - sig.SignerName = key.Hdr.Name - sig.Algorithm = RSASHA256 - - for _, r := range []RR{soa, soa1, srv} { - if sig.Sign(privkey, []RR{r}) != nil { - t.Error("failure to sign the record") - continue - } - if sig.Verify(key, []RR{r}) != nil { - t.Error("failure to validate") - continue - } - t.Logf("validated: %s", r.Header().Name) - } -} - -func Test65534(t *testing.T) { - t6 := new(RFC3597) - t6.Hdr = RR_Header{"miek.nl.", 65534, ClassINET, 14400, 0} - t6.Rdata = "505D870001" - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - privkey, _ := key.Generate(1024) - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = t6.Hdr.Rrtype - sig.Labels = uint8(CountLabel(t6.Hdr.Name)) - sig.OrigTtl = t6.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() - sig.SignerName = key.Hdr.Name - sig.Algorithm = RSASHA256 - if err := sig.Sign(privkey, []RR{t6}); err != nil { - t.Error(err) - t.Error("failure to sign the TYPE65534 record") - } - if err := sig.Verify(key, []RR{t6}); err != nil { - t.Error(err) - t.Error("failure to validate") - } else { - t.Logf("validated: %s", t6.Header().Name) - } -} - -func TestDnskey(t *testing.T) { - pubkey, err := ReadRR(strings.NewReader(` -miek.nl. 
IN DNSKEY 256 3 10 AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL ;{id = 5240 (zsk), size = 1024b} -`), "Kmiek.nl.+010+05240.key") - if err != nil { - t.Fatal(err) - } - privStr := `Private-key-format: v1.3 -Algorithm: 10 (RSASHA512) -Modulus: m4wK7YV26AeROtdiCXmqLG9wPDVoMOW8vjr/EkpscEAdjXp81RvZvrlzCSjYmz9onFRgltmTl3AINnFh+t9tlW0M9C5zejxBoKFXELv8ljPYAdz2oe+pDWPhWsfvVFYg2VCjpViPM38EakyE5mhk4TDOnUd+w4TeU1hyhZTWyYs= -PublicExponent: AQAB -PrivateExponent: UfCoIQ/Z38l8vB6SSqOI/feGjHEl/fxIPX4euKf0D/32k30fHbSaNFrFOuIFmWMB3LimWVEs6u3dpbB9CQeCVg7hwU5puG7OtuiZJgDAhNeOnxvo5btp4XzPZrJSxR4WNQnwIiYWbl0aFlL1VGgHC/3By89ENZyWaZcMLW4KGWE= -Prime1: yxwC6ogAu8aVcDx2wg1V0b5M5P6jP8qkRFVMxWNTw60Vkn+ECvw6YAZZBHZPaMyRYZLzPgUlyYRd0cjupy4+fQ== -Prime2: xA1bF8M0RTIQ6+A11AoVG6GIR/aPGg5sogRkIZ7ID/sF6g9HMVU/CM2TqVEBJLRPp73cv6ZeC3bcqOCqZhz+pw== -Exponent1: xzkblyZ96bGYxTVZm2/vHMOXswod4KWIyMoOepK6B/ZPcZoIT6omLCgtypWtwHLfqyCz3MK51Nc0G2EGzg8rFQ== -Exponent2: Pu5+mCEb7T5F+kFNZhQadHUklt0JUHbi3hsEvVoHpEGSw3BGDQrtIflDde0/rbWHgDPM4WQY+hscd8UuTXrvLw== -Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1gf8zENMYwYLeWpuYlFQ== -` - privkey, err := pubkey.(*DNSKEY).ReadPrivateKey(strings.NewReader(privStr), - "Kmiek.nl.+010+05240.private") - if err != nil { - t.Fatal(err) - } - if pubkey.(*DNSKEY).PublicKey != "AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL" { - t.Error("pubkey is not what we've read") - } - if pubkey.(*DNSKEY).PrivateKeyString(privkey) != privStr { - t.Error("privkey is not what we've read") - t.Errorf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey)) - } -} - -func TestTag(t *testing.T) { - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 3600 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - - tag := key.KeyTag() - if tag != 12051 { - t.Errorf("wrong key tag: %d for key %v", tag, key) - } -} - -func TestKeyRSA(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 3600 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - priv, _ := key.Generate(2048) - - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." 
- soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = TypeSOA - sig.Algorithm = RSASHA256 - sig.Labels = 2 - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.OrigTtl = soa.Hdr.Ttl - sig.KeyTag = key.KeyTag() - sig.SignerName = key.Hdr.Name - - if err := sig.Sign(priv, []RR{soa}); err != nil { - t.Error("failed to sign") - return - } - if err := sig.Verify(key, []RR{soa}); err != nil { - t.Error("failed to verify") - } -} - -func TestKeyToDS(t *testing.T) { - key := new(DNSKEY) - key.Hdr.Name = "miek.nl." - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Class = ClassINET - key.Hdr.Ttl = 3600 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = RSASHA256 - key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" - - ds := key.ToDS(SHA1) - if strings.ToUpper(ds.Digest) != "B5121BDB5B8D86D0CC5FFAFBAAABE26C3E20BAC1" { - t.Errorf("wrong DS digest for SHA1\n%v", ds) - } -} - -func TestSignRSA(t *testing.T) { - pub := "miek.nl. IN DNSKEY 256 3 5 AwEAAb+8lGNCxJgLS8rYVer6EnHVuIkQDghdjdtewDzU3G5R7PbMbKVRvH2Ma7pQyYceoaqWZQirSj72euPWfPxQnMy9ucCylA+FuH9cSjIcPf4PqJfdupHk9X6EBYjxrCLY4p1/yBwgyBIRJtZtAqM3ceAH2WovEJD6rTtOuHo5AluJ" - - priv := `Private-key-format: v1.3 -Algorithm: 5 (RSASHA1) -Modulus: v7yUY0LEmAtLythV6voScdW4iRAOCF2N217APNTcblHs9sxspVG8fYxrulDJhx6hqpZlCKtKPvZ649Z8/FCczL25wLKUD4W4f1xKMhw9/g+ol926keT1foQFiPGsItjinX/IHCDIEhEm1m0Cozdx4AfZai8QkPqtO064ejkCW4k= -PublicExponent: AQAB -PrivateExponent: YPwEmwjk5HuiROKU4xzHQ6l1hG8Iiha4cKRG3P5W2b66/EN/GUh07ZSf0UiYB67o257jUDVEgwCuPJz776zfApcCB4oGV+YDyEu7Hp/rL8KcSN0la0k2r9scKwxTp4BTJT23zyBFXsV/1wRDK1A5NxsHPDMYi2SoK63Enm/1ptk= -Prime1: /wjOG+fD0ybNoSRn7nQ79udGeR1b0YhUA5mNjDx/x2fxtIXzygYk0Rhx9QFfDy6LOBvz92gbNQlzCLz3DJt5hw== -Prime2: wHZsJ8OGhkp5p3mrJFZXMDc2mbYusDVTA+t+iRPdS797Tj0pjvU2HN4vTnTj8KBQp6hmnY7dLp9Y1qserySGbw== -Exponent1: N0A7FsSRIg+IAN8YPQqlawoTtG1t1OkJ+nWrurPootScApX6iMvn8fyvw3p2k51rv84efnzpWAYiC8SUaQDNxQ== -Exponent2: SvuYRaGyvo0zemE3oS+WRm2scxR8eiA8WJGeOc+obwOKCcBgeZblXzfdHGcEC1KaOcetOwNW/vwMA46lpLzJNw== -Coefficient: 8+7ZN/JgByqv0NfULiFKTjtyegUcijRuyij7yNxYbCBneDvZGxJwKNi4YYXWx743pcAj4Oi4Oh86gcmxLs+hGw== -Created: 20110302104537 -Publish: 20110302104537 -Activate: 20110302104537` - - xk, _ := NewRR(pub) - k := xk.(*DNSKEY) - p, err := k.NewPrivateKey(priv) - if err != nil { - t.Error(err) - } - switch priv := p.(type) { - case *RSAPrivateKey: - if 65537 != priv.PublicKey.E { - t.Error("exponenent should be 65537") - } - default: - t.Errorf("we should have read an RSA key: %v", priv) - } - if k.KeyTag() != 37350 { - t.Errorf("keytag should be 37350, got %d %v", k.KeyTag(), k) - } - - soa := new(SOA) - soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} - soa.Ns = "open.nlnetlabs.nl." - soa.Mbox = "miekg.atoom.net." 
- soa.Serial = 1293945905 - soa.Refresh = 14400 - soa.Retry = 3600 - soa.Expire = 604800 - soa.Minttl = 86400 - - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = k.KeyTag() - sig.SignerName = k.Hdr.Name - sig.Algorithm = k.Algorithm - - sig.Sign(p, []RR{soa}) - if sig.Signature != "D5zsobpQcmMmYsUMLxCVEtgAdCvTu8V/IEeP4EyLBjqPJmjt96bwM9kqihsccofA5LIJ7DN91qkCORjWSTwNhzCv7bMyr2o5vBZElrlpnRzlvsFIoAZCD9xg6ZY7ZyzUJmU6IcTwG4v3xEYajcpbJJiyaw/RqR90MuRdKPiBzSo=" { - t.Errorf("signature is not correct: %v", sig) - } -} - -func TestSignVerifyECDSA(t *testing.T) { - pub := `example.net. 3600 IN DNSKEY 257 3 14 ( - xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 - w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 - /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` - priv := `Private-key-format: v1.2 -Algorithm: 14 (ECDSAP384SHA384) -PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` - - eckey, err := NewRR(pub) - if err != nil { - t.Fatal(err) - } - privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv) - if err != nil { - t.Fatal(err) - } - // TODO: Create separate test for this - ds := eckey.(*DNSKEY).ToDS(SHA384) - if ds.KeyTag != 10771 { - t.Fatal("wrong keytag on DS") - } - if ds.Digest != "72d7b62976ce06438e9c0bf319013cf801f09ecc84b8d7e9495f27e305c6a9b0563a9b5f4d288405c3008a946df983d6" { - t.Fatal("wrong DS Digest") - } - a, _ := NewRR("www.example.net. 3600 IN A 192.0.2.1") - sig := new(RRSIG) - sig.Hdr = RR_Header{"example.net.", TypeRRSIG, ClassINET, 14400, 0} - sig.Expiration, _ = StringToTime("20100909102025") - sig.Inception, _ = StringToTime("20100812102025") - sig.KeyTag = eckey.(*DNSKEY).KeyTag() - sig.SignerName = eckey.(*DNSKEY).Hdr.Name - sig.Algorithm = eckey.(*DNSKEY).Algorithm - - if sig.Sign(privkey, []RR{a}) != nil { - t.Fatal("failure to sign the record") - } - - if err := sig.Verify(eckey.(*DNSKEY), []RR{a}); err != nil { - t.Fatalf("Failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", - eckey.(*DNSKEY).String(), - a.String(), - sig.String(), - eckey.(*DNSKEY).PrivateKeyString(privkey), - err, - ) - } -} - -func TestSignVerifyECDSA2(t *testing.T) { - srv1, err := NewRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.") - if err != nil { - t.Fatal(err) - } - srv := srv1.(*SRV) - - // With this key - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." 
- key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = ECDSAP256SHA256 - privkey, err := key.Generate(256) - if err != nil { - t.Fatal("failure to generate key") - } - - // Fill in the values of the Sig, before signing - sig := new(RRSIG) - sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} - sig.TypeCovered = srv.Hdr.Rrtype - sig.Labels = uint8(CountLabel(srv.Hdr.Name)) // works for all 3 - sig.OrigTtl = srv.Hdr.Ttl - sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" - sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" - sig.KeyTag = key.KeyTag() // Get the keyfrom the Key - sig.SignerName = key.Hdr.Name - sig.Algorithm = ECDSAP256SHA256 - - if sig.Sign(privkey, []RR{srv}) != nil { - t.Fatal("failure to sign the record") - } - - err = sig.Verify(key, []RR{srv}) - if err != nil { - t.Logf("Failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", - key.String(), - srv.String(), - sig.String(), - key.PrivateKeyString(privkey), - err, - ) - } -} - -// Here the test vectors from the relevant RFCs are checked. -// rfc6605 6.1 -func TestRFC6605P256(t *testing.T) { - exDNSKEY := `example.net. 3600 IN DNSKEY 257 3 13 ( - GojIhhXUN/u4v54ZQqGSnyhWJwaubCvTmeexv7bR6edb - krSqQpF64cYbcB7wNcP+e+MAnLr+Wi9xMWyQLc8NAA== )` - exPriv := `Private-key-format: v1.2 -Algorithm: 13 (ECDSAP256SHA256) -PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=` - rrDNSKEY, err := NewRR(exDNSKEY) - if err != nil { - t.Fatal(err) - } - priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) - if err != nil { - t.Fatal(err) - } - - exDS := `example.net. 3600 IN DS 55648 13 2 ( - b4c8c1fe2e7477127b27115656ad6256f424625bf5c1 - e2770ce6d6e37df61d17 )` - rrDS, err := NewRR(exDS) - if err != nil { - t.Fatal(err) - } - ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256) - if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { - t.Errorf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) - } - - exA := `www.example.net. 3600 IN A 192.0.2.1` - exRRSIG := `www.example.net. 3600 IN RRSIG A 13 3 3600 ( - 20100909100439 20100812100439 55648 example.net. - qx6wLYqmh+l9oCKTN6qIc+bw6ya+KJ8oMz0YP107epXA - yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )` - rrA, err := NewRR(exA) - if err != nil { - t.Fatal(err) - } - rrRRSIG, err := NewRR(exRRSIG) - if err != nil { - t.Fatal(err) - } - if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("Failure to validate the spec RRSIG: %v", err) - } - - ourRRSIG := &RRSIG{ - Hdr: RR_Header{ - Ttl: rrA.Header().Ttl, - }, - KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), - SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, - Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, - } - ourRRSIG.Expiration, _ = StringToTime("20100909100439") - ourRRSIG.Inception, _ = StringToTime("20100812100439") - err = ourRRSIG.Sign(priv, []RR{rrA}) - if err != nil { - t.Fatal(err) - } - - if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("Failure to validate our RRSIG: %v", err) - } - - // Signatures are randomized - rrRRSIG.(*RRSIG).Signature = "" - ourRRSIG.Signature = "" - if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { - t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) - } -} - -// rfc6605 6.2 -func TestRFC6605P384(t *testing.T) { - exDNSKEY := `example.net. 
3600 IN DNSKEY 257 3 14 ( - xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 - w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 - /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` - exPriv := `Private-key-format: v1.2 -Algorithm: 14 (ECDSAP384SHA384) -PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` - rrDNSKEY, err := NewRR(exDNSKEY) - if err != nil { - t.Fatal(err) - } - priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) - if err != nil { - t.Fatal(err) - } - - exDS := `example.net. 3600 IN DS 10771 14 4 ( - 72d7b62976ce06438e9c0bf319013cf801f09ecc84b8 - d7e9495f27e305c6a9b0563a9b5f4d288405c3008a94 - 6df983d6 )` - rrDS, err := NewRR(exDS) - if err != nil { - t.Fatal(err) - } - ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA384) - if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { - t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) - } - - exA := `www.example.net. 3600 IN A 192.0.2.1` - exRRSIG := `www.example.net. 3600 IN RRSIG A 14 3 3600 ( - 20100909102025 20100812102025 10771 example.net. - /L5hDKIvGDyI1fcARX3z65qrmPsVz73QD1Mr5CEqOiLP - 95hxQouuroGCeZOvzFaxsT8Glr74hbavRKayJNuydCuz - WTSSPdz7wnqXL5bdcJzusdnI0RSMROxxwGipWcJm )` - rrA, err := NewRR(exA) - if err != nil { - t.Fatal(err) - } - rrRRSIG, err := NewRR(exRRSIG) - if err != nil { - t.Fatal(err) - } - if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("Failure to validate the spec RRSIG: %v", err) - } - - ourRRSIG := &RRSIG{ - Hdr: RR_Header{ - Ttl: rrA.Header().Ttl, - }, - KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), - SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, - Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, - } - ourRRSIG.Expiration, _ = StringToTime("20100909102025") - ourRRSIG.Inception, _ = StringToTime("20100812102025") - err = ourRRSIG.Sign(priv, []RR{rrA}) - if err != nil { - t.Fatal(err) - } - - if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { - t.Errorf("Failure to validate our RRSIG: %v", err) - } - - // Signatures are randomized - rrRRSIG.(*RRSIG).Signature = "" - ourRRSIG.Signature = "" - if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { - t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/doc.go b/Godeps/_workspace/src/github.com/miekg/dns/doc.go deleted file mode 100644 index 34d59ab144..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/doc.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Package dns implements a full featured interface to the Domain Name System. -Server- and client-side programming is supported. -The package allows complete control over what is send out to the DNS. The package -API follows the less-is-more principle, by presenting a small, clean interface. - -The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers, -TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. -Note that domain names MUST be fully qualified, before sending them, unqualified -names in a message will result in a packing failure. - -Resource records are native types. They are not stored in wire format. -Basic usage pattern for creating a new resource record: - - r := new(dns.MX) - r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} - r.Preference = 10 - r.Mx = "mx.miek.nl." - -Or directly from a string: - - mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") - -Or when the default TTL (3600) and class (IN) suit you: - - mx, err := dns.NewRR("miek.nl. 
MX 10 mx.miek.nl.") - -Or even: - - mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") - -In the DNS messages are exchanged, these messages contain resource -records (sets). Use pattern for creating a message: - - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - -Or when not certain if the domain name is fully qualified: - - m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) - -The message m is now a message with the question section set to ask -the MX records for the miek.nl. zone. - -The following is slightly more verbose, but more flexible: - - m1 := new(dns.Msg) - m1.Id = dns.Id() - m1.RecursionDesired = true - m1.Question = make([]dns.Question, 1) - m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} - -After creating a message it can be send. -Basic use pattern for synchronous querying the DNS at a -server configured on 127.0.0.1 and port 53: - - c := new(dns.Client) - in, rtt, err := c.Exchange(m1, "127.0.0.1:53") - -Suppressing -multiple outstanding queries (with the same question, type and class) is as easy as setting: - - c.SingleInflight = true - -If these "advanced" features are not needed, a simple UDP query can be send, -with: - - in, err := dns.Exchange(m1, "127.0.0.1:53") - -When this functions returns you will get dns message. A dns message consists -out of four sections. -The question section: in.Question, the answer section: in.Answer, -the authority section: in.Ns and the additional section: in.Extra. - -Each of these sections (except the Question section) contain a []RR. Basic -use pattern for accessing the rdata of a TXT RR as the first RR in -the Answer section: - - if t, ok := in.Answer[0].(*dns.TXT); ok { - // do something with t.Txt - } - -Domain Name and TXT Character String Representations - -Both domain names and TXT character strings are converted to presentation -form both when unpacked and when converted to strings. - -For TXT character strings, tabs, carriage returns and line feeds will be -converted to \t, \r and \n respectively. Back slashes and quotations marks -will be escaped. Bytes below 32 and above 127 will be converted to \DDD -form. - -For domain names, in addition to the above rules brackets, periods, -spaces, semicolons and the at symbol are escaped. - -DNSSEC - -DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It -uses public key cryptography to sign resource records. The -public keys are stored in DNSKEY records and the signatures in RRSIG records. - -Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit -to an request. - - m := new(dns.Msg) - m.SetEdns0(4096, true) - -Signature generation, signature verification and key generation are all supported. - -DYNAMIC UPDATES - -Dynamic updates reuses the DNS message format, but renames three of -the sections. Question is Zone, Answer is Prerequisite, Authority is -Update, only the Additional is not renamed. See RFC 2136 for the gory details. - -You can set a rather complex set of rules for the existence of absence of -certain resource records or names in a zone to specify if resource records -should be added or removed. The table from RFC 2136 supplemented with the Go -DNS function shows which functions exist to specify the prerequisites. 
-
-3.2.4 - Table Of Metavalues Used In Prerequisite Section
-
-   CLASS    TYPE     RDATA    Meaning                    Function
-   --------------------------------------------------------------
-   ANY      ANY      empty    Name is in use             dns.NameUsed
-   ANY      rrset    empty    RRset exists (value indep) dns.RRsetUsed
-   NONE     ANY      empty    Name is not in use         dns.NameNotUsed
-   NONE     rrset    empty    RRset does not exist       dns.RRsetNotUsed
-   zone     rrset    rr       RRset exists (value dep)   dns.Used
-
-The prerequisite section can also be left empty.
-Once you have decided on the prerequisites you can tell which RRs should
-be added or deleted. The next table shows the options you have and
-which functions to call.
-
-3.4.2.6 - Table Of Metavalues Used In Update Section
-
-   CLASS    TYPE     RDATA    Meaning                     Function
-   ---------------------------------------------------------------
-   ANY      ANY      empty    Delete all RRsets from name dns.RemoveName
-   ANY      rrset    empty    Delete an RRset             dns.RemoveRRset
-   NONE     rrset    rr       Delete an RR from RRset     dns.Remove
-   zone     rrset    rr       Add to an RRset             dns.Insert
-
-(A short sketch combining a prerequisite with an update is given below.)
-
-TRANSACTION SIGNATURE
-
-A TSIG, or transaction signature, adds an HMAC TSIG record to each message sent.
-The supported algorithms include HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
-
-Basic use pattern when querying with a TSIG name "axfr." (note that these key names
-must be fully qualified - as they are domain names) and the base64 secret
-"so6ZGir4GPAqINNh9U5c3A==":
-
-	c := new(dns.Client)
-	c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
-	m := new(dns.Msg)
-	m.SetQuestion("miek.nl.", dns.TypeMX)
-	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
-	...
-	// When sending, the TSIG RR is calculated and filled in before sending
-
-When requesting a zone transfer with TSIG (almost all TSIG usage is for
-zone transfers), this is the basic use pattern. In this example we request
-an AXFR for miek.nl. with the TSIG key named "axfr." and secret
-"so6ZGir4GPAqINNh9U5c3A==", using the server 176.58.119.54:
-
-	t := new(dns.Transfer)
-	m := new(dns.Msg)
-	t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
-	m.SetAxfr("miek.nl.")
-	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
-	c, err := t.In(m, "176.58.119.54:53")
-	for r := range c { ... }
-
-You can now read the records from the transfer as they come in. Each envelope
-is checked with TSIG. If something is not correct an error is returned.
-
-Basic use pattern for validating and replying to a message that has TSIG set:
-
-	server := &dns.Server{Addr: ":53", Net: "udp"}
-	server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
-	go server.ListenAndServe()
-	dns.HandleFunc(".", handleRequest)
-
-	func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
-		m := new(dns.Msg)
-		m.SetReply(r)
-		if r.IsTsig() != nil {
-			if w.TsigStatus() == nil {
-				// *Msg r has a TSIG record and it was validated
-				m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
-			} else {
-				// *Msg r has a TSIG record and it was not validated
-			}
-		}
-		w.WriteMsg(m)
-	}
-
-PRIVATE RRS
-
-RFC 6895 sets aside a range of type codes for private use. This range is
-65,280 - 65,534 (0xFF00 - 0xFFFE). These can be used when experimenting with
-new Resource Records, before requesting an official type code from IANA.
-
-See http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more
-information.
-
-EDNS0
-
-EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
-by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
-abused.
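Picking up the forward reference from the RFC 2136 tables above: a hedged sketch that combines a prerequisite with an update. The zone name, record, and server address are placeholders; in this vendored version the helpers from the tables are methods on *dns.Msg:

	m := new(dns.Msg)
	m.SetUpdate("example.org.") // the Question section becomes the Zone section
	rr, _ := dns.NewRR("host.example.org. 300 IN A 192.0.2.10")
	m.NameNotUsed([]dns.RR{rr}) // prerequisite: NONE/ANY, name must not be in use
	m.Insert([]dns.RR{rr})      // update: zone/rrset, add the RR to its RRset
	c := new(dns.Client)
	in, _, err := c.Exchange(m, "192.0.2.1:53") // the zone's primary name server
	// Check err and in.Rcode before assuming the update took effect.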
-Basic use pattern for creating an (empty) OPT RR: - - o := new(dns.OPT) - o.Hdr.Name = "." // MUST be the root zone, per definition. - o.Hdr.Rrtype = dns.TypeOPT - -The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) -interfaces. Currently only a few have been standardized: EDNS0_NSID -(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note -that these options may be combined in an OPT RR. -Basic use pattern for a server to check if (and which) options are set: - - // o is a dns.OPT - for _, s := range o.Option { - switch e := s.(type) { - case *dns.EDNS0_NSID: - // do stuff with e.Nsid - case *dns.EDNS0_SUBNET: - // access e.Family, e.Address, etc. - } - } - -SIG(0) - -From RFC 2931: - - SIG(0) provides protection for DNS transactions and requests .... - ... protection for glue records, DNS requests, protection for message headers - on requests and responses, and protection of the overall integrity of a response. - -It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared -secret approach in TSIG. -Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and -RSASHA512. - -Signing subsequent messages in multi-message sessions is not implemented. -*/ -package dns diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dyn_test.go b/Godeps/_workspace/src/github.com/miekg/dns/dyn_test.go deleted file mode 100644 index 09986a5e4e..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/dyn_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package dns - -// Find better solution diff --git a/Godeps/_workspace/src/github.com/miekg/dns/edns.go b/Godeps/_workspace/src/github.com/miekg/dns/edns.go deleted file mode 100644 index d2bfecbb2f..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/edns.go +++ /dev/null @@ -1,523 +0,0 @@ -package dns - -import ( - "encoding/hex" - "errors" - "net" - "strconv" -) - -// EDNS0 Option codes. -const ( - EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 - EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt - EDNS0NSID = 0x3 // nsid (RFC5001) - EDNS0DAU = 0x5 // DNSSEC Algorithm Understood - EDNS0DHU = 0x6 // DS Hash Understood - EDNS0N3U = 0x7 // NSEC3 Hash Understood - EDNS0SUBNET = 0x8 // client-subnet (RFC6891) - EDNS0EXPIRE = 0x9 // EDNS0 expire - EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET - EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891) - EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891) - _DO = 1 << 15 // dnssec ok -) - -// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. -// See RFC 6891. 
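In practice the OPT RR defined just below is rarely built field by field on the query side: Msg.SetEdns0 attaches one, and Msg.IsEdns0 retrieves the OPT from a reply. A hedged sketch; the server address is a placeholder:

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)
	m.SetEdns0(4096, true) // OPT RR with a 4096-byte UDP size and the DO bit set
	c := new(dns.Client)
	in, _, err := c.Exchange(m, "192.0.2.1:53")
	if err == nil {
		if opt := in.IsEdns0(); opt != nil {
			// inspect opt.UDPSize(), opt.Do(), opt.Version(), opt.ExtendedRcode()
		}
	}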
-type OPT struct { - Hdr RR_Header - Option []EDNS0 `dns:"opt"` -} - -func (rr *OPT) Header() *RR_Header { - return &rr.Hdr -} - -func (rr *OPT) String() string { - s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " - if rr.Do() { - s += "flags: do; " - } else { - s += "flags: ; " - } - s += "udp: " + strconv.Itoa(int(rr.UDPSize())) - - for _, o := range rr.Option { - switch o.(type) { - case *EDNS0_NSID: - s += "\n; NSID: " + o.String() - h, e := o.pack() - var r string - if e == nil { - for _, c := range h { - r += "(" + string(c) + ")" - } - s += " " + r - } - case *EDNS0_SUBNET: - s += "\n; SUBNET: " + o.String() - if o.(*EDNS0_SUBNET).DraftOption { - s += " (draft)" - } - case *EDNS0_UL: - s += "\n; UPDATE LEASE: " + o.String() - case *EDNS0_LLQ: - s += "\n; LONG LIVED QUERIES: " + o.String() - case *EDNS0_DAU: - s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() - case *EDNS0_DHU: - s += "\n; DS HASH UNDERSTOOD: " + o.String() - case *EDNS0_N3U: - s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() - case *EDNS0_LOCAL: - s += "\n; LOCAL OPT: " + o.String() - } - } - return s -} - -func (rr *OPT) len() int { - l := rr.Hdr.len() - for i := 0; i < len(rr.Option); i++ { - l += 4 // Account for 2-byte option code and 2-byte option length. - lo, _ := rr.Option[i].pack() - l += len(lo) - } - return l -} - -func (rr *OPT) copy() RR { - return &OPT{*rr.Hdr.copyHeader(), rr.Option} -} - -// return the old value -> delete SetVersion? - -// Version returns the EDNS version used. Only zero is defined. -func (rr *OPT) Version() uint8 { - return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16) -} - -// SetVersion sets the version of EDNS. This is usually zero. -func (rr *OPT) SetVersion(v uint8) { - rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16) -} - -// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). -func (rr *OPT) ExtendedRcode() uint8 { - return uint8((rr.Hdr.Ttl & 0xFF000000) >> 24) -} - -// SetExtendedRcode sets the EDNS extended RCODE field. -func (rr *OPT) SetExtendedRcode(v uint8) { - rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v) << 24) -} - -// UDPSize returns the UDP buffer size. -func (rr *OPT) UDPSize() uint16 { - return rr.Hdr.Class -} - -// SetUDPSize sets the UDP buffer size. -func (rr *OPT) SetUDPSize(size uint16) { - rr.Hdr.Class = size -} - -// Do returns the value of the DO (DNSSEC OK) bit. -func (rr *OPT) Do() bool { - return rr.Hdr.Ttl&_DO == _DO -} - -// SetDo sets the DO (DNSSEC OK) bit. -func (rr *OPT) SetDo() { - rr.Hdr.Ttl |= _DO -} - -// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to -// it. -type EDNS0 interface { - // Option returns the option code for the option. - Option() uint16 - // pack returns the bytes of the option data. - pack() ([]byte, error) - // unpack sets the data as found in the buffer. Is also sets - // the length of the slice as the length of the option data. - unpack([]byte) error - // String returns the string representation of the option. - String() string -} - -// The nsid EDNS0 option is used to retrieve a nameserver -// identifier. When sending a request Nsid must be set to the empty string -// The identifier is an opaque string encoded as hex. -// Basic use pattern for creating an nsid option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." 
-// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_NSID) -// e.Code = dns.EDNS0NSID -// e.Nsid = "AA" -// o.Option = append(o.Option, e) -type EDNS0_NSID struct { - Code uint16 // Always EDNS0NSID - Nsid string // This string needs to be hex encoded -} - -func (e *EDNS0_NSID) pack() ([]byte, error) { - h, err := hex.DecodeString(e.Nsid) - if err != nil { - return nil, err - } - return h, nil -} - -func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } -func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } -func (e *EDNS0_NSID) String() string { return string(e.Nsid) } - -// The subnet EDNS0 option is used to give the remote nameserver -// an idea of where the client lives. It can then give back a different -// answer depending on the location or network topology. -// Basic use pattern for creating an subnet option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_SUBNET) -// e.Code = dns.EDNS0SUBNET -// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 -// e.NetMask = 32 // 32 for IPV4, 128 for IPv6 -// e.SourceScope = 0 -// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 -// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 -// o.Option = append(o.Option, e) -type EDNS0_SUBNET struct { - Code uint16 // Always EDNS0SUBNET - Family uint16 // 1 for IP, 2 for IP6 - SourceNetmask uint8 - SourceScope uint8 - Address net.IP - DraftOption bool // Set to true if using the old (0x50fa) option code -} - -func (e *EDNS0_SUBNET) Option() uint16 { - if e.DraftOption { - return EDNS0SUBNETDRAFT - } - return EDNS0SUBNET -} - -func (e *EDNS0_SUBNET) pack() ([]byte, error) { - b := make([]byte, 4) - b[0], b[1] = packUint16(e.Family) - b[2] = e.SourceNetmask - b[3] = e.SourceScope - switch e.Family { - case 1: - if e.SourceNetmask > net.IPv4len*8 { - return nil, errors.New("dns: bad netmask") - } - ip := make([]byte, net.IPv4len) - a := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) - for i := 0; i < net.IPv4len; i++ { - if i+1 > len(e.Address) { - break - } - ip[i] = a[i] - } - needLength := e.SourceNetmask / 8 - if e.SourceNetmask%8 > 0 { - needLength++ - } - ip = ip[:needLength] - b = append(b, ip...) - case 2: - if e.SourceNetmask > net.IPv6len*8 { - return nil, errors.New("dns: bad netmask") - } - ip := make([]byte, net.IPv6len) - a := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) - for i := 0; i < net.IPv6len; i++ { - if i+1 > len(e.Address) { - break - } - ip[i] = a[i] - } - needLength := e.SourceNetmask / 8 - if e.SourceNetmask%8 > 0 { - needLength++ - } - ip = ip[:needLength] - b = append(b, ip...) 
- default: - return nil, errors.New("dns: bad address family") - } - return b, nil -} - -func (e *EDNS0_SUBNET) unpack(b []byte) error { - lb := len(b) - if lb < 4 { - return ErrBuf - } - e.Family, _ = unpackUint16(b, 0) - e.SourceNetmask = b[2] - e.SourceScope = b[3] - switch e.Family { - case 1: - addr := make([]byte, 4) - for i := 0; i < int(e.SourceNetmask/8); i++ { - if i >= len(addr) || 4+i >= len(b) { - return ErrBuf - } - addr[i] = b[4+i] - } - e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) - case 2: - addr := make([]byte, 16) - for i := 0; i < int(e.SourceNetmask/8); i++ { - if i >= len(addr) || 4+i >= len(b) { - return ErrBuf - } - addr[i] = b[4+i] - } - e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], - addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], - addr[11], addr[12], addr[13], addr[14], addr[15]} - } - return nil -} - -func (e *EDNS0_SUBNET) String() (s string) { - if e.Address == nil { - s = "" - } else if e.Address.To4() != nil { - s = e.Address.String() - } else { - s = "[" + e.Address.String() + "]" - } - s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) - return -} - -// The UL (Update Lease) EDNS0 (draft RFC) option is used to tell the server to set -// an expiration on an update RR. This is helpful for clients that cannot clean -// up after themselves. This is a draft RFC and more information can be found at -// http://files.dns-sd.org/draft-sekar-dns-ul.txt -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_UL) -// e.Code = dns.EDNS0UL -// e.Lease = 120 // in seconds -// o.Option = append(o.Option, e) -type EDNS0_UL struct { - Code uint16 // Always EDNS0UL - Lease uint32 -} - -func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } -func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } - -// Copied: http://golang.org/src/pkg/net/dnsmsg.go -func (e *EDNS0_UL) pack() ([]byte, error) { - b := make([]byte, 4) - b[0] = byte(e.Lease >> 24) - b[1] = byte(e.Lease >> 16) - b[2] = byte(e.Lease >> 8) - b[3] = byte(e.Lease) - return b, nil -} - -func (e *EDNS0_UL) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Lease = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3]) - return nil -} - -// Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 -// Implemented for completeness, as the EDNS0 type code is assigned. 
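One nit on the EDNS0_SUBNET doc comment above: it assigns e.NetMask, but the struct field is actually SourceNetmask. A corrected, hedged sketch of attaching a client-subnet option (address and prefix are placeholders):

	o := new(dns.OPT)
	o.Hdr.Name = "."
	o.Hdr.Rrtype = dns.TypeOPT
	e := new(dns.EDNS0_SUBNET)
	e.Code = dns.EDNS0SUBNET
	e.Family = 1         // 1 = IPv4, 2 = IPv6
	e.SourceNetmask = 24 // the field is SourceNetmask, not NetMask
	e.SourceScope = 0
	e.Address = net.ParseIP("192.0.2.0").To4() // use To4() for an IPv4 address
	o.Option = append(o.Option, e)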
-type EDNS0_LLQ struct { - Code uint16 // Always EDNS0LLQ - Version uint16 - Opcode uint16 - Error uint16 - Id uint64 - LeaseLife uint32 -} - -func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } - -func (e *EDNS0_LLQ) pack() ([]byte, error) { - b := make([]byte, 18) - b[0], b[1] = packUint16(e.Version) - b[2], b[3] = packUint16(e.Opcode) - b[4], b[5] = packUint16(e.Error) - b[6] = byte(e.Id >> 56) - b[7] = byte(e.Id >> 48) - b[8] = byte(e.Id >> 40) - b[9] = byte(e.Id >> 32) - b[10] = byte(e.Id >> 24) - b[11] = byte(e.Id >> 16) - b[12] = byte(e.Id >> 8) - b[13] = byte(e.Id) - b[14] = byte(e.LeaseLife >> 24) - b[15] = byte(e.LeaseLife >> 16) - b[16] = byte(e.LeaseLife >> 8) - b[17] = byte(e.LeaseLife) - return b, nil -} - -func (e *EDNS0_LLQ) unpack(b []byte) error { - if len(b) < 18 { - return ErrBuf - } - e.Version, _ = unpackUint16(b, 0) - e.Opcode, _ = unpackUint16(b, 2) - e.Error, _ = unpackUint16(b, 4) - e.Id = uint64(b[6])<<56 | uint64(b[6+1])<<48 | uint64(b[6+2])<<40 | - uint64(b[6+3])<<32 | uint64(b[6+4])<<24 | uint64(b[6+5])<<16 | uint64(b[6+6])<<8 | uint64(b[6+7]) - e.LeaseLife = uint32(b[14])<<24 | uint32(b[14+1])<<16 | uint32(b[14+2])<<8 | uint32(b[14+3]) - return nil -} - -func (e *EDNS0_LLQ) String() string { - s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + - " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + - " " + strconv.FormatUint(uint64(e.LeaseLife), 10) - return s -} - -type EDNS0_DAU struct { - Code uint16 // Always EDNS0DAU - AlgCode []uint8 -} - -func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } -func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DAU) String() string { - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -type EDNS0_DHU struct { - Code uint16 // Always EDNS0DHU - AlgCode []uint8 -} - -func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } -func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DHU) String() string { - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := HashToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -type EDNS0_N3U struct { - Code uint16 // Always EDNS0N3U - AlgCode []uint8 -} - -func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } -func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_N3U) String() string { - // Re-use the hash map - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := HashToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -type EDNS0_EXPIRE struct { - Code uint16 // Always EDNS0EXPIRE - Expire uint32 -} - -func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } -func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } - -func (e *EDNS0_EXPIRE) pack() ([]byte, error) { - b := make([]byte, 4) - b[0] = byte(e.Expire >> 24) - b[1] = byte(e.Expire >> 16) - b[2] = byte(e.Expire >> 8) - b[3] = byte(e.Expire) - return b, nil -} - -func (e 
*EDNS0_EXPIRE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Expire = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3]) - return nil -} - -// The local EDNS0 option is used for local/experimental purposes. The option -// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] -// (RFC6891), although any unassigned code can actually be used. The content of -// the option is made available in Data, unaltered. -// Basic use pattern for creating a local option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_LOCAL) -// e.Code = dns.EDNS0LOCALSTART -// e.Data = []byte{72, 82, 74} -// o.Option = append(o.Option, e) -type EDNS0_LOCAL struct { - Code uint16 - Data []byte -} - -func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } -func (e *EDNS0_LOCAL) String() string { - return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) -} - -func (e *EDNS0_LOCAL) pack() ([]byte, error) { - b := make([]byte, len(e.Data)) - copied := copy(b, e.Data) - if copied != len(e.Data) { - return nil, ErrBuf - } - return b, nil -} - -func (e *EDNS0_LOCAL) unpack(b []byte) error { - e.Data = make([]byte, len(b)) - copied := copy(e.Data, b) - if copied != len(b) { - return ErrBuf - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/edns_test.go b/Godeps/_workspace/src/github.com/miekg/dns/edns_test.go deleted file mode 100644 index 8ee82ab426..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/edns_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package dns - -import "testing" - -func TestOPTTtl(t *testing.T) { - e := &OPT{} - e.Hdr.Name = "." - e.Hdr.Rrtype = TypeOPT - - if e.Do() { - t.Fail() - } - - e.SetDo() - if !e.Do() { - t.Fail() - } - - oldTtl := e.Hdr.Ttl - - if e.Version() != 0 { - t.Fail() - } - - e.SetVersion(42) - if e.Version() != 42 { - t.Fail() - } - - e.SetVersion(0) - if e.Hdr.Ttl != oldTtl { - t.Fail() - } - - if e.ExtendedRcode() != 0 { - t.Fail() - } - - e.SetExtendedRcode(42) - if e.ExtendedRcode() != 42 { - t.Fail() - } - - e.SetExtendedRcode(0) - if e.Hdr.Ttl != oldTtl { - t.Fail() - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/example_test.go b/Godeps/_workspace/src/github.com/miekg/dns/example_test.go deleted file mode 100644 index 1578a4d053..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/example_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package dns_test - -import ( - "errors" - "fmt" - "github.com/miekg/dns" - "log" - "net" -) - -// Retrieve the MX records for miek.nl. -func ExampleMX() { - config, _ := dns.ClientConfigFromFile("/etc/resolv.conf") - c := new(dns.Client) - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - m.RecursionDesired = true - r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port) - if err != nil { - return - } - if r.Rcode != dns.RcodeSuccess { - return - } - for _, a := range r.Answer { - if mx, ok := a.(*dns.MX); ok { - fmt.Printf("%s\n", mx.String()) - } - } -} - -// Retrieve the DNSKEY records of a zone and convert them -// to DS records for SHA1, SHA256 and SHA384. 
-func ExampleDS(zone string) { - config, _ := dns.ClientConfigFromFile("/etc/resolv.conf") - c := new(dns.Client) - m := new(dns.Msg) - if zone == "" { - zone = "miek.nl" - } - m.SetQuestion(dns.Fqdn(zone), dns.TypeDNSKEY) - m.SetEdns0(4096, true) - r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port) - if err != nil { - return - } - if r.Rcode != dns.RcodeSuccess { - return - } - for _, k := range r.Answer { - if key, ok := k.(*dns.DNSKEY); ok { - for _, alg := range []uint8{dns.SHA1, dns.SHA256, dns.SHA384} { - fmt.Printf("%s; %d\n", key.ToDS(alg).String(), key.Flags) - } - } - } -} - -const TypeAPAIR = 0x0F99 - -type APAIR struct { - addr [2]net.IP -} - -func NewAPAIR() dns.PrivateRdata { return new(APAIR) } - -func (rd *APAIR) String() string { return rd.addr[0].String() + " " + rd.addr[1].String() } -func (rd *APAIR) Parse(txt []string) error { - if len(txt) != 2 { - return errors.New("two addresses required for APAIR") - } - for i, s := range txt { - ip := net.ParseIP(s) - if ip == nil { - return errors.New("invalid IP in APAIR text representation") - } - rd.addr[i] = ip - } - return nil -} - -func (rd *APAIR) Pack(buf []byte) (int, error) { - b := append([]byte(rd.addr[0]), []byte(rd.addr[1])...) - n := copy(buf, b) - if n != len(b) { - return n, dns.ErrBuf - } - return n, nil -} - -func (rd *APAIR) Unpack(buf []byte) (int, error) { - ln := net.IPv4len * 2 - if len(buf) != ln { - return 0, errors.New("invalid length of APAIR rdata") - } - cp := make([]byte, ln) - copy(cp, buf) // clone bytes to use them in IPs - - rd.addr[0] = net.IP(cp[:3]) - rd.addr[1] = net.IP(cp[4:]) - - return len(buf), nil -} - -func (rd *APAIR) Copy(dest dns.PrivateRdata) error { - cp := make([]byte, rd.Len()) - _, err := rd.Pack(cp) - if err != nil { - return err - } - - d := dest.(*APAIR) - d.addr[0] = net.IP(cp[:3]) - d.addr[1] = net.IP(cp[4:]) - return nil -} - -func (rd *APAIR) Len() int { - return net.IPv4len * 2 -} - -func ExamplePrivateHandle() { - dns.PrivateHandle("APAIR", TypeAPAIR, NewAPAIR) - defer dns.PrivateHandleRemove(TypeAPAIR) - - rr, err := dns.NewRR("miek.nl. APAIR (1.2.3.4 1.2.3.5)") - if err != nil { - log.Fatal("could not parse APAIR record: ", err) - } - fmt.Println(rr) - // Output: miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5 - - m := new(dns.Msg) - m.Id = 12345 - m.SetQuestion("miek.nl.", TypeAPAIR) - m.Answer = append(m.Answer, rr) - - fmt.Println(m) - // ;; opcode: QUERY, status: NOERROR, id: 12345 - // ;; flags: rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0 - // - // ;; QUESTION SECTION: - // ;miek.nl. IN APAIR - // - // ;; ANSWER SECTION: - // miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5 -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/format.go b/Godeps/_workspace/src/github.com/miekg/dns/format.go deleted file mode 100644 index 1ac1664fe2..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/format.go +++ /dev/null @@ -1,96 +0,0 @@ -package dns - -import ( - "net" - "reflect" - "strconv" -) - -// NumField returns the number of rdata fields r has. -func NumField(r RR) int { - return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header -} - -// Field returns the rdata field i as a string. Fields are indexed starting from 1. -// RR types that holds slice data, for instance the NSEC type bitmap will return a single -// string where the types are concatenated using a space. -// Accessing non existing fields will cause a panic. 
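A hedged sketch of NumField and Field (defined next), reusing the MX record from the package documentation. Indexes start at 1 and the RR_Header is not counted:

	mx, _ := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
	fmt.Println(dns.NumField(mx)) // 2: Preference and Mx
	fmt.Println(dns.Field(mx, 1)) // "10"
	fmt.Println(dns.Field(mx, 2)) // "mx.miek.nl."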
-func Field(r RR, i int) string { - if i == 0 { - return "" - } - d := reflect.ValueOf(r).Elem().Field(i) - switch k := d.Kind(); k { - case reflect.String: - return d.String() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(d.Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.FormatUint(d.Uint(), 10) - case reflect.Slice: - switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { - case `dns:"a"`: - // TODO(miek): Hmm store this as 16 bytes - if d.Len() < net.IPv6len { - return net.IPv4(byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint())).String() - } - return net.IPv4(byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint())).String() - case `dns:"aaaa"`: - return net.IP{ - byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint()), - byte(d.Index(4).Uint()), - byte(d.Index(5).Uint()), - byte(d.Index(6).Uint()), - byte(d.Index(7).Uint()), - byte(d.Index(8).Uint()), - byte(d.Index(9).Uint()), - byte(d.Index(10).Uint()), - byte(d.Index(11).Uint()), - byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint()), - }.String() - case `dns:"nsec"`: - if d.Len() == 0 { - return "" - } - s := Type(d.Index(0).Uint()).String() - for i := 1; i < d.Len(); i++ { - s += " " + Type(d.Index(i).Uint()).String() - } - return s - case `dns:"wks"`: - if d.Len() == 0 { - return "" - } - s := strconv.Itoa(int(d.Index(0).Uint())) - for i := 0; i < d.Len(); i++ { - s += " " + strconv.Itoa(int(d.Index(i).Uint())) - } - return s - default: - // if it does not have a tag its a string slice - fallthrough - case `dns:"txt"`: - if d.Len() == 0 { - return "" - } - s := d.Index(0).String() - for i := 1; i < d.Len(); i++ { - s += " " + d.Index(i).String() - } - return s - } - } - return "" -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/idn/code_points.go b/Godeps/_workspace/src/github.com/miekg/dns/idn/code_points.go deleted file mode 100644 index 129c3742f5..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/idn/code_points.go +++ /dev/null @@ -1,2346 +0,0 @@ -package idn - -const ( - propertyUnknown property = iota // unknown character property - propertyPVALID // allowed to be used in IDNs - propertyCONTEXTJ // invisible or problematic characters (join controls) - propertyCONTEXTO // invisible or problematic characters (others) - propertyDISALLOWED // should not be included in IDNs - propertyUNASSIGNED // code points that are not designated in the Unicode Standard -) - -// property stores the property of a code point, as described in RFC 5892, -// section 1 -type property int - -// codePoints list all code points in Unicode Character Database (UCD) Format -// according to RFC 5892, appendix B.1. 
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/idn/code_points.go b/Godeps/_workspace/src/github.com/miekg/dns/idn/code_points.go
deleted file mode 100644
index 129c3742f5..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/idn/code_points.go
+++ /dev/null
@@ -1,2346 +0,0 @@
-package idn
-
-const (
-	propertyUnknown    property = iota // unknown character property
-	propertyPVALID                     // allowed to be used in IDNs
-	propertyCONTEXTJ                   // invisible or problematic characters (join controls)
-	propertyCONTEXTO                   // invisible or problematic characters (others)
-	propertyDISALLOWED                 // should not be included in IDNs
-	propertyUNASSIGNED                 // code points that are not designated in the Unicode Standard
-)
-
-// property stores the property of a code point, as described in RFC 5892,
-// section 1.
-type property int
-
-// codePoints lists all code points in Unicode Character Database (UCD) format
-// according to RFC 5892, appendix B.1. Thanks to libidn2 (GNU).
-// http://www.gnu.org/software/libidn/libidn2/
-var codePoints = []struct {
-	start rune
-	end   rune
-	state property
-}{ - {0x0000, 0x002C, propertyDISALLOWED}, // ..COMMA - {0x002D, 0x0, propertyPVALID}, // HYPHEN-MINUS - {0x002E, 0x002F, propertyDISALLOWED}, // FULL STOP..SOLIDUS - {0x0030, 0x0039, propertyPVALID}, // DIGIT ZERO..DIGIT NINE - {0x003A, 0x0060, propertyDISALLOWED}, // COLON..GRAVE ACCENT - {0x0061, 0x007A, propertyPVALID}, // LATIN SMALL LETTER A..LATIN SMALL LETTER Z - {0x007B, 0x00B6, propertyDISALLOWED}, // LEFT CURLY BRACKET..PILCROW SIGN - {0x00B7, 0x0, propertyCONTEXTO}, // MIDDLE DOT - {0x00B8, 0x00DE, propertyDISALLOWED}, // CEDILLA..LATIN CAPITAL LETTER THORN - {0x00DF, 0x00F6, propertyPVALID}, // LATIN SMALL LETTER SHARP S..LATIN SMALL LETT - {0x00F7, 0x0, propertyDISALLOWED}, // DIVISION SIGN - {0x00F8, 0x00FF, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE..LATIN SMAL - {0x0100, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH MACRON - {0x0101, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH MACRON - {0x0102, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE - {0x0103, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE - {0x0104, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH OGONEK - {0x0105, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH OGONEK - {0x0106, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH ACUTE - {0x0107, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH ACUTE - {0x0108, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CIRCUMFLEX - {0x0109, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CIRCUMFLEX - {0x010A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH DOT ABOVE - {0x010B, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH DOT ABOVE - {0x010C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CARON - {0x010D, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CARON - {0x010E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CARON - {0x010F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CARON - {0x0110, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH STROKE - {0x0111, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH STROKE - {0x0112, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON - {0x0113, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON - {0x0114, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH BREVE - {0x0115, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH BREVE - {0x0116, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT ABOVE - {0x0117, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT ABOVE - {0x0118, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH OGONEK - {0x0119, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH OGONEK - {0x011A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CARON - {0x011B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CARON - {0x011C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CIRCUMFLEX - {0x011D, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CIRCUMFLEX - {0x011E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH BREVE - {0x011F, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH BREVE - {0x0120, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH DOT ABOVE - {0x0121, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH DOT ABOVE - {0x0122, 0x0, propertyDISALLOWED}, // LATIN CAPITAL
LETTER G WITH CEDILLA - {0x0123, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CEDILLA - {0x0124, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CIRCUMFLEX - {0x0125, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CIRCUMFLEX - {0x0126, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH STROKE - {0x0127, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH STROKE - {0x0128, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE - {0x0129, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE - {0x012A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH MACRON - {0x012B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH MACRON - {0x012C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH BREVE - {0x012D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH BREVE - {0x012E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH OGONEK - {0x012F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH OGONEK - {0x0130, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT ABOVE - {0x0131, 0x0, propertyPVALID}, // LATIN SMALL LETTER DOTLESS I - {0x0132, 0x0134, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE IJ..LATIN CAPITAL LET - {0x0135, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH CIRCUMFLEX - {0x0136, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CEDILLA - {0x0137, 0x0138, propertyPVALID}, // LATIN SMALL LETTER K WITH CEDILLA..LATIN SMA - {0x0139, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH ACUTE - {0x013A, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH ACUTE - {0x013B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CEDILLA - {0x013C, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CEDILLA - {0x013D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CARON - {0x013E, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CARON - {0x013F, 0x0141, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE DOT..LATI - {0x0142, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH STROKE - {0x0143, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH ACUTE - {0x0144, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH ACUTE - {0x0145, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CEDILLA - {0x0146, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CEDILLA - {0x0147, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CARON - {0x0148, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CARON - {0x0149, 0x014A, propertyDISALLOWED}, // LATIN SMALL LETTER N PRECEDED BY APOSTROPHE. 
- {0x014B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ENG - {0x014C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON - {0x014D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON - {0x014E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH BREVE - {0x014F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH BREVE - {0x0150, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE ACUTE - {0x0151, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE ACUTE - {0x0152, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE OE - {0x0153, 0x0, propertyPVALID}, // LATIN SMALL LIGATURE OE - {0x0154, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH ACUTE - {0x0155, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH ACUTE - {0x0156, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CEDILLA - {0x0157, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CEDILLA - {0x0158, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CARON - {0x0159, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CARON - {0x015A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE - {0x015B, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE - {0x015C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CIRCUMFLEX - {0x015D, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CIRCUMFLEX - {0x015E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CEDILLA - {0x015F, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CEDILLA - {0x0160, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON - {0x0161, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON - {0x0162, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CEDILLA - {0x0163, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CEDILLA - {0x0164, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CARON - {0x0165, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CARON - {0x0166, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH STROKE - {0x0167, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH STROKE - {0x0168, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE - {0x0169, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE - {0x016A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON - {0x016B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON - {0x016C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH BREVE - {0x016D, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH BREVE - {0x016E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH RING ABOVE - {0x016F, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH RING ABOVE - {0x0170, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE ACUTE - {0x0171, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE ACUTE - {0x0172, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH OGONEK - {0x0173, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH OGONEK - {0x0174, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH CIRCUMFLEX - {0x0175, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH CIRCUMFLEX - {0x0176, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH CIRCUMFLEX - {0x0177, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH CIRCUMFLEX - {0x0178, 0x0179, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DIAERESIS..LATIN - {0x017A, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH ACUTE - {0x017B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT ABOVE - {0x017C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT 
ABOVE - {0x017D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CARON - {0x017E, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CARON - {0x017F, 0x0, propertyDISALLOWED}, // LATIN SMALL LETTER LONG S - {0x0180, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH STROKE - {0x0181, 0x0182, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH HOOK..LATIN CAPI - {0x0183, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH TOPBAR - {0x0184, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE SIX - {0x0185, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE SIX - {0x0186, 0x0187, propertyDISALLOWED}, // LATIN CAPITAL LETTER OPEN O..LATIN CAPITAL L - {0x0188, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH HOOK - {0x0189, 0x018B, propertyDISALLOWED}, // LATIN CAPITAL LETTER AFRICAN D..LATIN CAPITA - {0x018C, 0x018D, propertyPVALID}, // LATIN SMALL LETTER D WITH TOPBAR..LATIN SMAL - {0x018E, 0x0191, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED E..LATIN CAPIT - {0x0192, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH HOOK - {0x0193, 0x0194, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH HOOK..LATIN CAPI - {0x0195, 0x0, propertyPVALID}, // LATIN SMALL LETTER HV - {0x0196, 0x0198, propertyDISALLOWED}, // LATIN CAPITAL LETTER IOTA..LATIN CAPITAL LET - {0x0199, 0x019B, propertyPVALID}, // LATIN SMALL LETTER K WITH HOOK..LATIN SMALL - {0x019C, 0x019D, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED M..LATIN CAPITAL - {0x019E, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LONG RIGHT LEG - {0x019F, 0x01A0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MIDDLE TILDE..LA - {0x01A1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN - {0x01A2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OI - {0x01A3, 0x0, propertyPVALID}, // LATIN SMALL LETTER OI - {0x01A4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH HOOK - {0x01A5, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH HOOK - {0x01A6, 0x01A7, propertyDISALLOWED}, // LATIN LETTER YR..LATIN CAPITAL LETTER TONE T - {0x01A8, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE TWO - {0x01A9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ESH - {0x01AA, 0x01AB, propertyPVALID}, // LATIN LETTER REVERSED ESH LOOP..LATIN SMALL - {0x01AC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH HOOK - {0x01AD, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH HOOK - {0x01AE, 0x01AF, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH RETROFLEX HOOK.. 
- {0x01B0, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN - {0x01B1, 0x01B3, propertyDISALLOWED}, // LATIN CAPITAL LETTER UPSILON..LATIN CAPITAL - {0x01B4, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK - {0x01B5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH STROKE - {0x01B6, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH STROKE - {0x01B7, 0x01B8, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH..LATIN CAPITAL LETT - {0x01B9, 0x01BB, propertyPVALID}, // LATIN SMALL LETTER EZH REVERSED..LATIN LETTE - {0x01BC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE FIVE - {0x01BD, 0x01C3, propertyPVALID}, // LATIN SMALL LETTER TONE FIVE..LATIN LETTER R - {0x01C4, 0x01CD, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ WITH CARON..LATIN CA - {0x01CE, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CARON - {0x01CF, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH CARON - {0x01D0, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH CARON - {0x01D1, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CARON - {0x01D2, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CARON - {0x01D3, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CARON - {0x01D4, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CARON - {0x01D5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND MA - {0x01D6, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND MACR - {0x01D7, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND AC - {0x01D8, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND ACUT - {0x01D9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND CA - {0x01DA, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND CARO - {0x01DB, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND GR - {0x01DC, 0x01DD, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND GRAV - {0x01DE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DIAERESIS AND MA - {0x01DF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DIAERESIS AND MACR - {0x01E0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE AND MA - {0x01E1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT ABOVE AND MACR - {0x01E2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH MACRON - {0x01E3, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH MACRON - {0x01E4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH STROKE - {0x01E5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH STROKE - {0x01E6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CARON - {0x01E7, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CARON - {0x01E8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CARON - {0x01E9, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH CARON - {0x01EA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK - {0x01EB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK - {0x01EC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK AND MACRO - {0x01ED, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK AND MACRON - {0x01EE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH WITH CARON - {0x01EF, 0x01F0, propertyPVALID}, // LATIN SMALL LETTER EZH WITH CARON..LATIN SMA - {0x01F1, 0x01F4, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ..LATIN CAPITAL LETTE - {0x01F5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH ACUTE - {0x01F6, 0x01F8, propertyDISALLOWED}, // LATIN CAPITAL LETTER HWAIR..LATIN 
CAPITAL LE - {0x01F9, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH GRAVE - {0x01FA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING ABOVE AND A - {0x01FB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING ABOVE AND ACU - {0x01FC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH ACUTE - {0x01FD, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH ACUTE - {0x01FE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH STROKE AND ACUTE - {0x01FF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE AND ACUTE - {0x0200, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOUBLE GRAVE - {0x0201, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOUBLE GRAVE - {0x0202, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH INVERTED BREVE - {0x0203, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH INVERTED BREVE - {0x0204, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOUBLE GRAVE - {0x0205, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOUBLE GRAVE - {0x0206, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH INVERTED BREVE - {0x0207, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH INVERTED BREVE - {0x0208, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOUBLE GRAVE - {0x0209, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOUBLE GRAVE - {0x020A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH INVERTED BREVE - {0x020B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH INVERTED BREVE - {0x020C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE GRAVE - {0x020D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE GRAVE - {0x020E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH INVERTED BREVE - {0x020F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH INVERTED BREVE - {0x0210, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOUBLE GRAVE - {0x0211, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOUBLE GRAVE - {0x0212, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH INVERTED BREVE - {0x0213, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH INVERTED BREVE - {0x0214, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE GRAVE - {0x0215, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE GRAVE - {0x0216, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH INVERTED BREVE - {0x0217, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH INVERTED BREVE - {0x0218, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH COMMA BELOW - {0x0219, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH COMMA BELOW - {0x021A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH COMMA BELOW - {0x021B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH COMMA BELOW - {0x021C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER YOGH - {0x021D, 0x0, propertyPVALID}, // LATIN SMALL LETTER YOGH - {0x021E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CARON - {0x021F, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CARON - {0x0220, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LONG RIGHT LEG - {0x0221, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CURL - {0x0222, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OU - {0x0223, 0x0, propertyPVALID}, // LATIN SMALL LETTER OU - {0x0224, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH HOOK - {0x0225, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH HOOK - {0x0226, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE - {0x0227, 0x0, propertyPVALID}, // 
LATIN SMALL LETTER A WITH DOT ABOVE - {0x0228, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA - {0x0229, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA - {0x022A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DIAERESIS AND MA - {0x022B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DIAERESIS AND MACR - {0x022C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND MACRON - {0x022D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND MACRON - {0x022E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE - {0x022F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE - {0x0230, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE AND MA - {0x0231, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE AND MACR - {0x0232, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH MACRON - {0x0233, 0x0239, propertyPVALID}, // LATIN SMALL LETTER Y WITH MACRON..LATIN SMAL - {0x023A, 0x023B, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH STROKE..LATIN CA - {0x023C, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH STROKE - {0x023D, 0x023E, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH BAR..LATIN CAPIT - {0x023F, 0x0240, propertyPVALID}, // LATIN SMALL LETTER S WITH SWASH TAIL..LATIN - {0x0241, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER GLOTTAL STOP - {0x0242, 0x0, propertyPVALID}, // LATIN SMALL LETTER GLOTTAL STOP - {0x0243, 0x0246, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH STROKE..LATIN CA - {0x0247, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH STROKE - {0x0248, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER J WITH STROKE - {0x0249, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH STROKE - {0x024A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL - {0x024B, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH HOOK TAIL - {0x024C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH STROKE - {0x024D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH STROKE - {0x024E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH STROKE - {0x024F, 0x02AF, propertyPVALID}, // LATIN SMALL LETTER Y WITH STROKE..LATIN SMAL - {0x02B0, 0x02B8, propertyDISALLOWED}, // MODIFIER LETTER SMALL H..MODIFIER LETTER SMA - {0x02B9, 0x02C1, propertyPVALID}, // MODIFIER LETTER PRIME..MODIFIER LETTER REVER - {0x02C2, 0x02C5, propertyDISALLOWED}, // MODIFIER LETTER LEFT ARROWHEAD..MODIFIER LET - {0x02C6, 0x02D1, propertyPVALID}, // MODIFIER LETTER CIRCUMFLEX ACCENT..MODIFIER - {0x02D2, 0x02EB, propertyDISALLOWED}, // MODIFIER LETTER CENTRED RIGHT HALF RING..MOD - {0x02EC, 0x0, propertyPVALID}, // MODIFIER LETTER VOICING - {0x02ED, 0x0, propertyDISALLOWED}, // MODIFIER LETTER UNASPIRATED - {0x02EE, 0x0, propertyPVALID}, // MODIFIER LETTER DOUBLE APOSTROPHE - {0x02EF, 0x02FF, propertyDISALLOWED}, // MODIFIER LETTER LOW DOWN ARROWHEAD..MODIFIER - {0x0300, 0x033F, propertyPVALID}, // COMBINING GRAVE ACCENT..COMBINING DOUBLE OVE - {0x0340, 0x0341, propertyDISALLOWED}, // COMBINING GRAVE TONE MARK..COMBINING ACUTE T - {0x0342, 0x0, propertyPVALID}, // COMBINING GREEK PERISPOMENI - {0x0343, 0x0345, propertyDISALLOWED}, // COMBINING GREEK KORONIS..COMBINING GREEK YPO - {0x0346, 0x034E, propertyPVALID}, // COMBINING BRIDGE ABOVE..COMBINING UPWARDS AR - {0x034F, 0x0, propertyDISALLOWED}, // COMBINING GRAPHEME JOINER - {0x0350, 0x036F, propertyPVALID}, // COMBINING RIGHT ARROWHEAD ABOVE..COMBINING L - {0x0370, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER 
HETA - {0x0371, 0x0, propertyPVALID}, // GREEK SMALL LETTER HETA - {0x0372, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER ARCHAIC SAMPI - {0x0373, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC SAMPI - {0x0374, 0x0, propertyDISALLOWED}, // GREEK NUMERAL SIGN - {0x0375, 0x0, propertyCONTEXTO}, // GREEK LOWER NUMERAL SIGN - {0x0376, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA - {0x0377, 0x0, propertyPVALID}, // GREEK SMALL LETTER PAMPHYLIAN DIGAMMA - {0x0378, 0x0379, propertyUNASSIGNED}, // .. - {0x037A, 0x0, propertyDISALLOWED}, // GREEK YPOGEGRAMMENI - {0x037B, 0x037D, propertyPVALID}, // GREEK SMALL REVERSED LUNATE SIGMA SYMBOL..GR - {0x037E, 0x0, propertyDISALLOWED}, // GREEK QUESTION MARK - {0x037F, 0x0383, propertyUNASSIGNED}, // .. - {0x0384, 0x038A, propertyDISALLOWED}, // GREEK TONOS..GREEK CAPITAL LETTER IOTA WITH - {0x038B, 0x0, propertyUNASSIGNED}, // - {0x038C, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH TONOS - {0x038D, 0x0, propertyUNASSIGNED}, // - {0x038E, 0x038F, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH TONOS..GRE - {0x0390, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND T - {0x0391, 0x03A1, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA..GREEK CAPITAL LE - {0x03A2, 0x0, propertyUNASSIGNED}, // - {0x03A3, 0x03AB, propertyDISALLOWED}, // GREEK CAPITAL LETTER SIGMA..GREEK CAPITAL LE - {0x03AC, 0x03CE, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH TONOS..GREEK S - {0x03CF, 0x03D6, propertyDISALLOWED}, // GREEK CAPITAL KAI SYMBOL..GREEK PI SYMBOL - {0x03D7, 0x0, propertyPVALID}, // GREEK KAI SYMBOL - {0x03D8, 0x0, propertyDISALLOWED}, // GREEK LETTER ARCHAIC KOPPA - {0x03D9, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC KOPPA - {0x03DA, 0x0, propertyDISALLOWED}, // GREEK LETTER STIGMA - {0x03DB, 0x0, propertyPVALID}, // GREEK SMALL LETTER STIGMA - {0x03DC, 0x0, propertyDISALLOWED}, // GREEK LETTER DIGAMMA - {0x03DD, 0x0, propertyPVALID}, // GREEK SMALL LETTER DIGAMMA - {0x03DE, 0x0, propertyDISALLOWED}, // GREEK LETTER KOPPA - {0x03DF, 0x0, propertyPVALID}, // GREEK SMALL LETTER KOPPA - {0x03E0, 0x0, propertyDISALLOWED}, // GREEK LETTER SAMPI - {0x03E1, 0x0, propertyPVALID}, // GREEK SMALL LETTER SAMPI - {0x03E2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHEI - {0x03E3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHEI - {0x03E4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FEI - {0x03E5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FEI - {0x03E6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHEI - {0x03E7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHEI - {0x03E8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HORI - {0x03E9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HORI - {0x03EA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GANGIA - {0x03EB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GANGIA - {0x03EC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHIMA - {0x03ED, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHIMA - {0x03EE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DEI - {0x03EF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DEI - {0x03F0, 0x03F2, propertyDISALLOWED}, // GREEK KAPPA SYMBOL..GREEK LUNATE SIGMA SYMBO - {0x03F3, 0x0, propertyPVALID}, // GREEK LETTER YOT - {0x03F4, 0x03F7, propertyDISALLOWED}, // GREEK CAPITAL THETA SYMBOL..GREEK CAPITAL LE - {0x03F8, 0x0, propertyPVALID}, // GREEK SMALL LETTER SHO - {0x03F9, 0x03FA, propertyDISALLOWED}, // GREEK CAPITAL LUNATE SIGMA SYMBOL..GREEK CAP - {0x03FB, 0x03FC, 
propertyPVALID}, // GREEK SMALL LETTER SAN..GREEK RHO WITH STROK - {0x03FD, 0x042F, propertyDISALLOWED}, // GREEK CAPITAL REVERSED LUNATE SIGMA SYMBOL.. - {0x0430, 0x045F, propertyPVALID}, // CYRILLIC SMALL LETTER A..CYRILLIC SMALL LETT - {0x0460, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA - {0x0461, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA - {0x0462, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAT - {0x0463, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAT - {0x0464, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED E - {0x0465, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED E - {0x0466, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LITTLE YUS - {0x0467, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LITTLE YUS - {0x0468, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS - {0x0469, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED LITTLE YUS - {0x046A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BIG YUS - {0x046B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BIG YUS - {0x046C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS - {0x046D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED BIG YUS - {0x046E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KSI - {0x046F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KSI - {0x0470, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PSI - {0x0471, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PSI - {0x0472, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER FITA - {0x0473, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER FITA - {0x0474, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA - {0x0475, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA - {0x0476, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE - {0x0477, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GR - {0x0478, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER UK - {0x0479, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER UK - {0x047A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ROUND OMEGA - {0x047B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ROUND OMEGA - {0x047C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA WITH TITLO - {0x047D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA WITH TITLO - {0x047E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OT - {0x047F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OT - {0x0480, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOPPA - {0x0481, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOPPA - {0x0482, 0x0, propertyDISALLOWED}, // CYRILLIC THOUSANDS SIGN - {0x0483, 0x0487, propertyPVALID}, // COMBINING CYRILLIC TITLO..COMBINING CYRILLIC - {0x0488, 0x048A, propertyDISALLOWED}, // COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..C - {0x048B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHORT I WITH TAIL - {0x048C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SEMISOFT SIGN - {0x048D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SEMISOFT SIGN - {0x048E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ER WITH TICK - {0x048F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ER WITH TICK - {0x0490, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH UPTURN - {0x0491, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH UPTURN - {0x0492, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH STROKE - {0x0493, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE - 
{0x0494, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK - {0x0495, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH MIDDLE HOOK - {0x0496, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER - {0x0497, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DESCENDER - {0x0498, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DESCENDER - {0x0499, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DESCENDER - {0x049A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH DESCENDER - {0x049B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH DESCENDER - {0x049C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH VERTICAL STR - {0x049D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH VERTICAL STROK - {0x049E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH STROKE - {0x049F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH STROKE - {0x04A0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BASHKIR KA - {0x04A1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BASHKIR KA - {0x04A2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH DESCENDER - {0x04A3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH DESCENDER - {0x04A4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE EN GHE - {0x04A5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE EN GHE - {0x04A6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK - {0x04A7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH MIDDLE HOOK - {0x04A8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN HA - {0x04A9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN HA - {0x04AA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ES WITH DESCENDER - {0x04AB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ES WITH DESCENDER - {0x04AC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH DESCENDER - {0x04AD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH DESCENDER - {0x04AE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U - {0x04AF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U - {0x04B0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U WITH STRO - {0x04B1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE - {0x04B2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH DESCENDER - {0x04B3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH DESCENDER - {0x04B4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE TE TSE - {0x04B5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE TE TSE - {0x04B6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DESCENDER - {0x04B7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DESCENDER - {0x04B8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH VERTICAL ST - {0x04B9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH VERTICAL STRO - {0x04BA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHHA - {0x04BB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHHA - {0x04BC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE - {0x04BD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE - {0x04BE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH D - {0x04BF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE WITH DES - {0x04C0, 0x04C1, propertyDISALLOWED}, // CYRILLIC LETTER PALOCHKA..CYRILLIC CAPITAL L - {0x04C2, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER 
ZHE WITH BREVE - {0x04C3, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH HOOK - {0x04C4, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH HOOK - {0x04C5, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH TAIL - {0x04C6, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH TAIL - {0x04C7, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH HOOK - {0x04C8, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH HOOK - {0x04C9, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH TAIL - {0x04CA, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH TAIL - {0x04CB, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KHAKASSIAN CHE - {0x04CC, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KHAKASSIAN CHE - {0x04CD, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EM WITH TAIL - {0x04CE, 0x04CF, propertyPVALID}, // CYRILLIC SMALL LETTER EM WITH TAIL..CYRILLIC - {0x04D0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH BREVE - {0x04D1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH BREVE - {0x04D2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH DIAERESIS - {0x04D3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH DIAERESIS - {0x04D4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE A IE - {0x04D5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE A IE - {0x04D6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IE WITH BREVE - {0x04D7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IE WITH BREVE - {0x04D8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA - {0x04D9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA - {0x04DA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS - {0x04DB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA WITH DIAERESIS - {0x04DC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS - {0x04DD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DIAERESIS - {0x04DE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS - {0x04DF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DIAERESIS - {0x04E0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN DZE - {0x04E1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN DZE - {0x04E2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH MACRON - {0x04E3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH MACRON - {0x04E4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH DIAERESIS - {0x04E5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH DIAERESIS - {0x04E6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER O WITH DIAERESIS - {0x04E7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER O WITH DIAERESIS - {0x04E8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O - {0x04E9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O - {0x04EA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O WITH DIAERE - {0x04EB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O WITH DIAERESI - {0x04EC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER E WITH DIAERESIS - {0x04ED, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER E WITH DIAERESIS - {0x04EE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH MACRON - {0x04EF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH MACRON - {0x04F0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH DIAERESIS - {0x04F1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH DIAERESIS - {0x04F2, 0x0, propertyDISALLOWED}, // CYRILLIC 
CAPITAL LETTER U WITH DOUBLE ACUTE - {0x04F3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE - {0x04F4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS - {0x04F5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DIAERESIS - {0x04F6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH DESCENDER - {0x04F7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH DESCENDER - {0x04F8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS - {0x04F9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH DIAERESIS - {0x04FA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH STROKE AND - {0x04FB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE AND HO - {0x04FC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH HOOK - {0x04FD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH HOOK - {0x04FE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH STROKE - {0x04FF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH STROKE - {0x0500, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DE - {0x0501, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DE - {0x0502, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DJE - {0x0503, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DJE - {0x0504, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI ZJE - {0x0505, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI ZJE - {0x0506, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DZJE - {0x0507, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DZJE - {0x0508, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI LJE - {0x0509, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI LJE - {0x050A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI NJE - {0x050B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI NJE - {0x050C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI SJE - {0x050D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI SJE - {0x050E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI TJE - {0x050F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI TJE - {0x0510, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED ZE - {0x0511, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED ZE - {0x0512, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH HOOK - {0x0513, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH HOOK - {0x0514, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LHA - {0x0515, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LHA - {0x0516, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER RHA - {0x0517, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER RHA - {0x0518, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAE - {0x0519, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAE - {0x051A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER QA - {0x051B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER QA - {0x051C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER WE - {0x051D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER WE - {0x051E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ALEUT KA - {0x051F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ALEUT KA - {0x0520, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK - {0x0521, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH MIDDLE HOOK - {0x0522, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK - {0x0523, 0x0, propertyPVALID}, // CYRILLIC 
SMALL LETTER EN WITH MIDDLE HOOK - {0x0524, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH DESCENDER - {0x0525, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH DESCENDER - {0x0526, 0x0530, propertyUNASSIGNED}, // .. - {0x0531, 0x0556, propertyDISALLOWED}, // ARMENIAN CAPITAL LETTER AYB..ARMENIAN CAPITA - {0x0557, 0x0558, propertyUNASSIGNED}, // .. - {0x0559, 0x0, propertyPVALID}, // ARMENIAN MODIFIER LETTER LEFT HALF RING - {0x055A, 0x055F, propertyDISALLOWED}, // ARMENIAN APOSTROPHE..ARMENIAN ABBREVIATION M - {0x0560, 0x0, propertyUNASSIGNED}, // - {0x0561, 0x0586, propertyPVALID}, // ARMENIAN SMALL LETTER AYB..ARMENIAN SMALL LE - {0x0587, 0x0, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE ECH YIWN - {0x0588, 0x0, propertyUNASSIGNED}, // - {0x0589, 0x058A, propertyDISALLOWED}, // ARMENIAN FULL STOP..ARMENIAN HYPHEN - {0x058B, 0x0590, propertyUNASSIGNED}, // .. - {0x0591, 0x05BD, propertyPVALID}, // HEBREW ACCENT ETNAHTA..HEBREW POINT METEG - {0x05BE, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION MAQAF - {0x05BF, 0x0, propertyPVALID}, // HEBREW POINT RAFE - {0x05C0, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION PASEQ - {0x05C1, 0x05C2, propertyPVALID}, // HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT - {0x05C3, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION SOF PASUQ - {0x05C4, 0x05C5, propertyPVALID}, // HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT - {0x05C6, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION NUN HAFUKHA - {0x05C7, 0x0, propertyPVALID}, // HEBREW POINT QAMATS QATAN - {0x05C8, 0x05CF, propertyUNASSIGNED}, // .. - {0x05D0, 0x05EA, propertyPVALID}, // HEBREW LETTER ALEF..HEBREW LETTER TAV - {0x05EB, 0x05EF, propertyUNASSIGNED}, // .. - {0x05F0, 0x05F2, propertyPVALID}, // HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW L - {0x05F3, 0x05F4, propertyCONTEXTO}, // HEBREW PUNCTUATION GERESH..HEBREW PUNCTUATIO - {0x05F5, 0x05FF, propertyUNASSIGNED}, // .. - {0x0600, 0x0603, propertyDISALLOWED}, // ARABIC NUMBER SIGN..ARABIC SIGN SAFHA - {0x0604, 0x0605, propertyUNASSIGNED}, // .. - {0x0606, 0x060F, propertyDISALLOWED}, // ARABIC-INDIC CUBE ROOT..ARABIC SIGN MISRA - {0x0610, 0x061A, propertyPVALID}, // ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..AR - {0x061B, 0x0, propertyDISALLOWED}, // ARABIC SEMICOLON - {0x061C, 0x061D, propertyUNASSIGNED}, // .. 
- {0x061E, 0x061F, propertyDISALLOWED}, // ARABIC TRIPLE DOT PUNCTUATION MARK..ARABIC Q - {0x0620, 0x0, propertyUNASSIGNED}, // - {0x0621, 0x063F, propertyPVALID}, // ARABIC LETTER HAMZA..ARABIC LETTER FARSI YEH - {0x0640, 0x0, propertyDISALLOWED}, // ARABIC TATWEEL - {0x0641, 0x065E, propertyPVALID}, // ARABIC LETTER FEH..ARABIC FATHA WITH TWO DOT - {0x065F, 0x0, propertyUNASSIGNED}, // - {0x0660, 0x0669, propertyCONTEXTO}, // ARABIC-INDIC DIGIT ZERO..ARABIC-INDIC DIGIT - {0x066A, 0x066D, propertyDISALLOWED}, // ARABIC PERCENT SIGN..ARABIC FIVE POINTED STA - {0x066E, 0x0674, propertyPVALID}, // ARABIC LETTER DOTLESS BEH..ARABIC LETTER HIG - {0x0675, 0x0678, propertyDISALLOWED}, // ARABIC LETTER HIGH HAMZA ALEF..ARABIC LETTER - {0x0679, 0x06D3, propertyPVALID}, // ARABIC LETTER TTEH..ARABIC LETTER YEH BARREE - {0x06D4, 0x0, propertyDISALLOWED}, // ARABIC FULL STOP - {0x06D5, 0x06DC, propertyPVALID}, // ARABIC LETTER AE..ARABIC SMALL HIGH SEEN - {0x06DD, 0x06DE, propertyDISALLOWED}, // ARABIC END OF AYAH..ARABIC START OF RUB EL H - {0x06DF, 0x06E8, propertyPVALID}, // ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL - {0x06E9, 0x0, propertyDISALLOWED}, // ARABIC PLACE OF SAJDAH - {0x06EA, 0x06EF, propertyPVALID}, // ARABIC EMPTY CENTRE LOW STOP..ARABIC LETTER - {0x06F0, 0x06F9, propertyCONTEXTO}, // EXTENDED ARABIC-INDIC DIGIT ZERO..EXTENDED A - {0x06FA, 0x06FF, propertyPVALID}, // ARABIC LETTER SHEEN WITH DOT BELOW..ARABIC L - {0x0700, 0x070D, propertyDISALLOWED}, // SYRIAC END OF PARAGRAPH..SYRIAC HARKLEAN AST - {0x070E, 0x0, propertyUNASSIGNED}, // - {0x070F, 0x0, propertyDISALLOWED}, // SYRIAC ABBREVIATION MARK - {0x0710, 0x074A, propertyPVALID}, // SYRIAC LETTER ALAPH..SYRIAC BARREKH - {0x074B, 0x074C, propertyUNASSIGNED}, // .. - {0x074D, 0x07B1, propertyPVALID}, // SYRIAC LETTER SOGDIAN ZHAIN..THAANA LETTER N - {0x07B2, 0x07BF, propertyUNASSIGNED}, // .. - {0x07C0, 0x07F5, propertyPVALID}, // NKO DIGIT ZERO..NKO LOW TONE APOSTROPHE - {0x07F6, 0x07FA, propertyDISALLOWED}, // NKO SYMBOL OO DENNEN..NKO LAJANYALAN - {0x07FB, 0x07FF, propertyUNASSIGNED}, // .. - {0x0800, 0x082D, propertyPVALID}, // SAMARITAN LETTER ALAF..SAMARITAN MARK NEQUDA - {0x082E, 0x082F, propertyUNASSIGNED}, // .. - {0x0830, 0x083E, propertyDISALLOWED}, // SAMARITAN PUNCTUATION NEQUDAA..SAMARITAN PUN - {0x083F, 0x08FF, propertyUNASSIGNED}, // .. - {0x0900, 0x0939, propertyPVALID}, // DEVANAGARI SIGN INVERTED CANDRABINDU..DEVANA - {0x093A, 0x093B, propertyUNASSIGNED}, // .. - {0x093C, 0x094E, propertyPVALID}, // DEVANAGARI SIGN NUKTA..DEVANAGARI VOWEL SIGN - {0x094F, 0x0, propertyUNASSIGNED}, // - {0x0950, 0x0955, propertyPVALID}, // DEVANAGARI OM..DEVANAGARI VOWEL SIGN CANDRA - {0x0956, 0x0957, propertyUNASSIGNED}, // .. - {0x0958, 0x095F, propertyDISALLOWED}, // DEVANAGARI LETTER QA..DEVANAGARI LETTER YYA - {0x0960, 0x0963, propertyPVALID}, // DEVANAGARI LETTER VOCALIC RR..DEVANAGARI VOW - {0x0964, 0x0965, propertyDISALLOWED}, // DEVANAGARI DANDA..DEVANAGARI DOUBLE DANDA - {0x0966, 0x096F, propertyPVALID}, // DEVANAGARI DIGIT ZERO..DEVANAGARI DIGIT NINE - {0x0970, 0x0, propertyDISALLOWED}, // DEVANAGARI ABBREVIATION SIGN - {0x0971, 0x0972, propertyPVALID}, // DEVANAGARI SIGN HIGH SPACING DOT..DEVANAGARI - {0x0973, 0x0978, propertyUNASSIGNED}, // .. 
- {0x0979, 0x097F, propertyPVALID}, // DEVANAGARI LETTER ZHA..DEVANAGARI LETTER BBA - {0x0980, 0x0, propertyUNASSIGNED}, // - {0x0981, 0x0983, propertyPVALID}, // BENGALI SIGN CANDRABINDU..BENGALI SIGN VISAR - {0x0984, 0x0, propertyUNASSIGNED}, // - {0x0985, 0x098C, propertyPVALID}, // BENGALI LETTER A..BENGALI LETTER VOCALIC L - {0x098D, 0x098E, propertyUNASSIGNED}, // .. - {0x098F, 0x0990, propertyPVALID}, // BENGALI LETTER E..BENGALI LETTER AI - {0x0991, 0x0992, propertyUNASSIGNED}, // .. - {0x0993, 0x09A8, propertyPVALID}, // BENGALI LETTER O..BENGALI LETTER NA - {0x09A9, 0x0, propertyUNASSIGNED}, // - {0x09AA, 0x09B0, propertyPVALID}, // BENGALI LETTER PA..BENGALI LETTER RA - {0x09B1, 0x0, propertyUNASSIGNED}, // - {0x09B2, 0x0, propertyPVALID}, // BENGALI LETTER LA - {0x09B3, 0x09B5, propertyUNASSIGNED}, // .. - {0x09B6, 0x09B9, propertyPVALID}, // BENGALI LETTER SHA..BENGALI LETTER HA - {0x09BA, 0x09BB, propertyUNASSIGNED}, // .. - {0x09BC, 0x09C4, propertyPVALID}, // BENGALI SIGN NUKTA..BENGALI VOWEL SIGN VOCAL - {0x09C5, 0x09C6, propertyUNASSIGNED}, // .. - {0x09C7, 0x09C8, propertyPVALID}, // BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI - {0x09C9, 0x09CA, propertyUNASSIGNED}, // .. - {0x09CB, 0x09CE, propertyPVALID}, // BENGALI VOWEL SIGN O..BENGALI LETTER KHANDA - {0x09CF, 0x09D6, propertyUNASSIGNED}, // .. - {0x09D7, 0x0, propertyPVALID}, // BENGALI AU LENGTH MARK - {0x09D8, 0x09DB, propertyUNASSIGNED}, // .. - {0x09DC, 0x09DD, propertyDISALLOWED}, // BENGALI LETTER RRA..BENGALI LETTER RHA - {0x09DE, 0x0, propertyUNASSIGNED}, // - {0x09DF, 0x0, propertyDISALLOWED}, // BENGALI LETTER YYA - {0x09E0, 0x09E3, propertyPVALID}, // BENGALI LETTER VOCALIC RR..BENGALI VOWEL SIG - {0x09E4, 0x09E5, propertyUNASSIGNED}, // .. - {0x09E6, 0x09F1, propertyPVALID}, // BENGALI DIGIT ZERO..BENGALI LETTER RA WITH L - {0x09F2, 0x09FB, propertyDISALLOWED}, // BENGALI RUPEE MARK..BENGALI GANDA MARK - {0x09FC, 0x0A00, propertyUNASSIGNED}, // .. - {0x0A01, 0x0A03, propertyPVALID}, // GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN VISA - {0x0A04, 0x0, propertyUNASSIGNED}, // - {0x0A05, 0x0A0A, propertyPVALID}, // GURMUKHI LETTER A..GURMUKHI LETTER UU - {0x0A0B, 0x0A0E, propertyUNASSIGNED}, // .. - {0x0A0F, 0x0A10, propertyPVALID}, // GURMUKHI LETTER EE..GURMUKHI LETTER AI - {0x0A11, 0x0A12, propertyUNASSIGNED}, // .. - {0x0A13, 0x0A28, propertyPVALID}, // GURMUKHI LETTER OO..GURMUKHI LETTER NA - {0x0A29, 0x0, propertyUNASSIGNED}, // - {0x0A2A, 0x0A30, propertyPVALID}, // GURMUKHI LETTER PA..GURMUKHI LETTER RA - {0x0A31, 0x0, propertyUNASSIGNED}, // - {0x0A32, 0x0, propertyPVALID}, // GURMUKHI LETTER LA - {0x0A33, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER LLA - {0x0A34, 0x0, propertyUNASSIGNED}, // - {0x0A35, 0x0, propertyPVALID}, // GURMUKHI LETTER VA - {0x0A36, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER SHA - {0x0A37, 0x0, propertyUNASSIGNED}, // - {0x0A38, 0x0A39, propertyPVALID}, // GURMUKHI LETTER SA..GURMUKHI LETTER HA - {0x0A3A, 0x0A3B, propertyUNASSIGNED}, // .. - {0x0A3C, 0x0, propertyPVALID}, // GURMUKHI SIGN NUKTA - {0x0A3D, 0x0, propertyUNASSIGNED}, // - {0x0A3E, 0x0A42, propertyPVALID}, // GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN - {0x0A43, 0x0A46, propertyUNASSIGNED}, // .. - {0x0A47, 0x0A48, propertyPVALID}, // GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN - {0x0A49, 0x0A4A, propertyUNASSIGNED}, // .. - {0x0A4B, 0x0A4D, propertyPVALID}, // GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA - {0x0A4E, 0x0A50, propertyUNASSIGNED}, // .. 
- {0x0A51, 0x0, propertyPVALID}, // GURMUKHI SIGN UDAAT - {0x0A52, 0x0A58, propertyUNASSIGNED}, // .. - {0x0A59, 0x0A5B, propertyDISALLOWED}, // GURMUKHI LETTER KHHA..GURMUKHI LETTER ZA - {0x0A5C, 0x0, propertyPVALID}, // GURMUKHI LETTER RRA - {0x0A5D, 0x0, propertyUNASSIGNED}, // - {0x0A5E, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER FA - {0x0A5F, 0x0A65, propertyUNASSIGNED}, // .. - {0x0A66, 0x0A75, propertyPVALID}, // GURMUKHI DIGIT ZERO..GURMUKHI SIGN YAKASH - {0x0A76, 0x0A80, propertyUNASSIGNED}, // .. - {0x0A81, 0x0A83, propertyPVALID}, // GUJARATI SIGN CANDRABINDU..GUJARATI SIGN VIS - {0x0A84, 0x0, propertyUNASSIGNED}, // - {0x0A85, 0x0A8D, propertyPVALID}, // GUJARATI LETTER A..GUJARATI VOWEL CANDRA E - {0x0A8E, 0x0, propertyUNASSIGNED}, // - {0x0A8F, 0x0A91, propertyPVALID}, // GUJARATI LETTER E..GUJARATI VOWEL CANDRA O - {0x0A92, 0x0, propertyUNASSIGNED}, // - {0x0A93, 0x0AA8, propertyPVALID}, // GUJARATI LETTER O..GUJARATI LETTER NA - {0x0AA9, 0x0, propertyUNASSIGNED}, // - {0x0AAA, 0x0AB0, propertyPVALID}, // GUJARATI LETTER PA..GUJARATI LETTER RA - {0x0AB1, 0x0, propertyUNASSIGNED}, // - {0x0AB2, 0x0AB3, propertyPVALID}, // GUJARATI LETTER LA..GUJARATI LETTER LLA - {0x0AB4, 0x0, propertyUNASSIGNED}, // - {0x0AB5, 0x0AB9, propertyPVALID}, // GUJARATI LETTER VA..GUJARATI LETTER HA - {0x0ABA, 0x0ABB, propertyUNASSIGNED}, // .. - {0x0ABC, 0x0AC5, propertyPVALID}, // GUJARATI SIGN NUKTA..GUJARATI VOWEL SIGN CAN - {0x0AC6, 0x0, propertyUNASSIGNED}, // - {0x0AC7, 0x0AC9, propertyPVALID}, // GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN C - {0x0ACA, 0x0, propertyUNASSIGNED}, // - {0x0ACB, 0x0ACD, propertyPVALID}, // GUJARATI VOWEL SIGN O..GUJARATI SIGN VIRAMA - {0x0ACE, 0x0ACF, propertyUNASSIGNED}, // .. - {0x0AD0, 0x0, propertyPVALID}, // GUJARATI OM - {0x0AD1, 0x0ADF, propertyUNASSIGNED}, // .. - {0x0AE0, 0x0AE3, propertyPVALID}, // GUJARATI LETTER VOCALIC RR..GUJARATI VOWEL S - {0x0AE4, 0x0AE5, propertyUNASSIGNED}, // .. - {0x0AE6, 0x0AEF, propertyPVALID}, // GUJARATI DIGIT ZERO..GUJARATI DIGIT NINE - {0x0AF0, 0x0, propertyUNASSIGNED}, // - {0x0AF1, 0x0, propertyDISALLOWED}, // GUJARATI RUPEE SIGN - {0x0AF2, 0x0B00, propertyUNASSIGNED}, // .. - {0x0B01, 0x0B03, propertyPVALID}, // ORIYA SIGN CANDRABINDU..ORIYA SIGN VISARGA - {0x0B04, 0x0, propertyUNASSIGNED}, // - {0x0B05, 0x0B0C, propertyPVALID}, // ORIYA LETTER A..ORIYA LETTER VOCALIC L - {0x0B0D, 0x0B0E, propertyUNASSIGNED}, // .. - {0x0B0F, 0x0B10, propertyPVALID}, // ORIYA LETTER E..ORIYA LETTER AI - {0x0B11, 0x0B12, propertyUNASSIGNED}, // .. - {0x0B13, 0x0B28, propertyPVALID}, // ORIYA LETTER O..ORIYA LETTER NA - {0x0B29, 0x0, propertyUNASSIGNED}, // - {0x0B2A, 0x0B30, propertyPVALID}, // ORIYA LETTER PA..ORIYA LETTER RA - {0x0B31, 0x0, propertyUNASSIGNED}, // - {0x0B32, 0x0B33, propertyPVALID}, // ORIYA LETTER LA..ORIYA LETTER LLA - {0x0B34, 0x0, propertyUNASSIGNED}, // - {0x0B35, 0x0B39, propertyPVALID}, // ORIYA LETTER VA..ORIYA LETTER HA - {0x0B3A, 0x0B3B, propertyUNASSIGNED}, // .. - {0x0B3C, 0x0B44, propertyPVALID}, // ORIYA SIGN NUKTA..ORIYA VOWEL SIGN VOCALIC R - {0x0B45, 0x0B46, propertyUNASSIGNED}, // .. - {0x0B47, 0x0B48, propertyPVALID}, // ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI - {0x0B49, 0x0B4A, propertyUNASSIGNED}, // .. - {0x0B4B, 0x0B4D, propertyPVALID}, // ORIYA VOWEL SIGN O..ORIYA SIGN VIRAMA - {0x0B4E, 0x0B55, propertyUNASSIGNED}, // .. - {0x0B56, 0x0B57, propertyPVALID}, // ORIYA AI LENGTH MARK..ORIYA AU LENGTH MARK - {0x0B58, 0x0B5B, propertyUNASSIGNED}, // .. 
- {0x0B5C, 0x0B5D, propertyDISALLOWED}, // ORIYA LETTER RRA..ORIYA LETTER RHA - {0x0B5E, 0x0, propertyUNASSIGNED}, // - {0x0B5F, 0x0B63, propertyPVALID}, // ORIYA LETTER YYA..ORIYA VOWEL SIGN VOCALIC L - {0x0B64, 0x0B65, propertyUNASSIGNED}, // .. - {0x0B66, 0x0B6F, propertyPVALID}, // ORIYA DIGIT ZERO..ORIYA DIGIT NINE - {0x0B70, 0x0, propertyDISALLOWED}, // ORIYA ISSHAR - {0x0B71, 0x0, propertyPVALID}, // ORIYA LETTER WA - {0x0B72, 0x0B81, propertyUNASSIGNED}, // .. - {0x0B82, 0x0B83, propertyPVALID}, // TAMIL SIGN ANUSVARA..TAMIL SIGN VISARGA - {0x0B84, 0x0, propertyUNASSIGNED}, // - {0x0B85, 0x0B8A, propertyPVALID}, // TAMIL LETTER A..TAMIL LETTER UU - {0x0B8B, 0x0B8D, propertyUNASSIGNED}, // .. - {0x0B8E, 0x0B90, propertyPVALID}, // TAMIL LETTER E..TAMIL LETTER AI - {0x0B91, 0x0, propertyUNASSIGNED}, // - {0x0B92, 0x0B95, propertyPVALID}, // TAMIL LETTER O..TAMIL LETTER KA - {0x0B96, 0x0B98, propertyUNASSIGNED}, // .. - {0x0B99, 0x0B9A, propertyPVALID}, // TAMIL LETTER NGA..TAMIL LETTER CA - {0x0B9B, 0x0, propertyUNASSIGNED}, // - {0x0B9C, 0x0, propertyPVALID}, // TAMIL LETTER JA - {0x0B9D, 0x0, propertyUNASSIGNED}, // - {0x0B9E, 0x0B9F, propertyPVALID}, // TAMIL LETTER NYA..TAMIL LETTER TTA - {0x0BA0, 0x0BA2, propertyUNASSIGNED}, // .. - {0x0BA3, 0x0BA4, propertyPVALID}, // TAMIL LETTER NNA..TAMIL LETTER TA - {0x0BA5, 0x0BA7, propertyUNASSIGNED}, // .. - {0x0BA8, 0x0BAA, propertyPVALID}, // TAMIL LETTER NA..TAMIL LETTER PA - {0x0BAB, 0x0BAD, propertyUNASSIGNED}, // .. - {0x0BAE, 0x0BB9, propertyPVALID}, // TAMIL LETTER MA..TAMIL LETTER HA - {0x0BBA, 0x0BBD, propertyUNASSIGNED}, // .. - {0x0BBE, 0x0BC2, propertyPVALID}, // TAMIL VOWEL SIGN AA..TAMIL VOWEL SIGN UU - {0x0BC3, 0x0BC5, propertyUNASSIGNED}, // .. - {0x0BC6, 0x0BC8, propertyPVALID}, // TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI - {0x0BC9, 0x0, propertyUNASSIGNED}, // - {0x0BCA, 0x0BCD, propertyPVALID}, // TAMIL VOWEL SIGN O..TAMIL SIGN VIRAMA - {0x0BCE, 0x0BCF, propertyUNASSIGNED}, // .. - {0x0BD0, 0x0, propertyPVALID}, // TAMIL OM - {0x0BD1, 0x0BD6, propertyUNASSIGNED}, // .. - {0x0BD7, 0x0, propertyPVALID}, // TAMIL AU LENGTH MARK - {0x0BD8, 0x0BE5, propertyUNASSIGNED}, // .. - {0x0BE6, 0x0BEF, propertyPVALID}, // TAMIL DIGIT ZERO..TAMIL DIGIT NINE - {0x0BF0, 0x0BFA, propertyDISALLOWED}, // TAMIL NUMBER TEN..TAMIL NUMBER SIGN - {0x0BFB, 0x0C00, propertyUNASSIGNED}, // .. - {0x0C01, 0x0C03, propertyPVALID}, // TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA - {0x0C04, 0x0, propertyUNASSIGNED}, // - {0x0C05, 0x0C0C, propertyPVALID}, // TELUGU LETTER A..TELUGU LETTER VOCALIC L - {0x0C0D, 0x0, propertyUNASSIGNED}, // - {0x0C0E, 0x0C10, propertyPVALID}, // TELUGU LETTER E..TELUGU LETTER AI - {0x0C11, 0x0, propertyUNASSIGNED}, // - {0x0C12, 0x0C28, propertyPVALID}, // TELUGU LETTER O..TELUGU LETTER NA - {0x0C29, 0x0, propertyUNASSIGNED}, // - {0x0C2A, 0x0C33, propertyPVALID}, // TELUGU LETTER PA..TELUGU LETTER LLA - {0x0C34, 0x0, propertyUNASSIGNED}, // - {0x0C35, 0x0C39, propertyPVALID}, // TELUGU LETTER VA..TELUGU LETTER HA - {0x0C3A, 0x0C3C, propertyUNASSIGNED}, // .. - {0x0C3D, 0x0C44, propertyPVALID}, // TELUGU SIGN AVAGRAHA..TELUGU VOWEL SIGN VOCA - {0x0C45, 0x0, propertyUNASSIGNED}, // - {0x0C46, 0x0C48, propertyPVALID}, // TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI - {0x0C49, 0x0, propertyUNASSIGNED}, // - {0x0C4A, 0x0C4D, propertyPVALID}, // TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA - {0x0C4E, 0x0C54, propertyUNASSIGNED}, // .. 
- {0x0C55, 0x0C56, propertyPVALID}, // TELUGU LENGTH MARK..TELUGU AI LENGTH MARK - {0x0C57, 0x0, propertyUNASSIGNED}, // - {0x0C58, 0x0C59, propertyPVALID}, // TELUGU LETTER TSA..TELUGU LETTER DZA - {0x0C5A, 0x0C5F, propertyUNASSIGNED}, // .. - {0x0C60, 0x0C63, propertyPVALID}, // TELUGU LETTER VOCALIC RR..TELUGU VOWEL SIGN - {0x0C64, 0x0C65, propertyUNASSIGNED}, // .. - {0x0C66, 0x0C6F, propertyPVALID}, // TELUGU DIGIT ZERO..TELUGU DIGIT NINE - {0x0C70, 0x0C77, propertyUNASSIGNED}, // .. - {0x0C78, 0x0C7F, propertyDISALLOWED}, // TELUGU FRACTION DIGIT ZERO FOR ODD POWERS OF - {0x0C80, 0x0C81, propertyUNASSIGNED}, // .. - {0x0C82, 0x0C83, propertyPVALID}, // KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA - {0x0C84, 0x0, propertyUNASSIGNED}, // - {0x0C85, 0x0C8C, propertyPVALID}, // KANNADA LETTER A..KANNADA LETTER VOCALIC L - {0x0C8D, 0x0, propertyUNASSIGNED}, // - {0x0C8E, 0x0C90, propertyPVALID}, // KANNADA LETTER E..KANNADA LETTER AI - {0x0C91, 0x0, propertyUNASSIGNED}, // - {0x0C92, 0x0CA8, propertyPVALID}, // KANNADA LETTER O..KANNADA LETTER NA - {0x0CA9, 0x0, propertyUNASSIGNED}, // - {0x0CAA, 0x0CB3, propertyPVALID}, // KANNADA LETTER PA..KANNADA LETTER LLA - {0x0CB4, 0x0, propertyUNASSIGNED}, // - {0x0CB5, 0x0CB9, propertyPVALID}, // KANNADA LETTER VA..KANNADA LETTER HA - {0x0CBA, 0x0CBB, propertyUNASSIGNED}, // .. - {0x0CBC, 0x0CC4, propertyPVALID}, // KANNADA SIGN NUKTA..KANNADA VOWEL SIGN VOCAL - {0x0CC5, 0x0, propertyUNASSIGNED}, // - {0x0CC6, 0x0CC8, propertyPVALID}, // KANNADA VOWEL SIGN E..KANNADA VOWEL SIGN AI - {0x0CC9, 0x0, propertyUNASSIGNED}, // - {0x0CCA, 0x0CCD, propertyPVALID}, // KANNADA VOWEL SIGN O..KANNADA SIGN VIRAMA - {0x0CCE, 0x0CD4, propertyUNASSIGNED}, // .. - {0x0CD5, 0x0CD6, propertyPVALID}, // KANNADA LENGTH MARK..KANNADA AI LENGTH MARK - {0x0CD7, 0x0CDD, propertyUNASSIGNED}, // .. - {0x0CDE, 0x0, propertyPVALID}, // KANNADA LETTER FA - {0x0CDF, 0x0, propertyUNASSIGNED}, // - {0x0CE0, 0x0CE3, propertyPVALID}, // KANNADA LETTER VOCALIC RR..KANNADA VOWEL SIG - {0x0CE4, 0x0CE5, propertyUNASSIGNED}, // .. - {0x0CE6, 0x0CEF, propertyPVALID}, // KANNADA DIGIT ZERO..KANNADA DIGIT NINE - {0x0CF0, 0x0, propertyUNASSIGNED}, // - {0x0CF1, 0x0CF2, propertyDISALLOWED}, // KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADH - {0x0CF3, 0x0D01, propertyUNASSIGNED}, // .. - {0x0D02, 0x0D03, propertyPVALID}, // MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISA - {0x0D04, 0x0, propertyUNASSIGNED}, // - {0x0D05, 0x0D0C, propertyPVALID}, // MALAYALAM LETTER A..MALAYALAM LETTER VOCALIC - {0x0D0D, 0x0, propertyUNASSIGNED}, // - {0x0D0E, 0x0D10, propertyPVALID}, // MALAYALAM LETTER E..MALAYALAM LETTER AI - {0x0D11, 0x0, propertyUNASSIGNED}, // - {0x0D12, 0x0D28, propertyPVALID}, // MALAYALAM LETTER O..MALAYALAM LETTER NA - {0x0D29, 0x0, propertyUNASSIGNED}, // - {0x0D2A, 0x0D39, propertyPVALID}, // MALAYALAM LETTER PA..MALAYALAM LETTER HA - {0x0D3A, 0x0D3C, propertyUNASSIGNED}, // .. - {0x0D3D, 0x0D44, propertyPVALID}, // MALAYALAM SIGN AVAGRAHA..MALAYALAM VOWEL SIG - {0x0D45, 0x0, propertyUNASSIGNED}, // - {0x0D46, 0x0D48, propertyPVALID}, // MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN - {0x0D49, 0x0, propertyUNASSIGNED}, // - {0x0D4A, 0x0D4D, propertyPVALID}, // MALAYALAM VOWEL SIGN O..MALAYALAM SIGN VIRAM - {0x0D4E, 0x0D56, propertyUNASSIGNED}, // .. - {0x0D57, 0x0, propertyPVALID}, // MALAYALAM AU LENGTH MARK - {0x0D58, 0x0D5F, propertyUNASSIGNED}, // .. - {0x0D60, 0x0D63, propertyPVALID}, // MALAYALAM LETTER VOCALIC RR..MALAYALAM VOWEL - {0x0D64, 0x0D65, propertyUNASSIGNED}, // .. 
- {0x0D66, 0x0D6F, propertyPVALID}, // MALAYALAM DIGIT ZERO..MALAYALAM DIGIT NINE - {0x0D70, 0x0D75, propertyDISALLOWED}, // MALAYALAM NUMBER TEN..MALAYALAM FRACTION THR - {0x0D76, 0x0D78, propertyUNASSIGNED}, // .. - {0x0D79, 0x0, propertyDISALLOWED}, // MALAYALAM DATE MARK - {0x0D7A, 0x0D7F, propertyPVALID}, // MALAYALAM LETTER CHILLU NN..MALAYALAM LETTER - {0x0D80, 0x0D81, propertyUNASSIGNED}, // .. - {0x0D82, 0x0D83, propertyPVALID}, // SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARG - {0x0D84, 0x0, propertyUNASSIGNED}, // - {0x0D85, 0x0D96, propertyPVALID}, // SINHALA LETTER AYANNA..SINHALA LETTER AUYANN - {0x0D97, 0x0D99, propertyUNASSIGNED}, // .. - {0x0D9A, 0x0DB1, propertyPVALID}, // SINHALA LETTER ALPAPRAANA KAYANNA..SINHALA L - {0x0DB2, 0x0, propertyUNASSIGNED}, // - {0x0DB3, 0x0DBB, propertyPVALID}, // SINHALA LETTER SANYAKA DAYANNA..SINHALA LETT - {0x0DBC, 0x0, propertyUNASSIGNED}, // - {0x0DBD, 0x0, propertyPVALID}, // SINHALA LETTER DANTAJA LAYANNA - {0x0DBE, 0x0DBF, propertyUNASSIGNED}, // .. - {0x0DC0, 0x0DC6, propertyPVALID}, // SINHALA LETTER VAYANNA..SINHALA LETTER FAYAN - {0x0DC7, 0x0DC9, propertyUNASSIGNED}, // .. - {0x0DCA, 0x0, propertyPVALID}, // SINHALA SIGN AL-LAKUNA - {0x0DCB, 0x0DCE, propertyUNASSIGNED}, // .. - {0x0DCF, 0x0DD4, propertyPVALID}, // SINHALA VOWEL SIGN AELA-PILLA..SINHALA VOWEL - {0x0DD5, 0x0, propertyUNASSIGNED}, // - {0x0DD6, 0x0, propertyPVALID}, // SINHALA VOWEL SIGN DIGA PAA-PILLA - {0x0DD7, 0x0, propertyUNASSIGNED}, // - {0x0DD8, 0x0DDF, propertyPVALID}, // SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOW - {0x0DE0, 0x0DF1, propertyUNASSIGNED}, // .. - {0x0DF2, 0x0DF3, propertyPVALID}, // SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHAL - {0x0DF4, 0x0, propertyDISALLOWED}, // SINHALA PUNCTUATION KUNDDALIYA - {0x0DF5, 0x0E00, propertyUNASSIGNED}, // .. - {0x0E01, 0x0E32, propertyPVALID}, // THAI CHARACTER KO KAI..THAI CHARACTER SARA A - {0x0E33, 0x0, propertyDISALLOWED}, // THAI CHARACTER SARA AM - {0x0E34, 0x0E3A, propertyPVALID}, // THAI CHARACTER SARA I..THAI CHARACTER PHINTH - {0x0E3B, 0x0E3E, propertyUNASSIGNED}, // .. - {0x0E3F, 0x0, propertyDISALLOWED}, // THAI CURRENCY SYMBOL BAHT - {0x0E40, 0x0E4E, propertyPVALID}, // THAI CHARACTER SARA E..THAI CHARACTER YAMAKK - {0x0E4F, 0x0, propertyDISALLOWED}, // THAI CHARACTER FONGMAN - {0x0E50, 0x0E59, propertyPVALID}, // THAI DIGIT ZERO..THAI DIGIT NINE - {0x0E5A, 0x0E5B, propertyDISALLOWED}, // THAI CHARACTER ANGKHANKHU..THAI CHARACTER KH - {0x0E5C, 0x0E80, propertyUNASSIGNED}, // .. - {0x0E81, 0x0E82, propertyPVALID}, // LAO LETTER KO..LAO LETTER KHO SUNG - {0x0E83, 0x0, propertyUNASSIGNED}, // - {0x0E84, 0x0, propertyPVALID}, // LAO LETTER KHO TAM - {0x0E85, 0x0E86, propertyUNASSIGNED}, // .. - {0x0E87, 0x0E88, propertyPVALID}, // LAO LETTER NGO..LAO LETTER CO - {0x0E89, 0x0, propertyUNASSIGNED}, // - {0x0E8A, 0x0, propertyPVALID}, // LAO LETTER SO TAM - {0x0E8B, 0x0E8C, propertyUNASSIGNED}, // .. - {0x0E8D, 0x0, propertyPVALID}, // LAO LETTER NYO - {0x0E8E, 0x0E93, propertyUNASSIGNED}, // .. 
- {0x0E94, 0x0E97, propertyPVALID}, // LAO LETTER DO..LAO LETTER THO TAM - {0x0E98, 0x0, propertyUNASSIGNED}, // - {0x0E99, 0x0E9F, propertyPVALID}, // LAO LETTER NO..LAO LETTER FO SUNG - {0x0EA0, 0x0, propertyUNASSIGNED}, // - {0x0EA1, 0x0EA3, propertyPVALID}, // LAO LETTER MO..LAO LETTER LO LING - {0x0EA4, 0x0, propertyUNASSIGNED}, // - {0x0EA5, 0x0, propertyPVALID}, // LAO LETTER LO LOOT - {0x0EA6, 0x0, propertyUNASSIGNED}, // - {0x0EA7, 0x0, propertyPVALID}, // LAO LETTER WO - {0x0EA8, 0x0EA9, propertyUNASSIGNED}, // .. - {0x0EAA, 0x0EAB, propertyPVALID}, // LAO LETTER SO SUNG..LAO LETTER HO SUNG - {0x0EAC, 0x0, propertyUNASSIGNED}, // - {0x0EAD, 0x0EB2, propertyPVALID}, // LAO LETTER O..LAO VOWEL SIGN AA - {0x0EB3, 0x0, propertyDISALLOWED}, // LAO VOWEL SIGN AM - {0x0EB4, 0x0EB9, propertyPVALID}, // LAO VOWEL SIGN I..LAO VOWEL SIGN UU - {0x0EBA, 0x0, propertyUNASSIGNED}, // - {0x0EBB, 0x0EBD, propertyPVALID}, // LAO VOWEL SIGN MAI KON..LAO SEMIVOWEL SIGN N - {0x0EBE, 0x0EBF, propertyUNASSIGNED}, // .. - {0x0EC0, 0x0EC4, propertyPVALID}, // LAO VOWEL SIGN E..LAO VOWEL SIGN AI - {0x0EC5, 0x0, propertyUNASSIGNED}, // - {0x0EC6, 0x0, propertyPVALID}, // LAO KO LA - {0x0EC7, 0x0, propertyUNASSIGNED}, // - {0x0EC8, 0x0ECD, propertyPVALID}, // LAO TONE MAI EK..LAO NIGGAHITA - {0x0ECE, 0x0ECF, propertyUNASSIGNED}, // .. - {0x0ED0, 0x0ED9, propertyPVALID}, // LAO DIGIT ZERO..LAO DIGIT NINE - {0x0EDA, 0x0EDB, propertyUNASSIGNED}, // .. - {0x0EDC, 0x0EDD, propertyDISALLOWED}, // LAO HO NO..LAO HO MO - {0x0EDE, 0x0EFF, propertyUNASSIGNED}, // .. - {0x0F00, 0x0, propertyPVALID}, // TIBETAN SYLLABLE OM - {0x0F01, 0x0F0A, propertyDISALLOWED}, // TIBETAN MARK GTER YIG MGO TRUNCATED A..TIBET - {0x0F0B, 0x0, propertyPVALID}, // TIBETAN MARK INTERSYLLABIC TSHEG - {0x0F0C, 0x0F17, propertyDISALLOWED}, // TIBETAN MARK DELIMITER TSHEG BSTAR..TIBETAN - {0x0F18, 0x0F19, propertyPVALID}, // TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN - {0x0F1A, 0x0F1F, propertyDISALLOWED}, // TIBETAN SIGN RDEL DKAR GCIG..TIBETAN SIGN RD - {0x0F20, 0x0F29, propertyPVALID}, // TIBETAN DIGIT ZERO..TIBETAN DIGIT NINE - {0x0F2A, 0x0F34, propertyDISALLOWED}, // TIBETAN DIGIT HALF ONE..TIBETAN MARK BSDUS R - {0x0F35, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG NYI ZLA - {0x0F36, 0x0, propertyDISALLOWED}, // TIBETAN MARK CARET -DZUD RTAGS BZHI MIG CAN - {0x0F37, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG SGOR RTAGS - {0x0F38, 0x0, propertyDISALLOWED}, // TIBETAN MARK CHE MGO - {0x0F39, 0x0, propertyPVALID}, // TIBETAN MARK TSA -PHRU - {0x0F3A, 0x0F3D, propertyDISALLOWED}, // TIBETAN MARK GUG RTAGS GYON..TIBETAN MARK AN - {0x0F3E, 0x0F42, propertyPVALID}, // TIBETAN SIGN YAR TSHES..TIBETAN LETTER GA - {0x0F43, 0x0, propertyDISALLOWED}, // TIBETAN LETTER GHA - {0x0F44, 0x0F47, propertyPVALID}, // TIBETAN LETTER NGA..TIBETAN LETTER JA - {0x0F48, 0x0, propertyUNASSIGNED}, // - {0x0F49, 0x0F4C, propertyPVALID}, // TIBETAN LETTER NYA..TIBETAN LETTER DDA - {0x0F4D, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DDHA - {0x0F4E, 0x0F51, propertyPVALID}, // TIBETAN LETTER NNA..TIBETAN LETTER DA - {0x0F52, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DHA - {0x0F53, 0x0F56, propertyPVALID}, // TIBETAN LETTER NA..TIBETAN LETTER BA - {0x0F57, 0x0, propertyDISALLOWED}, // TIBETAN LETTER BHA - {0x0F58, 0x0F5B, propertyPVALID}, // TIBETAN LETTER MA..TIBETAN LETTER DZA - {0x0F5C, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DZHA - {0x0F5D, 0x0F68, propertyPVALID}, // TIBETAN LETTER WA..TIBETAN LETTER A - {0x0F69, 0x0, propertyDISALLOWED}, // TIBETAN LETTER KSSA
- {0x0F6A, 0x0F6C, propertyPVALID}, // TIBETAN LETTER FIXED-FORM RA..TIBETAN LETTER - {0x0F6D, 0x0F70, propertyUNASSIGNED}, // .. - {0x0F71, 0x0F72, propertyPVALID}, // TIBETAN VOWEL SIGN AA..TIBETAN VOWEL SIGN I - {0x0F73, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN II - {0x0F74, 0x0, propertyPVALID}, // TIBETAN VOWEL SIGN U - {0x0F75, 0x0F79, propertyDISALLOWED}, // TIBETAN VOWEL SIGN UU..TIBETAN VOWEL SIGN VO - {0x0F7A, 0x0F80, propertyPVALID}, // TIBETAN VOWEL SIGN E..TIBETAN VOWEL SIGN REV - {0x0F81, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN REVERSED II - {0x0F82, 0x0F84, propertyPVALID}, // TIBETAN SIGN NYI ZLA NAA DA..TIBETAN MARK HA - {0x0F85, 0x0, propertyDISALLOWED}, // TIBETAN MARK PALUTA - {0x0F86, 0x0F8B, propertyPVALID}, // TIBETAN SIGN LCI RTAGS..TIBETAN SIGN GRU MED - {0x0F8C, 0x0F8F, propertyUNASSIGNED}, // .. - {0x0F90, 0x0F92, propertyPVALID}, // TIBETAN SUBJOINED LETTER KA..TIBETAN SUBJOIN - {0x0F93, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER GHA - {0x0F94, 0x0F97, propertyPVALID}, // TIBETAN SUBJOINED LETTER NGA..TIBETAN SUBJOI - {0x0F98, 0x0, propertyUNASSIGNED}, // - {0x0F99, 0x0F9C, propertyPVALID}, // TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOI - {0x0F9D, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DDHA - {0x0F9E, 0x0FA1, propertyPVALID}, // TIBETAN SUBJOINED LETTER NNA..TIBETAN SUBJOI - {0x0FA2, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DHA - {0x0FA3, 0x0FA6, propertyPVALID}, // TIBETAN SUBJOINED LETTER NA..TIBETAN SUBJOIN - {0x0FA7, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER BHA - {0x0FA8, 0x0FAB, propertyPVALID}, // TIBETAN SUBJOINED LETTER MA..TIBETAN SUBJOIN - {0x0FAC, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DZHA - {0x0FAD, 0x0FB8, propertyPVALID}, // TIBETAN SUBJOINED LETTER WA..TIBETAN SUBJOIN - {0x0FB9, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER KSSA - {0x0FBA, 0x0FBC, propertyPVALID}, // TIBETAN SUBJOINED LETTER FIXED-FORM WA..TIBE - {0x0FBD, 0x0, propertyUNASSIGNED}, // - {0x0FBE, 0x0FC5, propertyDISALLOWED}, // TIBETAN KU RU KHA..TIBETAN SYMBOL RDO RJE - {0x0FC6, 0x0, propertyPVALID}, // TIBETAN SYMBOL PADMA GDAN - {0x0FC7, 0x0FCC, propertyDISALLOWED}, // TIBETAN SYMBOL RDO RJE RGYA GRAM..TIBETAN SY - {0x0FCD, 0x0, propertyUNASSIGNED}, // - {0x0FCE, 0x0FD8, propertyDISALLOWED}, // TIBETAN SIGN RDEL NAG RDEL DKAR..LEFT-FACING - {0x0FD9, 0x0FFF, propertyUNASSIGNED}, // .. - {0x1000, 0x1049, propertyPVALID}, // MYANMAR LETTER KA..MYANMAR DIGIT NINE - {0x104A, 0x104F, propertyDISALLOWED}, // MYANMAR SIGN LITTLE SECTION..MYANMAR SYMBOL - {0x1050, 0x109D, propertyPVALID}, // MYANMAR LETTER SHA..MYANMAR VOWEL SIGN AITON - {0x109E, 0x10C5, propertyDISALLOWED}, // MYANMAR SYMBOL SHAN ONE..GEORGIAN CAPITAL LE - {0x10C6, 0x10CF, propertyUNASSIGNED}, // .. - {0x10D0, 0x10FA, propertyPVALID}, // GEORGIAN LETTER AN..GEORGIAN LETTER AIN - {0x10FB, 0x10FC, propertyDISALLOWED}, // GEORGIAN PARAGRAPH SEPARATOR..MODIFIER LETTE - {0x10FD, 0x10FF, propertyUNASSIGNED}, // .. - {0x1100, 0x11FF, propertyDISALLOWED}, // HANGUL CHOSEONG KIYEOK..HANGUL JONGSEONG SSA - {0x1200, 0x1248, propertyPVALID}, // ETHIOPIC SYLLABLE HA..ETHIOPIC SYLLABLE QWA - {0x1249, 0x0, propertyUNASSIGNED}, // - {0x124A, 0x124D, propertyPVALID}, // ETHIOPIC SYLLABLE QWI..ETHIOPIC SYLLABLE QWE - {0x124E, 0x124F, propertyUNASSIGNED}, // ..
- {0x1250, 0x1256, propertyPVALID}, // ETHIOPIC SYLLABLE QHA..ETHIOPIC SYLLABLE QHO - {0x1257, 0x0, propertyUNASSIGNED}, // - {0x1258, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE QHWA - {0x1259, 0x0, propertyUNASSIGNED}, // - {0x125A, 0x125D, propertyPVALID}, // ETHIOPIC SYLLABLE QHWI..ETHIOPIC SYLLABLE QH - {0x125E, 0x125F, propertyUNASSIGNED}, // .. - {0x1260, 0x1288, propertyPVALID}, // ETHIOPIC SYLLABLE BA..ETHIOPIC SYLLABLE XWA - {0x1289, 0x0, propertyUNASSIGNED}, // - {0x128A, 0x128D, propertyPVALID}, // ETHIOPIC SYLLABLE XWI..ETHIOPIC SYLLABLE XWE - {0x128E, 0x128F, propertyUNASSIGNED}, // .. - {0x1290, 0x12B0, propertyPVALID}, // ETHIOPIC SYLLABLE NA..ETHIOPIC SYLLABLE KWA - {0x12B1, 0x0, propertyUNASSIGNED}, // - {0x12B2, 0x12B5, propertyPVALID}, // ETHIOPIC SYLLABLE KWI..ETHIOPIC SYLLABLE KWE - {0x12B6, 0x12B7, propertyUNASSIGNED}, // .. - {0x12B8, 0x12BE, propertyPVALID}, // ETHIOPIC SYLLABLE KXA..ETHIOPIC SYLLABLE KXO - {0x12BF, 0x0, propertyUNASSIGNED}, // - {0x12C0, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE KXWA - {0x12C1, 0x0, propertyUNASSIGNED}, // - {0x12C2, 0x12C5, propertyPVALID}, // ETHIOPIC SYLLABLE KXWI..ETHIOPIC SYLLABLE KX - {0x12C6, 0x12C7, propertyUNASSIGNED}, // .. - {0x12C8, 0x12D6, propertyPVALID}, // ETHIOPIC SYLLABLE WA..ETHIOPIC SYLLABLE PHAR - {0x12D7, 0x0, propertyUNASSIGNED}, // - {0x12D8, 0x1310, propertyPVALID}, // ETHIOPIC SYLLABLE ZA..ETHIOPIC SYLLABLE GWA - {0x1311, 0x0, propertyUNASSIGNED}, // - {0x1312, 0x1315, propertyPVALID}, // ETHIOPIC SYLLABLE GWI..ETHIOPIC SYLLABLE GWE - {0x1316, 0x1317, propertyUNASSIGNED}, // .. - {0x1318, 0x135A, propertyPVALID}, // ETHIOPIC SYLLABLE GGA..ETHIOPIC SYLLABLE FYA - {0x135B, 0x135E, propertyUNASSIGNED}, // .. - {0x135F, 0x0, propertyPVALID}, // ETHIOPIC COMBINING GEMINATION MARK - {0x1360, 0x137C, propertyDISALLOWED}, // ETHIOPIC SECTION MARK..ETHIOPIC NUMBER TEN T - {0x137D, 0x137F, propertyUNASSIGNED}, // .. - {0x1380, 0x138F, propertyPVALID}, // ETHIOPIC SYLLABLE SEBATBEIT MWA..ETHIOPIC SY - {0x1390, 0x1399, propertyDISALLOWED}, // ETHIOPIC TONAL MARK YIZET..ETHIOPIC TONAL MA - {0x139A, 0x139F, propertyUNASSIGNED}, // .. - {0x13A0, 0x13F4, propertyPVALID}, // CHEROKEE LETTER A..CHEROKEE LETTER YV - {0x13F5, 0x13FF, propertyUNASSIGNED}, // .. - {0x1400, 0x0, propertyDISALLOWED}, // CANADIAN SYLLABICS HYPHEN - {0x1401, 0x166C, propertyPVALID}, // CANADIAN SYLLABICS E..CANADIAN SYLLABICS CAR - {0x166D, 0x166E, propertyDISALLOWED}, // CANADIAN SYLLABICS CHI SIGN..CANADIAN SYLLAB - {0x166F, 0x167F, propertyPVALID}, // CANADIAN SYLLABICS QAI..CANADIAN SYLLABICS B - {0x1680, 0x0, propertyDISALLOWED}, // OGHAM SPACE MARK - {0x1681, 0x169A, propertyPVALID}, // OGHAM LETTER BEITH..OGHAM LETTER PEITH - {0x169B, 0x169C, propertyDISALLOWED}, // OGHAM FEATHER MARK..OGHAM REVERSED FEATHER M - {0x169D, 0x169F, propertyUNASSIGNED}, // .. - {0x16A0, 0x16EA, propertyPVALID}, // RUNIC LETTER FEHU FEOH FE F..RUNIC LETTER X - {0x16EB, 0x16F0, propertyDISALLOWED}, // RUNIC SINGLE PUNCTUATION..RUNIC BELGTHOR SYM - {0x16F1, 0x16FF, propertyUNASSIGNED}, // .. - {0x1700, 0x170C, propertyPVALID}, // TAGALOG LETTER A..TAGALOG LETTER YA - {0x170D, 0x0, propertyUNASSIGNED}, // - {0x170E, 0x1714, propertyPVALID}, // TAGALOG LETTER LA..TAGALOG SIGN VIRAMA - {0x1715, 0x171F, propertyUNASSIGNED}, // .. - {0x1720, 0x1734, propertyPVALID}, // HANUNOO LETTER A..HANUNOO SIGN PAMUDPOD - {0x1735, 0x1736, propertyDISALLOWED}, // PHILIPPINE SINGLE PUNCTUATION..PHILIPPINE DO - {0x1737, 0x173F, propertyUNASSIGNED}, // .. 
- {0x1740, 0x1753, propertyPVALID}, // BUHID LETTER A..BUHID VOWEL SIGN U - {0x1754, 0x175F, propertyUNASSIGNED}, // .. - {0x1760, 0x176C, propertyPVALID}, // TAGBANWA LETTER A..TAGBANWA LETTER YA - {0x176D, 0x0, propertyUNASSIGNED}, // - {0x176E, 0x1770, propertyPVALID}, // TAGBANWA LETTER LA..TAGBANWA LETTER SA - {0x1771, 0x0, propertyUNASSIGNED}, // - {0x1772, 0x1773, propertyPVALID}, // TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U - {0x1774, 0x177F, propertyUNASSIGNED}, // .. - {0x1780, 0x17B3, propertyPVALID}, // KHMER LETTER KA..KHMER INDEPENDENT VOWEL QAU - {0x17B4, 0x17B5, propertyDISALLOWED}, // KHMER VOWEL INHERENT AQ..KHMER VOWEL INHEREN - {0x17B6, 0x17D3, propertyPVALID}, // KHMER VOWEL SIGN AA..KHMER SIGN BATHAMASAT - {0x17D4, 0x17D6, propertyDISALLOWED}, // KHMER SIGN KHAN..KHMER SIGN CAMNUC PII KUUH - {0x17D7, 0x0, propertyPVALID}, // KHMER SIGN LEK TOO - {0x17D8, 0x17DB, propertyDISALLOWED}, // KHMER SIGN BEYYAL..KHMER CURRENCY SYMBOL RIE - {0x17DC, 0x17DD, propertyPVALID}, // KHMER SIGN AVAKRAHASANYA..KHMER SIGN ATTHACA - {0x17DE, 0x17DF, propertyUNASSIGNED}, // .. - {0x17E0, 0x17E9, propertyPVALID}, // KHMER DIGIT ZERO..KHMER DIGIT NINE - {0x17EA, 0x17EF, propertyUNASSIGNED}, // .. - {0x17F0, 0x17F9, propertyDISALLOWED}, // KHMER SYMBOL LEK ATTAK SON..KHMER SYMBOL LEK - {0x17FA, 0x17FF, propertyUNASSIGNED}, // .. - {0x1800, 0x180E, propertyDISALLOWED}, // MONGOLIAN BIRGA..MONGOLIAN VOWEL SEPARATOR - {0x180F, 0x0, propertyUNASSIGNED}, // - {0x1810, 0x1819, propertyPVALID}, // MONGOLIAN DIGIT ZERO..MONGOLIAN DIGIT NINE - {0x181A, 0x181F, propertyUNASSIGNED}, // .. - {0x1820, 0x1877, propertyPVALID}, // MONGOLIAN LETTER A..MONGOLIAN LETTER MANCHU - {0x1878, 0x187F, propertyUNASSIGNED}, // .. - {0x1880, 0x18AA, propertyPVALID}, // MONGOLIAN LETTER ALI GALI ANUSVARA ONE..MONG - {0x18AB, 0x18AF, propertyUNASSIGNED}, // .. - {0x18B0, 0x18F5, propertyPVALID}, // CANADIAN SYLLABICS OY..CANADIAN SYLLABICS CA - {0x18F6, 0x18FF, propertyUNASSIGNED}, // .. - {0x1900, 0x191C, propertyPVALID}, // LIMBU VOWEL-CARRIER LETTER..LIMBU LETTER HA - {0x191D, 0x191F, propertyUNASSIGNED}, // .. - {0x1920, 0x192B, propertyPVALID}, // LIMBU VOWEL SIGN A..LIMBU SUBJOINED LETTER W - {0x192C, 0x192F, propertyUNASSIGNED}, // .. - {0x1930, 0x193B, propertyPVALID}, // LIMBU SMALL LETTER KA..LIMBU SIGN SA-I - {0x193C, 0x193F, propertyUNASSIGNED}, // .. - {0x1940, 0x0, propertyDISALLOWED}, // LIMBU SIGN LOO - {0x1941, 0x1943, propertyUNASSIGNED}, // .. - {0x1944, 0x1945, propertyDISALLOWED}, // LIMBU EXCLAMATION MARK..LIMBU QUESTION MARK - {0x1946, 0x196D, propertyPVALID}, // LIMBU DIGIT ZERO..TAI LE LETTER AI - {0x196E, 0x196F, propertyUNASSIGNED}, // .. - {0x1970, 0x1974, propertyPVALID}, // TAI LE LETTER TONE-2..TAI LE LETTER TONE-6 - {0x1975, 0x197F, propertyUNASSIGNED}, // .. - {0x1980, 0x19AB, propertyPVALID}, // NEW TAI LUE LETTER HIGH QA..NEW TAI LUE LETT - {0x19AC, 0x19AF, propertyUNASSIGNED}, // .. - {0x19B0, 0x19C9, propertyPVALID}, // NEW TAI LUE VOWEL SIGN VOWEL SHORTENER..NEW - {0x19CA, 0x19CF, propertyUNASSIGNED}, // .. - {0x19D0, 0x19DA, propertyPVALID}, // NEW TAI LUE DIGIT ZERO..NEW TAI LUE THAM DIG - {0x19DB, 0x19DD, propertyUNASSIGNED}, // .. - {0x19DE, 0x19FF, propertyDISALLOWED}, // NEW TAI LUE SIGN LAE..KHMER SYMBOL DAP-PRAM - {0x1A00, 0x1A1B, propertyPVALID}, // BUGINESE LETTER KA..BUGINESE VOWEL SIGN AE - {0x1A1C, 0x1A1D, propertyUNASSIGNED}, // .. 
- {0x1A1E, 0x1A1F, propertyDISALLOWED}, // BUGINESE PALLAWA..BUGINESE END OF SECTION - {0x1A20, 0x1A5E, propertyPVALID}, // TAI THAM LETTER HIGH KA..TAI THAM CONSONANT - {0x1A5F, 0x0, propertyUNASSIGNED}, // - {0x1A60, 0x1A7C, propertyPVALID}, // TAI THAM SIGN SAKOT..TAI THAM SIGN KHUEN-LUE - {0x1A7D, 0x1A7E, propertyUNASSIGNED}, // .. - {0x1A7F, 0x1A89, propertyPVALID}, // TAI THAM COMBINING CRYPTOGRAMMIC DOT..TAI TH - {0x1A8A, 0x1A8F, propertyUNASSIGNED}, // .. - {0x1A90, 0x1A99, propertyPVALID}, // TAI THAM THAM DIGIT ZERO..TAI THAM THAM DIGI - {0x1A9A, 0x1A9F, propertyUNASSIGNED}, // .. - {0x1AA0, 0x1AA6, propertyDISALLOWED}, // TAI THAM SIGN WIANG..TAI THAM SIGN REVERSED - {0x1AA7, 0x0, propertyPVALID}, // TAI THAM SIGN MAI YAMOK - {0x1AA8, 0x1AAD, propertyDISALLOWED}, // TAI THAM SIGN KAAN..TAI THAM SIGN CAANG - {0x1AAE, 0x1AFF, propertyUNASSIGNED}, // .. - {0x1B00, 0x1B4B, propertyPVALID}, // BALINESE SIGN ULU RICEM..BALINESE LETTER ASY - {0x1B4C, 0x1B4F, propertyUNASSIGNED}, // .. - {0x1B50, 0x1B59, propertyPVALID}, // BALINESE DIGIT ZERO..BALINESE DIGIT NINE - {0x1B5A, 0x1B6A, propertyDISALLOWED}, // BALINESE PANTI..BALINESE MUSICAL SYMBOL DANG - {0x1B6B, 0x1B73, propertyPVALID}, // BALINESE MUSICAL SYMBOL COMBINING TEGEH..BAL - {0x1B74, 0x1B7C, propertyDISALLOWED}, // BALINESE MUSICAL SYMBOL RIGHT-HAND OPEN DUG. - {0x1B7D, 0x1B7F, propertyUNASSIGNED}, // .. - {0x1B80, 0x1BAA, propertyPVALID}, // SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PAMA - {0x1BAB, 0x1BAD, propertyUNASSIGNED}, // .. - {0x1BAE, 0x1BB9, propertyPVALID}, // SUNDANESE LETTER KHA..SUNDANESE DIGIT NINE - {0x1BBA, 0x1BFF, propertyUNASSIGNED}, // .. - {0x1C00, 0x1C37, propertyPVALID}, // LEPCHA LETTER KA..LEPCHA SIGN NUKTA - {0x1C38, 0x1C3A, propertyUNASSIGNED}, // .. - {0x1C3B, 0x1C3F, propertyDISALLOWED}, // LEPCHA PUNCTUATION TA-ROL..LEPCHA PUNCTUATIO - {0x1C40, 0x1C49, propertyPVALID}, // LEPCHA DIGIT ZERO..LEPCHA DIGIT NINE - {0x1C4A, 0x1C4C, propertyUNASSIGNED}, // .. - {0x1C4D, 0x1C7D, propertyPVALID}, // LEPCHA LETTER TTA..OL CHIKI AHAD - {0x1C7E, 0x1C7F, propertyDISALLOWED}, // OL CHIKI PUNCTUATION MUCAAD..OL CHIKI PUNCTU - {0x1C80, 0x1CCF, propertyUNASSIGNED}, // .. - {0x1CD0, 0x1CD2, propertyPVALID}, // VEDIC TONE KARSHANA..VEDIC TONE PRENKHA - {0x1CD3, 0x0, propertyDISALLOWED}, // VEDIC SIGN NIHSHVASA - {0x1CD4, 0x1CF2, propertyPVALID}, // VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC - {0x1CF3, 0x1CFF, propertyUNASSIGNED}, // .. - {0x1D00, 0x1D2B, propertyPVALID}, // LATIN LETTER SMALL CAPITAL A..CYRILLIC LETTE - {0x1D2C, 0x1D2E, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL A..MODIFIER LETTER C - {0x1D2F, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL BARRED B - {0x1D30, 0x1D3A, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL D..MODIFIER LETTER C - {0x1D3B, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL REVERSED N - {0x1D3C, 0x1D4D, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL O..MODIFIER LETTER S - {0x1D4E, 0x0, propertyPVALID}, // MODIFIER LETTER SMALL TURNED I - {0x1D4F, 0x1D6A, propertyDISALLOWED}, // MODIFIER LETTER SMALL K..GREEK SUBSCRIPT SMA - {0x1D6B, 0x1D77, propertyPVALID}, // LATIN SMALL LETTER UE..LATIN SMALL LETTER TU - {0x1D78, 0x0, propertyDISALLOWED}, // MODIFIER LETTER CYRILLIC EN - {0x1D79, 0x1D9A, propertyPVALID}, // LATIN SMALL LETTER INSULAR G..LATIN SMALL LE - {0x1D9B, 0x1DBF, propertyDISALLOWED}, // MODIFIER LETTER SMALL TURNED ALPHA..MODIFIER - {0x1DC0, 0x1DE6, propertyPVALID}, // COMBINING DOTTED GRAVE ACCENT..COMBINING LAT - {0x1DE7, 0x1DFC, propertyUNASSIGNED}, // .. 
- {0x1DFD, 0x1DFF, propertyPVALID}, // COMBINING ALMOST EQUAL TO BELOW..COMBINING R - {0x1E00, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING BELOW - {0x1E01, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING BELOW - {0x1E02, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT ABOVE - {0x1E03, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT ABOVE - {0x1E04, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT BELOW - {0x1E05, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT BELOW - {0x1E06, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH LINE BELOW - {0x1E07, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH LINE BELOW - {0x1E08, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CEDILLA AND ACUT - {0x1E09, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CEDILLA AND ACUTE - {0x1E0A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT ABOVE - {0x1E0B, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT ABOVE - {0x1E0C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT BELOW - {0x1E0D, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT BELOW - {0x1E0E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH LINE BELOW - {0x1E0F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH LINE BELOW - {0x1E10, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CEDILLA - {0x1E11, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CEDILLA - {0x1E12, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW - {0x1E13, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW - {0x1E14, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND GRAVE - {0x1E15, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND GRAVE - {0x1E16, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND ACUTE - {0x1E17, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND ACUTE - {0x1E18, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW - {0x1E19, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW - {0x1E1A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE BELOW - {0x1E1B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE BELOW - {0x1E1C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA AND BREV - {0x1E1D, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA AND BREVE - {0x1E1E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER F WITH DOT ABOVE - {0x1E1F, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH DOT ABOVE - {0x1E20, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH MACRON - {0x1E21, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH MACRON - {0x1E22, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT ABOVE - {0x1E23, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT ABOVE - {0x1E24, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT BELOW - {0x1E25, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT BELOW - {0x1E26, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DIAERESIS - {0x1E27, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DIAERESIS - {0x1E28, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CEDILLA - {0x1E29, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CEDILLA - {0x1E2A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH BREVE BELOW - {0x1E2B, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH BREVE BELOW - {0x1E2C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE BELOW - 
{0x1E2D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE BELOW - {0x1E2E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DIAERESIS AND AC - {0x1E2F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DIAERESIS AND ACUT - {0x1E30, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH ACUTE - {0x1E31, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH ACUTE - {0x1E32, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DOT BELOW - {0x1E33, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DOT BELOW - {0x1E34, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH LINE BELOW - {0x1E35, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH LINE BELOW - {0x1E36, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW - {0x1E37, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW - {0x1E38, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW AND MA - {0x1E39, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW AND MACR - {0x1E3A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH LINE BELOW - {0x1E3B, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH LINE BELOW - {0x1E3C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW - {0x1E3D, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW - {0x1E3E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH ACUTE - {0x1E3F, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH ACUTE - {0x1E40, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT ABOVE - {0x1E41, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT ABOVE - {0x1E42, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT BELOW - {0x1E43, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT BELOW - {0x1E44, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT ABOVE - {0x1E45, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT ABOVE - {0x1E46, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT BELOW - {0x1E47, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT BELOW - {0x1E48, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LINE BELOW - {0x1E49, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LINE BELOW - {0x1E4A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW - {0x1E4B, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW - {0x1E4C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND ACUTE - {0x1E4D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND ACUTE - {0x1E4E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND DIAERE - {0x1E4F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND DIAERESI - {0x1E50, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND GRAVE - {0x1E51, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND GRAVE - {0x1E52, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND ACUTE - {0x1E53, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND ACUTE - {0x1E54, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH ACUTE - {0x1E55, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH ACUTE - {0x1E56, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH DOT ABOVE - {0x1E57, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH DOT ABOVE - {0x1E58, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT ABOVE - {0x1E59, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT ABOVE - {0x1E5A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW - 
{0x1E5B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW - {0x1E5C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW AND MA - {0x1E5D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW AND MACR - {0x1E5E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH LINE BELOW - {0x1E5F, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH LINE BELOW - {0x1E60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT ABOVE - {0x1E61, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT ABOVE - {0x1E62, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW - {0x1E63, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW - {0x1E64, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE AND DOT AB - {0x1E65, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE AND DOT ABOV - {0x1E66, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON AND DOT AB - {0x1E67, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON AND DOT ABOV - {0x1E68, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW AND DO - {0x1E69, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW AND DOT - {0x1E6A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT ABOVE - {0x1E6B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT ABOVE - {0x1E6C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT BELOW - {0x1E6D, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT BELOW - {0x1E6E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH LINE BELOW - {0x1E6F, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH LINE BELOW - {0x1E70, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW - {0x1E71, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW - {0x1E72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS BELOW - {0x1E73, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS BELOW - {0x1E74, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE BELOW - {0x1E75, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE BELOW - {0x1E76, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW - {0x1E77, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW - {0x1E78, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE AND ACUTE - {0x1E79, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE AND ACUTE - {0x1E7A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON AND DIAER - {0x1E7B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON AND DIAERES - {0x1E7C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH TILDE - {0x1E7D, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH TILDE - {0x1E7E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DOT BELOW - {0x1E7F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DOT BELOW - {0x1E80, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH GRAVE - {0x1E81, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH GRAVE - {0x1E82, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH ACUTE - {0x1E83, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH ACUTE - {0x1E84, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DIAERESIS - {0x1E85, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DIAERESIS - {0x1E86, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT ABOVE - {0x1E87, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT ABOVE - {0x1E88, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT BELOW
- {0x1E89, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT BELOW - {0x1E8A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DOT ABOVE - {0x1E8B, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DOT ABOVE - {0x1E8C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DIAERESIS - {0x1E8D, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DIAERESIS - {0x1E8E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT ABOVE - {0x1E8F, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT ABOVE - {0x1E90, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CIRCUMFLEX - {0x1E91, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CIRCUMFLEX - {0x1E92, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT BELOW - {0x1E93, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT BELOW - {0x1E94, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH LINE BELOW - {0x1E95, 0x1E99, propertyPVALID}, // LATIN SMALL LETTER Z WITH LINE BELOW..LATIN - {0x1E9A, 0x1E9B, propertyDISALLOWED}, // LATIN SMALL LETTER A WITH RIGHT HALF RING..L - {0x1E9C, 0x1E9D, propertyPVALID}, // LATIN SMALL LETTER LONG S WITH DIAGONAL STRO - {0x1E9E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SHARP S - {0x1E9F, 0x0, propertyPVALID}, // LATIN SMALL LETTER DELTA - {0x1EA0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT BELOW - {0x1EA1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT BELOW - {0x1EA2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH HOOK ABOVE - {0x1EA3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH HOOK ABOVE - {0x1EA4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND A - {0x1EA5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACU - {0x1EA6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND G - {0x1EA7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRA - {0x1EA8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND H - {0x1EA9, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOO - {0x1EAA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND T - {0x1EAB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND TIL - {0x1EAC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND D - {0x1EAD, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT - {0x1EAE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND ACUTE - {0x1EAF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND ACUTE - {0x1EB0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND GRAVE - {0x1EB1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND GRAVE - {0x1EB2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND HOOK A - {0x1EB3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND HOOK ABO - {0x1EB4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND TILDE - {0x1EB5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND TILDE - {0x1EB6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND DOT BE - {0x1EB7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND DOT BELO - {0x1EB8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT BELOW - {0x1EB9, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT BELOW - {0x1EBA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH HOOK ABOVE - {0x1EBB, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH HOOK ABOVE
- {0x1EBC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE - {0x1EBD, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE - {0x1EBE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND A - {0x1EBF, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACU - {0x1EC0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND G - {0x1EC1, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRA - {0x1EC2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND H - {0x1EC3, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOO - {0x1EC4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND T - {0x1EC5, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND TIL - {0x1EC6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND D - {0x1EC7, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT - {0x1EC8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH HOOK ABOVE - {0x1EC9, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH HOOK ABOVE - {0x1ECA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT BELOW - {0x1ECB, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOT BELOW - {0x1ECC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT BELOW - {0x1ECD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT BELOW - {0x1ECE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HOOK ABOVE - {0x1ECF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HOOK ABOVE - {0x1ED0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND A - {0x1ED1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACU - {0x1ED2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND G - {0x1ED3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRA - {0x1ED4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND H - {0x1ED5, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOO - {0x1ED6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND T - {0x1ED7, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND TIL - {0x1ED8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND D - {0x1ED9, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT - {0x1EDA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND ACUTE - {0x1EDB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND ACUTE - {0x1EDC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND GRAVE - {0x1EDD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND GRAVE - {0x1EDE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND HOOK AB - {0x1EDF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND HOOK ABOV - {0x1EE0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND TILDE - {0x1EE1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND TILDE - {0x1EE2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND DOT BEL - {0x1EE3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND DOT BELOW - {0x1EE4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOT BELOW - {0x1EE5, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOT BELOW - {0x1EE6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HOOK ABOVE - {0x1EE7, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HOOK ABOVE - {0x1EE8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND ACUTE
- {0x1EE9, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND ACUTE - {0x1EEA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND GRAVE - {0x1EEB, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND GRAVE - {0x1EEC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND HOOK AB - {0x1EED, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND HOOK ABOV - {0x1EEE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND TILDE - {0x1EEF, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND TILDE - {0x1EF0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND DOT BEL - {0x1EF1, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND DOT BELOW - {0x1EF2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH GRAVE - {0x1EF3, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH GRAVE - {0x1EF4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT BELOW - {0x1EF5, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT BELOW - {0x1EF6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH HOOK ABOVE - {0x1EF7, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK ABOVE - {0x1EF8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH TILDE - {0x1EF9, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH TILDE - {0x1EFA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH LL - {0x1EFB, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH LL - {0x1EFC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH V - {0x1EFD, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH V - {0x1EFE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH LOOP - {0x1EFF, 0x1F07, propertyPVALID}, // LATIN SMALL LETTER Y WITH LOOP..GREEK SMALL - {0x1F08, 0x1F0F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA WITH PSILI..GREEK - {0x1F10, 0x1F15, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH PSILI..GREEK - {0x1F16, 0x1F17, propertyUNASSIGNED}, // .. - {0x1F18, 0x1F1D, propertyDISALLOWED}, // GREEK CAPITAL LETTER EPSILON WITH PSILI..GRE - {0x1F1E, 0x1F1F, propertyUNASSIGNED}, // .. - {0x1F20, 0x1F27, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PSILI..GREEK SMA - {0x1F28, 0x1F2F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ETA WITH PSILI..GREEK C - {0x1F30, 0x1F37, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PSILI..GREEK SM - {0x1F38, 0x1F3F, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH PSILI..GREEK - {0x1F40, 0x1F45, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH PSILI..GREEK - {0x1F46, 0x1F47, propertyUNASSIGNED}, // .. - {0x1F48, 0x1F4D, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH PSILI..GRE - {0x1F4E, 0x1F4F, propertyUNASSIGNED}, // ..
- {0x1F50, 0x1F57, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH PSILI..GREEK - {0x1F58, 0x0, propertyUNASSIGNED}, // - {0x1F59, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA - {0x1F5A, 0x0, propertyUNASSIGNED}, // - {0x1F5B, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND - {0x1F5C, 0x0, propertyUNASSIGNED}, // - {0x1F5D, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND - {0x1F5E, 0x0, propertyUNASSIGNED}, // - {0x1F5F, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND - {0x1F60, 0x1F67, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PSILI..GREEK S - {0x1F68, 0x1F6F, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMEGA WITH PSILI..GREEK - {0x1F70, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VARIA - {0x1F71, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH OXIA - {0x1F72, 0x0, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH VARIA - {0x1F73, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER EPSILON WITH OXIA - {0x1F74, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH VARIA - {0x1F75, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH OXIA - {0x1F76, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VARIA - {0x1F77, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH OXIA - {0x1F78, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH VARIA - {0x1F79, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMICRON WITH OXIA - {0x1F7A, 0x0, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VARIA - {0x1F7B, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH OXIA - {0x1F7C, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH VARIA - {0x1F7D, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH OXIA - {0x1F7E, 0x1F7F, propertyUNASSIGNED}, // .. - {0x1F80, 0x1FAF, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PSILI AND YPOG - {0x1FB0, 0x1FB1, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VRACHY..GREEK - {0x1FB2, 0x1FB4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH VARIA AND YPOG - {0x1FB5, 0x0, propertyUNASSIGNED}, // - {0x1FB6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI - {0x1FB7, 0x1FC4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI AN - {0x1FC5, 0x0, propertyUNASSIGNED}, // - {0x1FC6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PERISPOMENI - {0x1FC7, 0x1FCF, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH PERISPOMENI AND - {0x1FD0, 0x1FD2, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VRACHY..GREEK S - {0x1FD3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND O - {0x1FD4, 0x1FD5, propertyUNASSIGNED}, // .. - {0x1FD6, 0x1FD7, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PERISPOMENI..GR - {0x1FD8, 0x1FDB, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH VRACHY..GREEK - {0x1FDC, 0x0, propertyUNASSIGNED}, // - {0x1FDD, 0x1FDF, propertyDISALLOWED}, // GREEK DASIA AND VARIA..GREEK DASIA AND PERIS - {0x1FE0, 0x1FE2, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VRACHY..GREE - {0x1FE3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH DIALYTIKA AN - {0x1FE4, 0x1FE7, propertyPVALID}, // GREEK SMALL LETTER RHO WITH PSILI..GREEK SMA - {0x1FE8, 0x1FEF, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH VRACHY..GR - {0x1FF0, 0x1FF1, propertyUNASSIGNED}, // .. 
- {0x1FF2, 0x1FF4, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH VARIA AND YPOG - {0x1FF5, 0x0, propertyUNASSIGNED}, // - {0x1FF6, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI - {0x1FF7, 0x1FFE, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI AN - {0x1FFF, 0x0, propertyUNASSIGNED}, // - {0x2000, 0x200B, propertyDISALLOWED}, // EN QUAD..ZERO WIDTH SPACE - {0x200C, 0x200D, propertyCONTEXTJ}, // ZERO WIDTH NON-JOINER..ZERO WIDTH JOINER - {0x200E, 0x2064, propertyDISALLOWED}, // LEFT-TO-RIGHT MARK..INVISIBLE PLUS - {0x2065, 0x2069, propertyUNASSIGNED}, // .. - {0x206A, 0x2071, propertyDISALLOWED}, // INHIBIT SYMMETRIC SWAPPING..SUPERSCRIPT LATI - {0x2072, 0x2073, propertyUNASSIGNED}, // .. - {0x2074, 0x208E, propertyDISALLOWED}, // SUPERSCRIPT FOUR..SUBSCRIPT RIGHT PARENTHESI - {0x208F, 0x0, propertyUNASSIGNED}, // - {0x2090, 0x2094, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER A..LATIN SUBSCR - {0x2095, 0x209F, propertyUNASSIGNED}, // .. - {0x20A0, 0x20B8, propertyDISALLOWED}, // EURO-CURRENCY SIGN..TENGE SIGN - {0x20B9, 0x20CF, propertyUNASSIGNED}, // .. - {0x20D0, 0x20F0, propertyDISALLOWED}, // COMBINING LEFT HARPOON ABOVE..COMBINING ASTE - {0x20F1, 0x20FF, propertyUNASSIGNED}, // .. - {0x2100, 0x214D, propertyDISALLOWED}, // ACCOUNT OF..AKTIESELSKAB - {0x214E, 0x0, propertyPVALID}, // TURNED SMALL F - {0x214F, 0x2183, propertyDISALLOWED}, // SYMBOL FOR SAMARITAN SOURCE..ROMAN NUMERAL R - {0x2184, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C - {0x2185, 0x2189, propertyDISALLOWED}, // ROMAN NUMERAL SIX LATE FORM..VULGAR FRACTION - {0x218A, 0x218F, propertyUNASSIGNED}, // .. - {0x2190, 0x23E8, propertyDISALLOWED}, // LEFTWARDS ARROW..DECIMAL EXPONENT SYMBOL - {0x23E9, 0x23FF, propertyUNASSIGNED}, // .. - {0x2400, 0x2426, propertyDISALLOWED}, // SYMBOL FOR NULL..SYMBOL FOR SUBSTITUTE FORM - {0x2427, 0x243F, propertyUNASSIGNED}, // .. - {0x2440, 0x244A, propertyDISALLOWED}, // OCR HOOK..OCR DOUBLE BACKSLASH - {0x244B, 0x245F, propertyUNASSIGNED}, // .. - {0x2460, 0x26CD, propertyDISALLOWED}, // CIRCLED DIGIT ONE..DISABLED CAR - {0x26CE, 0x0, propertyUNASSIGNED}, // - {0x26CF, 0x26E1, propertyDISALLOWED}, // PICK..RESTRICTED LEFT ENTRY-2 - {0x26E2, 0x0, propertyUNASSIGNED}, // - {0x26E3, 0x0, propertyDISALLOWED}, // HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE - {0x26E4, 0x26E7, propertyUNASSIGNED}, // .. - {0x26E8, 0x26FF, propertyDISALLOWED}, // BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZ - {0x2700, 0x0, propertyUNASSIGNED}, // - {0x2701, 0x2704, propertyDISALLOWED}, // UPPER BLADE SCISSORS..WHITE SCISSORS - {0x2705, 0x0, propertyUNASSIGNED}, // - {0x2706, 0x2709, propertyDISALLOWED}, // TELEPHONE LOCATION SIGN..ENVELOPE - {0x270A, 0x270B, propertyUNASSIGNED}, // .. - {0x270C, 0x2727, propertyDISALLOWED}, // VICTORY HAND..WHITE FOUR POINTED STAR - {0x2728, 0x0, propertyUNASSIGNED}, // - {0x2729, 0x274B, propertyDISALLOWED}, // STRESS OUTLINED WHITE STAR..HEAVY EIGHT TEAR - {0x274C, 0x0, propertyUNASSIGNED}, // - {0x274D, 0x0, propertyDISALLOWED}, // SHADOWED WHITE CIRCLE - {0x274E, 0x0, propertyUNASSIGNED}, // - {0x274F, 0x2752, propertyDISALLOWED}, // LOWER RIGHT DROP-SHADOWED WHITE SQUARE..UPPE - {0x2753, 0x2755, propertyUNASSIGNED}, // .. - {0x2756, 0x275E, propertyDISALLOWED}, // BLACK DIAMOND MINUS WHITE X..HEAVY DOUBLE CO - {0x275F, 0x2760, propertyUNASSIGNED}, // .. - {0x2761, 0x2794, propertyDISALLOWED}, // CURVED STEM PARAGRAPH SIGN ORNAMENT..HEAVY W - {0x2795, 0x2797, propertyUNASSIGNED}, // .. 
- {0x2798, 0x27AF, propertyDISALLOWED}, // HEAVY SOUTH EAST ARROW..NOTCHED LOWER RIGHT- - {0x27B0, 0x0, propertyUNASSIGNED}, // - {0x27B1, 0x27BE, propertyDISALLOWED}, // NOTCHED UPPER RIGHT-SHADOWED WHITE RIGHTWARD - {0x27BF, 0x0, propertyUNASSIGNED}, // - {0x27C0, 0x27CA, propertyDISALLOWED}, // THREE DIMENSIONAL ANGLE..VERTICAL BAR WITH H - {0x27CB, 0x0, propertyUNASSIGNED}, // - {0x27CC, 0x0, propertyDISALLOWED}, // LONG DIVISION - {0x27CD, 0x27CF, propertyUNASSIGNED}, // .. - {0x27D0, 0x2B4C, propertyDISALLOWED}, // WHITE DIAMOND WITH CENTRED DOT..RIGHTWARDS A - {0x2B4D, 0x2B4F, propertyUNASSIGNED}, // .. - {0x2B50, 0x2B59, propertyDISALLOWED}, // WHITE MEDIUM STAR..HEAVY CIRCLED SALTIRE - {0x2B5A, 0x2BFF, propertyUNASSIGNED}, // .. - {0x2C00, 0x2C2E, propertyDISALLOWED}, // GLAGOLITIC CAPITAL LETTER AZU..GLAGOLITIC CA - {0x2C2F, 0x0, propertyUNASSIGNED}, // - {0x2C30, 0x2C5E, propertyPVALID}, // GLAGOLITIC SMALL LETTER AZU..GLAGOLITIC SMAL - {0x2C5F, 0x0, propertyUNASSIGNED}, // - {0x2C60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOUBLE BAR - {0x2C61, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOUBLE BAR - {0x2C62, 0x2C64, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE TILDE..LA - {0x2C65, 0x2C66, propertyPVALID}, // LATIN SMALL LETTER A WITH STROKE..LATIN SMAL - {0x2C67, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DESCENDER - {0x2C68, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DESCENDER - {0x2C69, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DESCENDER - {0x2C6A, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DESCENDER - {0x2C6B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DESCENDER - {0x2C6C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DESCENDER - {0x2C6D, 0x2C70, propertyDISALLOWED}, // LATIN CAPITAL LETTER ALPHA..LATIN CAPITAL LE - {0x2C71, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH RIGHT HOOK - {0x2C72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH HOOK - {0x2C73, 0x2C74, propertyPVALID}, // LATIN SMALL LETTER W WITH HOOK..LATIN SMALL - {0x2C75, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HALF H - {0x2C76, 0x2C7B, propertyPVALID}, // LATIN SMALL LETTER HALF H..LATIN LETTER SMAL - {0x2C7C, 0x2C80, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER J..COPTIC CAPIT - {0x2C81, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ALFA - {0x2C82, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER VIDA - {0x2C83, 0x0, propertyPVALID}, // COPTIC SMALL LETTER VIDA - {0x2C84, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GAMMA - {0x2C85, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GAMMA - {0x2C86, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DALDA - {0x2C87, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DALDA - {0x2C88, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER EIE - {0x2C89, 0x0, propertyPVALID}, // COPTIC SMALL LETTER EIE - {0x2C8A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SOU - {0x2C8B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SOU - {0x2C8C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER ZATA - {0x2C8D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ZATA - {0x2C8E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HATE - {0x2C8F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HATE - {0x2C90, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER THETHE - {0x2C91, 0x0, propertyPVALID}, // COPTIC SMALL LETTER THETHE - {0x2C92, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER IAUDA - {0x2C93, 0x0, propertyPVALID}, // COPTIC SMALL LETTER IAUDA - 
{0x2C94, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KAPA - {0x2C95, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KAPA - {0x2C96, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER LAULA - {0x2C97, 0x0, propertyPVALID}, // COPTIC SMALL LETTER LAULA - {0x2C98, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER MI - {0x2C99, 0x0, propertyPVALID}, // COPTIC SMALL LETTER MI - {0x2C9A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER NI - {0x2C9B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER NI - {0x2C9C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KSI - {0x2C9D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KSI - {0x2C9E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER O - {0x2C9F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER O - {0x2CA0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PI - {0x2CA1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PI - {0x2CA2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER RO - {0x2CA3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER RO - {0x2CA4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SIMA - {0x2CA5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SIMA - {0x2CA6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER TAU - {0x2CA7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER TAU - {0x2CA8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER UA - {0x2CA9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER UA - {0x2CAA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FI - {0x2CAB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FI - {0x2CAC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHI - {0x2CAD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHI - {0x2CAE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PSI - {0x2CAF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PSI - {0x2CB0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OOU - {0x2CB1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OOU - {0x2CB2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P ALEF - {0x2CB3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P ALEF - {0x2CB4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC AIN - {0x2CB5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC AIN - {0x2CB6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC EIE - {0x2CB7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC EIE - {0x2CB8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P KAPA - {0x2CB9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P KAPA - {0x2CBA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P NI - {0x2CBB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P NI - {0x2CBC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC NI - {0x2CBD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC NI - {0x2CBE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC OOU - {0x2CBF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC OOU - {0x2CC0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SAMPI - {0x2CC1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SAMPI - {0x2CC2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CROSSED SHEI - {0x2CC3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CROSSED SHEI - {0x2CC4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHEI - {0x2CC5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHEI - {0x2CC6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC ESH - {0x2CC7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC ESH - {0x2CC8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER AKHMIMIC KHEI
- {0x2CC9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER AKHMIMIC KHEI - {0x2CCA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P HORI - {0x2CCB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P HORI - {0x2CCC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HORI - {0x2CCD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HORI - {0x2CCE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HA - {0x2CCF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HA - {0x2CD0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER L-SHAPED HA - {0x2CD1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER L-SHAPED HA - {0x2CD2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HEI - {0x2CD3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HEI - {0x2CD4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HAT - {0x2CD5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HAT - {0x2CD6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC GANGIA - {0x2CD7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC GANGIA - {0x2CD8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC DJA - {0x2CD9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC DJA - {0x2CDA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHIMA - {0x2CDB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHIMA - {0x2CDC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN SHIMA - {0x2CDD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN SHIMA - {0x2CDE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NGI - {0x2CDF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NGI - {0x2CE0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NYI - {0x2CE1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NYI - {0x2CE2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN WAU - {0x2CE3, 0x2CE4, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN WAU..COPTIC S - {0x2CE5, 0x2CEB, propertyDISALLOWED}, // COPTIC SYMBOL MI RO..COPTIC CAPITAL LETTER C - {0x2CEC, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC SHEI - {0x2CED, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC GANGIA - {0x2CEE, 0x2CF1, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC GANGIA..CO - {0x2CF2, 0x2CF8, propertyUNASSIGNED}, // .. - {0x2CF9, 0x2CFF, propertyDISALLOWED}, // COPTIC OLD NUBIAN FULL STOP..COPTIC MORPHOLO - {0x2D00, 0x2D25, propertyPVALID}, // GEORGIAN SMALL LETTER AN..GEORGIAN SMALL LET - {0x2D26, 0x2D2F, propertyUNASSIGNED}, // .. - {0x2D30, 0x2D65, propertyPVALID}, // TIFINAGH LETTER YA..TIFINAGH LETTER YAZZ - {0x2D66, 0x2D6E, propertyUNASSIGNED}, // .. - {0x2D6F, 0x0, propertyDISALLOWED}, // TIFINAGH MODIFIER LETTER LABIALIZATION MARK - {0x2D70, 0x2D7F, propertyUNASSIGNED}, // .. - {0x2D80, 0x2D96, propertyPVALID}, // ETHIOPIC SYLLABLE LOA..ETHIOPIC SYLLABLE GGW - {0x2D97, 0x2D9F, propertyUNASSIGNED}, // ..
- {0x2DA0, 0x2DA6, propertyPVALID}, // ETHIOPIC SYLLABLE SSA..ETHIOPIC SYLLABLE SSO - {0x2DA7, 0x0, propertyUNASSIGNED}, // - {0x2DA8, 0x2DAE, propertyPVALID}, // ETHIOPIC SYLLABLE CCA..ETHIOPIC SYLLABLE CCO - {0x2DAF, 0x0, propertyUNASSIGNED}, // - {0x2DB0, 0x2DB6, propertyPVALID}, // ETHIOPIC SYLLABLE ZZA..ETHIOPIC SYLLABLE ZZO - {0x2DB7, 0x0, propertyUNASSIGNED}, // - {0x2DB8, 0x2DBE, propertyPVALID}, // ETHIOPIC SYLLABLE CCHA..ETHIOPIC SYLLABLE CC - {0x2DBF, 0x0, propertyUNASSIGNED}, // - {0x2DC0, 0x2DC6, propertyPVALID}, // ETHIOPIC SYLLABLE QYA..ETHIOPIC SYLLABLE QYO - {0x2DC7, 0x0, propertyUNASSIGNED}, // - {0x2DC8, 0x2DCE, propertyPVALID}, // ETHIOPIC SYLLABLE KYA..ETHIOPIC SYLLABLE KYO - {0x2DCF, 0x0, propertyUNASSIGNED}, // - {0x2DD0, 0x2DD6, propertyPVALID}, // ETHIOPIC SYLLABLE XYA..ETHIOPIC SYLLABLE XYO - {0x2DD7, 0x0, propertyUNASSIGNED}, // - {0x2DD8, 0x2DDE, propertyPVALID}, // ETHIOPIC SYLLABLE GYA..ETHIOPIC SYLLABLE GYO - {0x2DDF, 0x0, propertyUNASSIGNED}, // - {0x2DE0, 0x2DFF, propertyPVALID}, // COMBINING CYRILLIC LETTER BE..COMBINING CYRI - {0x2E00, 0x2E2E, propertyDISALLOWED}, // RIGHT ANGLE SUBSTITUTION MARKER..REVERSED QU - {0x2E2F, 0x0, propertyPVALID}, // VERTICAL TILDE - {0x2E30, 0x2E31, propertyDISALLOWED}, // RING POINT..WORD SEPARATOR MIDDLE DOT - {0x2E32, 0x2E7F, propertyUNASSIGNED}, // .. - {0x2E80, 0x2E99, propertyDISALLOWED}, // CJK RADICAL REPEAT..CJK RADICAL RAP - {0x2E9A, 0x0, propertyUNASSIGNED}, // - {0x2E9B, 0x2EF3, propertyDISALLOWED}, // CJK RADICAL CHOKE..CJK RADICAL C-SIMPLIFIED - {0x2EF4, 0x2EFF, propertyUNASSIGNED}, // .. - {0x2F00, 0x2FD5, propertyDISALLOWED}, // KANGXI RADICAL ONE..KANGXI RADICAL FLUTE - {0x2FD6, 0x2FEF, propertyUNASSIGNED}, // .. - {0x2FF0, 0x2FFB, propertyDISALLOWED}, // IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO RI - {0x2FFC, 0x2FFF, propertyUNASSIGNED}, // .. - {0x3000, 0x3004, propertyDISALLOWED}, // IDEOGRAPHIC SPACE..JAPANESE INDUSTRIAL STAND - {0x3005, 0x3007, propertyPVALID}, // IDEOGRAPHIC ITERATION MARK..IDEOGRAPHIC NUMB - {0x3008, 0x3029, propertyDISALLOWED}, // LEFT ANGLE BRACKET..HANGZHOU NUMERAL NINE - {0x302A, 0x302D, propertyPVALID}, // IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENT - {0x302E, 0x303B, propertyDISALLOWED}, // HANGUL SINGLE DOT TONE MARK..VERTICAL IDEOGR - {0x303C, 0x0, propertyPVALID}, // MASU MARK - {0x303D, 0x303F, propertyDISALLOWED}, // PART ALTERNATION MARK..IDEOGRAPHIC HALF FILL - {0x3040, 0x0, propertyUNASSIGNED}, // - {0x3041, 0x3096, propertyPVALID}, // HIRAGANA LETTER SMALL A..HIRAGANA LETTER SMA - {0x3097, 0x3098, propertyUNASSIGNED}, // .. - {0x3099, 0x309A, propertyPVALID}, // COMBINING KATAKANA-HIRAGANA VOICED SOUND MAR - {0x309B, 0x309C, propertyDISALLOWED}, // KATAKANA-HIRAGANA VOICED SOUND MARK..KATAKAN - {0x309D, 0x309E, propertyPVALID}, // HIRAGANA ITERATION MARK..HIRAGANA VOICED ITE - {0x309F, 0x30A0, propertyDISALLOWED}, // HIRAGANA DIGRAPH YORI..KATAKANA-HIRAGANA DOU - {0x30A1, 0x30FA, propertyPVALID}, // KATAKANA LETTER SMALL A..KATAKANA LETTER VO - {0x30FB, 0x0, propertyCONTEXTO}, // KATAKANA MIDDLE DOT - {0x30FC, 0x30FE, propertyPVALID}, // KATAKANA-HIRAGANA PROLONGED SOUND MARK..KATA - {0x30FF, 0x0, propertyDISALLOWED}, // KATAKANA DIGRAPH KOTO - {0x3100, 0x3104, propertyUNASSIGNED}, // .. - {0x3105, 0x312D, propertyPVALID}, // BOPOMOFO LETTER B..BOPOMOFO LETTER IH - {0x312E, 0x3130, propertyUNASSIGNED}, // .. 
- {0x3131, 0x318E, propertyDISALLOWED}, // HANGUL LETTER KIYEOK..HANGUL LETTER ARAEAE - {0x318F, 0x0, propertyUNASSIGNED}, // - {0x3190, 0x319F, propertyDISALLOWED}, // IDEOGRAPHIC ANNOTATION LINKING MARK..IDEOGRA - {0x31A0, 0x31B7, propertyPVALID}, // BOPOMOFO LETTER BU..BOPOMOFO FINAL LETTER H - {0x31B8, 0x31BF, propertyUNASSIGNED}, // .. - {0x31C0, 0x31E3, propertyDISALLOWED}, // CJK STROKE T..CJK STROKE Q - {0x31E4, 0x31EF, propertyUNASSIGNED}, // .. - {0x31F0, 0x31FF, propertyPVALID}, // KATAKANA LETTER SMALL KU..KATAKANA LETTER SM - {0x3200, 0x321E, propertyDISALLOWED}, // PARENTHESIZED HANGUL KIYEOK..PARENTHESIZED K - {0x321F, 0x0, propertyUNASSIGNED}, // - {0x3220, 0x32FE, propertyDISALLOWED}, // PARENTHESIZED IDEOGRAPH ONE..CIRCLED KATAKAN - {0x32FF, 0x0, propertyUNASSIGNED}, // - {0x3300, 0x33FF, propertyDISALLOWED}, // SQUARE APAATO..SQUARE GAL - {0x3400, 0x4DB5, propertyPVALID}, // .... - {0x4DC0, 0x4DFF, propertyDISALLOWED}, // HEXAGRAM FOR THE CREATIVE HEAVEN..HEXAGRAM F - {0x4E00, 0x9FCB, propertyPVALID}, // .. - {0x9FCC, 0x9FFF, propertyUNASSIGNED}, // .. - {0xA000, 0xA48C, propertyPVALID}, // YI SYLLABLE IT..YI SYLLABLE YYR - {0xA48D, 0xA48F, propertyUNASSIGNED}, // .. - {0xA490, 0xA4C6, propertyDISALLOWED}, // YI RADICAL QOT..YI RADICAL KE - {0xA4C7, 0xA4CF, propertyUNASSIGNED}, // .. - {0xA4D0, 0xA4FD, propertyPVALID}, // LISU LETTER BA..LISU LETTER TONE MYA JEU - {0xA4FE, 0xA4FF, propertyDISALLOWED}, // LISU PUNCTUATION COMMA..LISU PUNCTUATION FUL - {0xA500, 0xA60C, propertyPVALID}, // VAI SYLLABLE EE..VAI SYLLABLE LENGTHENER - {0xA60D, 0xA60F, propertyDISALLOWED}, // VAI COMMA..VAI QUESTION MARK - {0xA610, 0xA62B, propertyPVALID}, // VAI SYLLABLE NDOLE FA..VAI SYLLABLE NDOLE DO - {0xA62C, 0xA63F, propertyUNASSIGNED}, // .. - {0xA640, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZEMLYA - {0xA641, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZEMLYA - {0xA642, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZELO - {0xA643, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZELO - {0xA644, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED DZE - {0xA645, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED DZE - {0xA646, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTA - {0xA647, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTA - {0xA648, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DJERV - {0xA649, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DJERV - {0xA64A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOGRAPH UK - {0xA64B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOGRAPH UK - {0xA64C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BROAD OMEGA - {0xA64D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BROAD OMEGA - {0xA64E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER NEUTRAL YER - {0xA64F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER NEUTRAL YER - {0xA650, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH BACK YER - {0xA651, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH BACK YER - {0xA652, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED YAT - {0xA653, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED YAT - {0xA654, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED YU - {0xA655, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED YU - {0xA656, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED A - {0xA657, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED A - {0xA658, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL 
LETTER CLOSED LITTLE YUS - {0xA659, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CLOSED LITTLE YUS - {0xA65A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BLENDED YUS - {0xA65B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BLENDED YUS - {0xA65C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED CLOSED LITT - {0xA65D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED CLOSED LITTLE - {0xA65E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YN - {0xA65F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YN - {0xA660, 0xA661, propertyUNASSIGNED}, // .. - {0xA662, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT DE - {0xA663, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT DE - {0xA664, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EL - {0xA665, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EL - {0xA666, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EM - {0xA667, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EM - {0xA668, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOCULAR O - {0xA669, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOCULAR O - {0xA66A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BINOCULAR O - {0xA66B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BINOCULAR O - {0xA66C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DOUBLE MONOCULAR O - {0xA66D, 0xA66F, propertyPVALID}, // CYRILLIC SMALL LETTER DOUBLE MONOCULAR O..CO - {0xA670, 0xA673, propertyDISALLOWED}, // COMBINING CYRILLIC TEN MILLIONS SIGN..SLAVON - {0xA674, 0xA67B, propertyUNASSIGNED}, // .. - {0xA67C, 0xA67D, propertyPVALID}, // COMBINING CYRILLIC KAVYKA..COMBINING CYRILLI - {0xA67E, 0x0, propertyDISALLOWED}, // CYRILLIC KAVYKA - {0xA67F, 0x0, propertyPVALID}, // CYRILLIC PAYEROK - {0xA680, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DWE - {0xA681, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DWE - {0xA682, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZWE - {0xA683, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZWE - {0xA684, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHWE - {0xA685, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHWE - {0xA686, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CCHE - {0xA687, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CCHE - {0xA688, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZZE - {0xA689, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZZE - {0xA68A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH MIDDLE HOOK - {0xA68B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH MIDDLE HOOK - {0xA68C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TWE - {0xA68D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TWE - {0xA68E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSWE - {0xA68F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSWE - {0xA690, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSSE - {0xA691, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSSE - {0xA692, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TCHE - {0xA693, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TCHE - {0xA694, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HWE - {0xA695, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HWE - {0xA696, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHWE - {0xA697, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHWE - {0xA698, 0xA69F, propertyUNASSIGNED}, // .. 
- {0xA6A0, 0xA6E5, propertyPVALID}, // BAMUM LETTER A..BAMUM LETTER KI - {0xA6E6, 0xA6EF, propertyDISALLOWED}, // BAMUM LETTER MO..BAMUM LETTER KOGHOM - {0xA6F0, 0xA6F1, propertyPVALID}, // BAMUM COMBINING MARK KOQNDON..BAMUM COMBININ - {0xA6F2, 0xA6F7, propertyDISALLOWED}, // BAMUM NJAEMLI..BAMUM QUESTION MARK - {0xA6F8, 0xA6FF, propertyUNASSIGNED}, // .. - {0xA700, 0xA716, propertyDISALLOWED}, // MODIFIER LETTER CHINESE TONE YIN PING..MODIF - {0xA717, 0xA71F, propertyPVALID}, // MODIFIER LETTER DOT VERTICAL BAR..MODIFIER L - {0xA720, 0xA722, propertyDISALLOWED}, // MODIFIER LETTER STRESS AND HIGH TONE..LATIN - {0xA723, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL ALEF - {0xA724, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EGYPTOLOGICAL AIN - {0xA725, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL AIN - {0xA726, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HENG - {0xA727, 0x0, propertyPVALID}, // LATIN SMALL LETTER HENG - {0xA728, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TZ - {0xA729, 0x0, propertyPVALID}, // LATIN SMALL LETTER TZ - {0xA72A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TRESILLO - {0xA72B, 0x0, propertyPVALID}, // LATIN SMALL LETTER TRESILLO - {0xA72C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO - {0xA72D, 0x0, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO - {0xA72E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO WITH COMMA - {0xA72F, 0xA731, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO WITH COMMA..LAT - {0xA732, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AA - {0xA733, 0x0, propertyPVALID}, // LATIN SMALL LETTER AA - {0xA734, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AO - {0xA735, 0x0, propertyPVALID}, // LATIN SMALL LETTER AO - {0xA736, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AU - {0xA737, 0x0, propertyPVALID}, // LATIN SMALL LETTER AU - {0xA738, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV - {0xA739, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV - {0xA73A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR - {0xA73B, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV WITH HORIZONTAL BAR - {0xA73C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AY - {0xA73D, 0x0, propertyPVALID}, // LATIN SMALL LETTER AY - {0xA73E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED C WITH DOT - {0xA73F, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C WITH DOT - {0xA740, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE - {0xA741, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE - {0xA742, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DIAGONAL STROKE - {0xA743, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DIAGONAL STROKE - {0xA744, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE AND DIAGO - {0xA745, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE AND DIAGONA - {0xA746, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER BROKEN L - {0xA747, 0x0, propertyPVALID}, // LATIN SMALL LETTER BROKEN L - {0xA748, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH HIGH STROKE - {0xA749, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH HIGH STROKE - {0xA74A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LONG STROKE OVER - {0xA74B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LONG STROKE OVERLA - {0xA74C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LOOP - {0xA74D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LOOP - {0xA74E, 0x0, 
propertyDISALLOWED}, // LATIN CAPITAL LETTER OO - {0xA74F, 0x0, propertyPVALID}, // LATIN SMALL LETTER OO - {0xA750, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH STROKE THROUGH D - {0xA751, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH STROKE THROUGH DES - {0xA752, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH FLOURISH - {0xA753, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH FLOURISH - {0xA754, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH SQUIRREL TAIL - {0xA755, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH SQUIRREL TAIL - {0xA756, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH STROKE THROUGH D - {0xA757, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH STROKE THROUGH DES - {0xA758, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE - {0xA759, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH DIAGONAL STROKE - {0xA75A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R ROTUNDA - {0xA75B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R ROTUNDA - {0xA75C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER RUM ROTUNDA - {0xA75D, 0x0, propertyPVALID}, // LATIN SMALL LETTER RUM ROTUNDA - {0xA75E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DIAGONAL STROKE - {0xA75F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DIAGONAL STROKE - {0xA760, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VY - {0xA761, 0x0, propertyPVALID}, // LATIN SMALL LETTER VY - {0xA762, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VISIGOTHIC Z - {0xA763, 0x0, propertyPVALID}, // LATIN SMALL LETTER VISIGOTHIC Z - {0xA764, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE - {0xA765, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE - {0xA766, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE THROU - {0xA767, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE THROUGH - {0xA768, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VEND - {0xA769, 0x0, propertyPVALID}, // LATIN SMALL LETTER VEND - {0xA76A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ET - {0xA76B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ET - {0xA76C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER IS - {0xA76D, 0x0, propertyPVALID}, // LATIN SMALL LETTER IS - {0xA76E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CON - {0xA76F, 0x0, propertyPVALID}, // LATIN SMALL LETTER CON - {0xA770, 0x0, propertyDISALLOWED}, // MODIFIER LETTER US - {0xA771, 0xA778, propertyPVALID}, // LATIN SMALL LETTER DUM..LATIN SMALL LETTER U - {0xA779, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR D - {0xA77A, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR D - {0xA77B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR F - {0xA77C, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR F - {0xA77D, 0xA77E, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR G..LATIN CAPITA - {0xA77F, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED INSULAR G - {0xA780, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED L - {0xA781, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED L - {0xA782, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR R - {0xA783, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR R - {0xA784, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR S - {0xA785, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR S - {0xA786, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR T - {0xA787, 0xA788, propertyPVALID}, // LATIN SMALL LETTER INSULAR T..MODIFIER 
LETTE - {0xA789, 0xA78B, propertyDISALLOWED}, // MODIFIER LETTER COLON..LATIN CAPITAL LETTER - {0xA78C, 0x0, propertyPVALID}, // LATIN SMALL LETTER SALTILLO - {0xA78D, 0xA7FA, propertyUNASSIGNED}, // .. - {0xA7FB, 0xA827, propertyPVALID}, // LATIN EPIGRAPHIC LETTER REVERSED F..SYLOTI N - {0xA828, 0xA82B, propertyDISALLOWED}, // SYLOTI NAGRI POETRY MARK-1..SYLOTI NAGRI POE - {0xA82C, 0xA82F, propertyUNASSIGNED}, // .. - {0xA830, 0xA839, propertyDISALLOWED}, // NORTH INDIC FRACTION ONE QUARTER..NORTH INDI - {0xA83A, 0xA83F, propertyUNASSIGNED}, // .. - {0xA840, 0xA873, propertyPVALID}, // PHAGS-PA LETTER KA..PHAGS-PA LETTER CANDRABI - {0xA874, 0xA877, propertyDISALLOWED}, // PHAGS-PA SINGLE HEAD MARK..PHAGS-PA MARK DOU - {0xA878, 0xA87F, propertyUNASSIGNED}, // .. - {0xA880, 0xA8C4, propertyPVALID}, // SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VI - {0xA8C5, 0xA8CD, propertyUNASSIGNED}, // .. - {0xA8CE, 0xA8CF, propertyDISALLOWED}, // SAURASHTRA DANDA..SAURASHTRA DOUBLE DANDA - {0xA8D0, 0xA8D9, propertyPVALID}, // SAURASHTRA DIGIT ZERO..SAURASHTRA DIGIT NINE - {0xA8DA, 0xA8DF, propertyUNASSIGNED}, // .. - {0xA8E0, 0xA8F7, propertyPVALID}, // COMBINING DEVANAGARI DIGIT ZERO..DEVANAGARI - {0xA8F8, 0xA8FA, propertyDISALLOWED}, // DEVANAGARI SIGN PUSHPIKA..DEVANAGARI CARET - {0xA8FB, 0x0, propertyPVALID}, // DEVANAGARI HEADSTROKE - {0xA8FC, 0xA8FF, propertyUNASSIGNED}, // .. - {0xA900, 0xA92D, propertyPVALID}, // KAYAH LI DIGIT ZERO..KAYAH LI TONE CALYA PLO - {0xA92E, 0xA92F, propertyDISALLOWED}, // KAYAH LI SIGN CWI..KAYAH LI SIGN SHYA - {0xA930, 0xA953, propertyPVALID}, // REJANG LETTER KA..REJANG VIRAMA - {0xA954, 0xA95E, propertyUNASSIGNED}, // .. - {0xA95F, 0xA97C, propertyDISALLOWED}, // REJANG SECTION MARK..HANGUL CHOSEONG SSANGYE - {0xA97D, 0xA97F, propertyUNASSIGNED}, // .. - {0xA980, 0xA9C0, propertyPVALID}, // JAVANESE SIGN PANYANGGA..JAVANESE PANGKON - {0xA9C1, 0xA9CD, propertyDISALLOWED}, // JAVANESE LEFT RERENGGAN..JAVANESE TURNED PAD - {0xA9CE, 0x0, propertyUNASSIGNED}, // - {0xA9CF, 0xA9D9, propertyPVALID}, // JAVANESE PANGRANGKEP..JAVANESE DIGIT NINE - {0xA9DA, 0xA9DD, propertyUNASSIGNED}, // .. - {0xA9DE, 0xA9DF, propertyDISALLOWED}, // JAVANESE PADA TIRTA TUMETES..JAVANESE PADA I - {0xA9E0, 0xA9FF, propertyUNASSIGNED}, // .. - {0xAA00, 0xAA36, propertyPVALID}, // CHAM LETTER A..CHAM CONSONANT SIGN WA - {0xAA37, 0xAA3F, propertyUNASSIGNED}, // .. - {0xAA40, 0xAA4D, propertyPVALID}, // CHAM LETTER FINAL K..CHAM CONSONANT SIGN FIN - {0xAA4E, 0xAA4F, propertyUNASSIGNED}, // .. - {0xAA50, 0xAA59, propertyPVALID}, // CHAM DIGIT ZERO..CHAM DIGIT NINE - {0xAA5A, 0xAA5B, propertyUNASSIGNED}, // .. - {0xAA5C, 0xAA5F, propertyDISALLOWED}, // CHAM PUNCTUATION SPIRAL..CHAM PUNCTUATION TR - {0xAA60, 0xAA76, propertyPVALID}, // MYANMAR LETTER KHAMTI GA..MYANMAR LOGOGRAM K - {0xAA77, 0xAA79, propertyDISALLOWED}, // MYANMAR SYMBOL AITON EXCLAMATION..MYANMAR SY - {0xAA7A, 0xAA7B, propertyPVALID}, // MYANMAR LETTER AITON RA..MYANMAR SIGN PAO KA - {0xAA7C, 0xAA7F, propertyUNASSIGNED}, // .. - {0xAA80, 0xAAC2, propertyPVALID}, // TAI VIET LETTER LOW KO..TAI VIET TONE MAI SO - {0xAAC3, 0xAADA, propertyUNASSIGNED}, // .. - {0xAADB, 0xAADD, propertyPVALID}, // TAI VIET SYMBOL KON..TAI VIET SYMBOL SAM - {0xAADE, 0xAADF, propertyDISALLOWED}, // TAI VIET SYMBOL HO HOI..TAI VIET SYMBOL KOI - {0xAAE0, 0xABBF, propertyUNASSIGNED}, // .. 
- {0xABC0, 0xABEA, propertyPVALID}, // MEETEI MAYEK LETTER KOK..MEETEI MAYEK VOWEL - {0xABEB, 0x0, propertyDISALLOWED}, // MEETEI MAYEK CHEIKHEI - {0xABEC, 0xABED, propertyPVALID}, // MEETEI MAYEK LUM IYEK..MEETEI MAYEK APUN IYE - {0xABEE, 0xABEF, propertyUNASSIGNED}, // .. - {0xABF0, 0xABF9, propertyPVALID}, // MEETEI MAYEK DIGIT ZERO..MEETEI MAYEK DIGIT - {0xABFA, 0xABFF, propertyUNASSIGNED}, // .. - {0xAC00, 0xD7A3, propertyPVALID}, // .. - {0xD7A4, 0xD7AF, propertyUNASSIGNED}, // .. - {0xD7B0, 0xD7C6, propertyDISALLOWED}, // HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARA - {0xD7C7, 0xD7CA, propertyUNASSIGNED}, // .. - {0xD7CB, 0xD7FB, propertyDISALLOWED}, // HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEO - {0xD7FC, 0xD7FF, propertyUNASSIGNED}, // .. - {0xD800, 0xFA0D, propertyDISALLOWED}, // ..CJK COMPAT - {0xFA0E, 0xFA0F, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA0E..CJK COMPAT - {0xFA10, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA10 - {0xFA11, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA11 - {0xFA12, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA12 - {0xFA13, 0xFA14, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA13..CJK COMPAT - {0xFA15, 0xFA1E, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA15..CJK COMPAT - {0xFA1F, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA1F - {0xFA20, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA20 - {0xFA21, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA21 - {0xFA22, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA22 - {0xFA23, 0xFA24, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA23..CJK COMPAT - {0xFA25, 0xFA26, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA25..CJK COMPAT - {0xFA27, 0xFA29, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA27..CJK COMPAT - {0xFA2A, 0xFA2D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA2A..CJK COMPAT - {0xFA2E, 0xFA2F, propertyUNASSIGNED}, // .. - {0xFA30, 0xFA6D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA30..CJK COMPAT - {0xFA6E, 0xFA6F, propertyUNASSIGNED}, // .. - {0xFA70, 0xFAD9, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COMPAT - {0xFADA, 0xFAFF, propertyUNASSIGNED}, // .. - {0xFB00, 0xFB06, propertyDISALLOWED}, // LATIN SMALL LIGATURE FF..LATIN SMALL LIGATUR - {0xFB07, 0xFB12, propertyUNASSIGNED}, // .. - {0xFB13, 0xFB17, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE MEN NOW..ARMENIAN SM - {0xFB18, 0xFB1C, propertyUNASSIGNED}, // .. - {0xFB1D, 0x0, propertyDISALLOWED}, // HEBREW LETTER YOD WITH HIRIQ - {0xFB1E, 0x0, propertyPVALID}, // HEBREW POINT JUDEO-SPANISH VARIKA - {0xFB1F, 0xFB36, propertyDISALLOWED}, // HEBREW LIGATURE YIDDISH YOD YOD PATAH..HEBRE - {0xFB37, 0x0, propertyUNASSIGNED}, // - {0xFB38, 0xFB3C, propertyDISALLOWED}, // HEBREW LETTER TET WITH DAGESH..HEBREW LETTER - {0xFB3D, 0x0, propertyUNASSIGNED}, // - {0xFB3E, 0x0, propertyDISALLOWED}, // HEBREW LETTER MEM WITH DAGESH - {0xFB3F, 0x0, propertyUNASSIGNED}, // - {0xFB40, 0xFB41, propertyDISALLOWED}, // HEBREW LETTER NUN WITH DAGESH..HEBREW LETTER - {0xFB42, 0x0, propertyUNASSIGNED}, // - {0xFB43, 0xFB44, propertyDISALLOWED}, // HEBREW LETTER FINAL PE WITH DAGESH..HEBREW L - {0xFB45, 0x0, propertyUNASSIGNED}, // - {0xFB46, 0xFBB1, propertyDISALLOWED}, // HEBREW LETTER TSADI WITH DAGESH..ARABIC LETT - {0xFBB2, 0xFBD2, propertyUNASSIGNED}, // .. - {0xFBD3, 0xFD3F, propertyDISALLOWED}, // ARABIC LETTER NG ISOLATED FORM..ORNATE RIGHT - {0xFD40, 0xFD4F, propertyUNASSIGNED}, // .. 
- {0xFD50, 0xFD8F, propertyDISALLOWED}, // ARABIC LIGATURE TEH WITH JEEM WITH MEEM INIT - {0xFD90, 0xFD91, propertyUNASSIGNED}, // .. - {0xFD92, 0xFDC7, propertyDISALLOWED}, // ARABIC LIGATURE MEEM WITH JEEM WITH KHAH INI - {0xFDC8, 0xFDCF, propertyUNASSIGNED}, // .. - {0xFDD0, 0xFDFD, propertyDISALLOWED}, // ..ARABIC LIGATURE BISMILLAH AR - {0xFDFE, 0xFDFF, propertyUNASSIGNED}, // .. - {0xFE00, 0xFE19, propertyDISALLOWED}, // VARIATION SELECTOR-1..PRESENTATION FORM FOR - {0xFE1A, 0xFE1F, propertyUNASSIGNED}, // .. - {0xFE20, 0xFE26, propertyPVALID}, // COMBINING LIGATURE LEFT HALF..COMBINING CONJ - {0xFE27, 0xFE2F, propertyUNASSIGNED}, // .. - {0xFE30, 0xFE52, propertyDISALLOWED}, // PRESENTATION FORM FOR VERTICAL TWO DOT LEADE - {0xFE53, 0x0, propertyUNASSIGNED}, // - {0xFE54, 0xFE66, propertyDISALLOWED}, // SMALL SEMICOLON..SMALL EQUALS SIGN - {0xFE67, 0x0, propertyUNASSIGNED}, // - {0xFE68, 0xFE6B, propertyDISALLOWED}, // SMALL REVERSE SOLIDUS..SMALL COMMERCIAL AT - {0xFE6C, 0xFE6F, propertyUNASSIGNED}, // .. - {0xFE70, 0xFE72, propertyDISALLOWED}, // ARABIC FATHATAN ISOLATED FORM..ARABIC DAMMAT - {0xFE73, 0x0, propertyPVALID}, // ARABIC TAIL FRAGMENT - {0xFE74, 0x0, propertyDISALLOWED}, // ARABIC KASRATAN ISOLATED FORM - {0xFE75, 0x0, propertyUNASSIGNED}, // - {0xFE76, 0xFEFC, propertyDISALLOWED}, // ARABIC FATHA ISOLATED FORM..ARABIC LIGATURE - {0xFEFD, 0xFEFE, propertyUNASSIGNED}, // .. - {0xFEFF, 0x0, propertyDISALLOWED}, // ZERO WIDTH NO-BREAK SPACE - {0xFF00, 0x0, propertyUNASSIGNED}, // - {0xFF01, 0xFFBE, propertyDISALLOWED}, // FULLWIDTH EXCLAMATION MARK..HALFWIDTH HANGUL - {0xFFBF, 0xFFC1, propertyUNASSIGNED}, // .. - {0xFFC2, 0xFFC7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER A..HALFWIDTH HANGUL - {0xFFC8, 0xFFC9, propertyUNASSIGNED}, // .. - {0xFFCA, 0xFFCF, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YEO..HALFWIDTH HANGU - {0xFFD0, 0xFFD1, propertyUNASSIGNED}, // .. - {0xFFD2, 0xFFD7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YO..HALFWIDTH HANGUL - {0xFFD8, 0xFFD9, propertyUNASSIGNED}, // .. - {0xFFDA, 0xFFDC, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER EU..HALFWIDTH HANGUL - {0xFFDD, 0xFFDF, propertyUNASSIGNED}, // .. - {0xFFE0, 0xFFE6, propertyDISALLOWED}, // FULLWIDTH CENT SIGN..FULLWIDTH WON SIGN - {0xFFE7, 0x0, propertyUNASSIGNED}, // - {0xFFE8, 0xFFEE, propertyDISALLOWED}, // HALFWIDTH FORMS LIGHT VERTICAL..HALFWIDTH WH - {0xFFEF, 0xFFF8, propertyUNASSIGNED}, // .. - {0xFFF9, 0xFFFF, propertyDISALLOWED}, // INTERLINEAR ANNOTATION ANCHOR.. - {0x1000D, 0x10026, propertyPVALID}, // LINEAR B SYLLABLE B036 JO..LINEAR B SYLLABLE - {0x10027, 0x0, propertyUNASSIGNED}, // - {0x10028, 0x1003A, propertyPVALID}, // LINEAR B SYLLABLE B060 RA..LINEAR B SYLLABLE - {0x1003B, 0x0, propertyUNASSIGNED}, // - {0x1003C, 0x1003D, propertyPVALID}, // LINEAR B SYLLABLE B017 ZA..LINEAR B SYLLABLE - {0x1003E, 0x0, propertyUNASSIGNED}, // - {0x1003F, 0x1004D, propertyPVALID}, // LINEAR B SYLLABLE B020 ZO..LINEAR B SYLLABLE - {0x1004E, 0x1004F, propertyUNASSIGNED}, // .. - {0x10050, 0x1005D, propertyPVALID}, // LINEAR B SYMBOL B018..LINEAR B SYMBOL B089 - {0x1005E, 0x1007F, propertyUNASSIGNED}, // .. - {0x10080, 0x100FA, propertyPVALID}, // LINEAR B IDEOGRAM B100 MAN..LINEAR B IDEOGRA - {0x100FB, 0x100FF, propertyUNASSIGNED}, // .. - {0x10100, 0x10102, propertyDISALLOWED}, // AEGEAN WORD SEPARATOR LINE..AEGEAN CHECK MAR - {0x10103, 0x10106, propertyUNASSIGNED}, // .. 
- {0x10107, 0x10133, propertyDISALLOWED}, // AEGEAN NUMBER ONE..AEGEAN NUMBER NINETY THOU - {0x10134, 0x10136, propertyUNASSIGNED}, // .. - {0x10137, 0x1018A, propertyDISALLOWED}, // AEGEAN WEIGHT BASE UNIT..GREEK ZERO SIGN - {0x1018B, 0x1018F, propertyUNASSIGNED}, // .. - {0x10190, 0x1019B, propertyDISALLOWED}, // ROMAN SEXTANS SIGN..ROMAN CENTURIAL SIGN - {0x1019C, 0x101CF, propertyUNASSIGNED}, // .. - {0x101D0, 0x101FC, propertyDISALLOWED}, // PHAISTOS DISC SIGN PEDESTRIAN..PHAISTOS DISC - {0x101FD, 0x0, propertyPVALID}, // PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE - {0x101FE, 0x1027F, propertyUNASSIGNED}, // .. - {0x10280, 0x1029C, propertyPVALID}, // LYCIAN LETTER A..LYCIAN LETTER X - {0x1029D, 0x1029F, propertyUNASSIGNED}, // .. - {0x102A0, 0x102D0, propertyPVALID}, // CARIAN LETTER A..CARIAN LETTER UUU3 - {0x102D1, 0x102FF, propertyUNASSIGNED}, // .. - {0x10300, 0x1031E, propertyPVALID}, // OLD ITALIC LETTER A..OLD ITALIC LETTER UU - {0x1031F, 0x0, propertyUNASSIGNED}, // - {0x10320, 0x10323, propertyDISALLOWED}, // OLD ITALIC NUMERAL ONE..OLD ITALIC NUMERAL F - {0x10324, 0x1032F, propertyUNASSIGNED}, // .. - {0x10330, 0x10340, propertyPVALID}, // GOTHIC LETTER AHSA..GOTHIC LETTER PAIRTHRA - {0x10341, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINETY - {0x10342, 0x10349, propertyPVALID}, // GOTHIC LETTER RAIDA..GOTHIC LETTER OTHAL - {0x1034A, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINE HUNDRED - {0x1034B, 0x1037F, propertyUNASSIGNED}, // .. - {0x10380, 0x1039D, propertyPVALID}, // UGARITIC LETTER ALPA..UGARITIC LETTER SSU - {0x1039E, 0x0, propertyUNASSIGNED}, // - {0x1039F, 0x0, propertyDISALLOWED}, // UGARITIC WORD DIVIDER - {0x103A0, 0x103C3, propertyPVALID}, // OLD PERSIAN SIGN A..OLD PERSIAN SIGN HA - {0x103C4, 0x103C7, propertyUNASSIGNED}, // .. - {0x103C8, 0x103CF, propertyPVALID}, // OLD PERSIAN SIGN AURAMAZDAA..OLD PERSIAN SIG - {0x103D0, 0x103D5, propertyDISALLOWED}, // OLD PERSIAN WORD DIVIDER..OLD PERSIAN NUMBER - {0x103D6, 0x103FF, propertyUNASSIGNED}, // .. - {0x10400, 0x10427, propertyDISALLOWED}, // DESERET CAPITAL LETTER LONG I..DESERET CAPIT - {0x10428, 0x1049D, propertyPVALID}, // DESERET SMALL LETTER LONG I..OSMANYA LETTER - {0x1049E, 0x1049F, propertyUNASSIGNED}, // .. - {0x104A0, 0x104A9, propertyPVALID}, // OSMANYA DIGIT ZERO..OSMANYA DIGIT NINE - {0x104AA, 0x107FF, propertyUNASSIGNED}, // .. - {0x10800, 0x10805, propertyPVALID}, // CYPRIOT SYLLABLE A..CYPRIOT SYLLABLE JA - {0x10806, 0x10807, propertyUNASSIGNED}, // .. - {0x10808, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE JO - {0x10809, 0x0, propertyUNASSIGNED}, // - {0x1080A, 0x10835, propertyPVALID}, // CYPRIOT SYLLABLE KA..CYPRIOT SYLLABLE WO - {0x10836, 0x0, propertyUNASSIGNED}, // - {0x10837, 0x10838, propertyPVALID}, // CYPRIOT SYLLABLE XA..CYPRIOT SYLLABLE XE - {0x10839, 0x1083B, propertyUNASSIGNED}, // .. - {0x1083C, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE ZA - {0x1083D, 0x1083E, propertyUNASSIGNED}, // .. - {0x1083F, 0x10855, propertyPVALID}, // CYPRIOT SYLLABLE ZO..IMPERIAL ARAMAIC LETTER - {0x10856, 0x0, propertyUNASSIGNED}, // - {0x10857, 0x1085F, propertyDISALLOWED}, // IMPERIAL ARAMAIC SECTION SIGN..IMPERIAL ARAM - {0x10860, 0x108FF, propertyUNASSIGNED}, // .. - {0x10900, 0x10915, propertyPVALID}, // PHOENICIAN LETTER ALF..PHOENICIAN LETTER TAU - {0x10916, 0x1091B, propertyDISALLOWED}, // PHOENICIAN NUMBER ONE..PHOENICIAN NUMBER THR - {0x1091C, 0x1091E, propertyUNASSIGNED}, // .. 
- {0x1091F, 0x0, propertyDISALLOWED}, // PHOENICIAN WORD SEPARATOR - {0x10920, 0x10939, propertyPVALID}, // LYDIAN LETTER A..LYDIAN LETTER C - {0x1093A, 0x1093E, propertyUNASSIGNED}, // .. - {0x1093F, 0x0, propertyDISALLOWED}, // LYDIAN TRIANGULAR MARK - {0x10940, 0x109FF, propertyUNASSIGNED}, // .. - {0x10A00, 0x10A03, propertyPVALID}, // KHAROSHTHI LETTER A..KHAROSHTHI VOWEL SIGN V - {0x10A04, 0x0, propertyUNASSIGNED}, // - {0x10A05, 0x10A06, propertyPVALID}, // KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SI - {0x10A07, 0x10A0B, propertyUNASSIGNED}, // .. - {0x10A0C, 0x10A13, propertyPVALID}, // KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI LET - {0x10A14, 0x0, propertyUNASSIGNED}, // - {0x10A15, 0x10A17, propertyPVALID}, // KHAROSHTHI LETTER CA..KHAROSHTHI LETTER JA - {0x10A18, 0x0, propertyUNASSIGNED}, // - {0x10A19, 0x10A33, propertyPVALID}, // KHAROSHTHI LETTER NYA..KHAROSHTHI LETTER TTT - {0x10A34, 0x10A37, propertyUNASSIGNED}, // .. - {0x10A38, 0x10A3A, propertyPVALID}, // KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN D - {0x10A3B, 0x10A3E, propertyUNASSIGNED}, // .. - {0x10A3F, 0x0, propertyPVALID}, // KHAROSHTHI VIRAMA - {0x10A40, 0x10A47, propertyDISALLOWED}, // KHAROSHTHI DIGIT ONE..KHAROSHTHI NUMBER ONE - {0x10A48, 0x10A4F, propertyUNASSIGNED}, // .. - {0x10A50, 0x10A58, propertyDISALLOWED}, // KHAROSHTHI PUNCTUATION DOT..KHAROSHTHI PUNCT - {0x10A59, 0x10A5F, propertyUNASSIGNED}, // .. - {0x10A60, 0x10A7C, propertyPVALID}, // OLD SOUTH ARABIAN LETTER HE..OLD SOUTH ARABI - {0x10A7D, 0x10A7F, propertyDISALLOWED}, // OLD SOUTH ARABIAN NUMBER ONE..OLD SOUTH ARAB - {0x10A80, 0x10AFF, propertyUNASSIGNED}, // .. - {0x10B00, 0x10B35, propertyPVALID}, // AVESTAN LETTER A..AVESTAN LETTER HE - {0x10B36, 0x10B38, propertyUNASSIGNED}, // .. - {0x10B39, 0x10B3F, propertyDISALLOWED}, // AVESTAN ABBREVIATION MARK..LARGE ONE RING OV - {0x10B40, 0x10B55, propertyPVALID}, // INSCRIPTIONAL PARTHIAN LETTER ALEPH..INSCRIP - {0x10B56, 0x10B57, propertyUNASSIGNED}, // .. - {0x10B58, 0x10B5F, propertyDISALLOWED}, // INSCRIPTIONAL PARTHIAN NUMBER ONE..INSCRIPTI - {0x10B60, 0x10B72, propertyPVALID}, // INSCRIPTIONAL PAHLAVI LETTER ALEPH..INSCRIPT - {0x10B73, 0x10B77, propertyUNASSIGNED}, // .. - {0x10B78, 0x10B7F, propertyDISALLOWED}, // INSCRIPTIONAL PAHLAVI NUMBER ONE..INSCRIPTIO - {0x10B80, 0x10BFF, propertyUNASSIGNED}, // .. - {0x10C00, 0x10C48, propertyPVALID}, // OLD TURKIC LETTER ORKHON A..OLD TURKIC LETTE - {0x10C49, 0x10E5F, propertyUNASSIGNED}, // .. - {0x10E60, 0x10E7E, propertyDISALLOWED}, // RUMI DIGIT ONE..RUMI FRACTION TWO THIRDS - {0x10E7F, 0x1107F, propertyUNASSIGNED}, // .. - {0x11080, 0x110BA, propertyPVALID}, // KAITHI SIGN CANDRABINDU..KAITHI SIGN NUKTA - {0x110BB, 0x110C1, propertyDISALLOWED}, // KAITHI ABBREVIATION SIGN..KAITHI DOUBLE DAND - {0x110C2, 0x11FFF, propertyUNASSIGNED}, // .. - {0x12000, 0x1236E, propertyPVALID}, // CUNEIFORM SIGN A..CUNEIFORM SIGN ZUM - {0x1236F, 0x123FF, propertyUNASSIGNED}, // .. - {0x12400, 0x12462, propertyDISALLOWED}, // CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NU - {0x12463, 0x1246F, propertyUNASSIGNED}, // .. - {0x12470, 0x12473, propertyDISALLOWED}, // CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD - {0x12474, 0x12FFF, propertyUNASSIGNED}, // .. - {0x13000, 0x1342E, propertyPVALID}, // EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYP - {0x1342F, 0x1CFFF, propertyUNASSIGNED}, // .. - {0x1D000, 0x1D0F5, propertyDISALLOWED}, // BYZANTINE MUSICAL SYMBOL PSILI..BYZANTINE MU - {0x1D0F6, 0x1D0FF, propertyUNASSIGNED}, // .. 
- {0x1D100, 0x1D126, propertyDISALLOWED}, // MUSICAL SYMBOL SINGLE BARLINE..MUSICAL SYMBO - {0x1D127, 0x1D128, propertyUNASSIGNED}, // .. - {0x1D129, 0x1D1DD, propertyDISALLOWED}, // MUSICAL SYMBOL MULTIPLE MEASURE REST..MUSICA - {0x1D1DE, 0x1D1FF, propertyUNASSIGNED}, // .. - {0x1D200, 0x1D245, propertyDISALLOWED}, // GREEK VOCAL NOTATION SYMBOL-1..GREEK MUSICAL - {0x1D246, 0x1D2FF, propertyUNASSIGNED}, // .. - {0x1D300, 0x1D356, propertyDISALLOWED}, // MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING - {0x1D357, 0x1D35F, propertyUNASSIGNED}, // .. - {0x1D360, 0x1D371, propertyDISALLOWED}, // COUNTING ROD UNIT DIGIT ONE..COUNTING ROD TE - {0x1D372, 0x1D3FF, propertyUNASSIGNED}, // .. - {0x1D400, 0x1D454, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL A..MATHEMATICAL IT - {0x1D455, 0x0, propertyUNASSIGNED}, // - {0x1D456, 0x1D49C, propertyDISALLOWED}, // MATHEMATICAL ITALIC SMALL I..MATHEMATICAL SC - {0x1D49D, 0x0, propertyUNASSIGNED}, // - {0x1D49E, 0x1D49F, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL C..MATHEMATICAL - {0x1D4A0, 0x1D4A1, propertyUNASSIGNED}, // .. - {0x1D4A2, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL G - {0x1D4A3, 0x1D4A4, propertyUNASSIGNED}, // .. - {0x1D4A5, 0x1D4A6, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL J..MATHEMATICAL - {0x1D4A7, 0x1D4A8, propertyUNASSIGNED}, // .. - {0x1D4A9, 0x1D4AC, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL N..MATHEMATICAL - {0x1D4AD, 0x0, propertyUNASSIGNED}, // - {0x1D4AE, 0x1D4B9, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL S..MATHEMATICAL - {0x1D4BA, 0x0, propertyUNASSIGNED}, // - {0x1D4BB, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL F - {0x1D4BC, 0x0, propertyUNASSIGNED}, // - {0x1D4BD, 0x1D4C3, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL H..MATHEMATICAL SC - {0x1D4C4, 0x0, propertyUNASSIGNED}, // - {0x1D4C5, 0x1D505, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL P..MATHEMATICAL FR - {0x1D506, 0x0, propertyUNASSIGNED}, // - {0x1D507, 0x1D50A, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL D..MATHEMATICAL - {0x1D50B, 0x1D50C, propertyUNASSIGNED}, // .. - {0x1D50D, 0x1D514, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL J..MATHEMATICAL - {0x1D515, 0x0, propertyUNASSIGNED}, // - {0x1D516, 0x1D51C, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL S..MATHEMATICAL - {0x1D51D, 0x0, propertyUNASSIGNED}, // - {0x1D51E, 0x1D539, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR SMALL A..MATHEMATICAL D - {0x1D53A, 0x0, propertyUNASSIGNED}, // - {0x1D53B, 0x1D53E, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL D..MATHEM - {0x1D53F, 0x0, propertyUNASSIGNED}, // - {0x1D540, 0x1D544, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL I..MATHEM - {0x1D545, 0x0, propertyUNASSIGNED}, // - {0x1D546, 0x0, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL O - {0x1D547, 0x1D549, propertyUNASSIGNED}, // .. - {0x1D54A, 0x1D550, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL S..MATHEM - {0x1D551, 0x0, propertyUNASSIGNED}, // - {0x1D552, 0x1D6A5, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK SMALL A..MATHEMAT - {0x1D6A6, 0x1D6A7, propertyUNASSIGNED}, // .. - {0x1D6A8, 0x1D7CB, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL ALPHA..MATHEMATICA - {0x1D7CC, 0x1D7CD, propertyUNASSIGNED}, // .. - {0x1D7CE, 0x1D7FF, propertyDISALLOWED}, // MATHEMATICAL BOLD DIGIT ZERO..MATHEMATICAL M - {0x1D800, 0x1EFFF, propertyUNASSIGNED}, // .. 
- {0x1F000, 0x1F02B, propertyDISALLOWED}, // MAHJONG TILE EAST WIND..MAHJONG TILE BACK - {0x1F02C, 0x1F02F, propertyUNASSIGNED}, // .. - {0x1F030, 0x1F093, propertyDISALLOWED}, // DOMINO TILE HORIZONTAL BACK..DOMINO TILE VER - {0x1F094, 0x1F0FF, propertyUNASSIGNED}, // .. - {0x1F100, 0x1F10A, propertyDISALLOWED}, // DIGIT ZERO FULL STOP..DIGIT NINE COMMA - {0x1F10B, 0x1F10F, propertyUNASSIGNED}, // .. - {0x1F110, 0x1F12E, propertyDISALLOWED}, // PARENTHESIZED LATIN CAPITAL LETTER A..CIRCLE - {0x1F12F, 0x1F130, propertyUNASSIGNED}, // .. - {0x1F131, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER B - {0x1F132, 0x1F13C, propertyUNASSIGNED}, // .. - {0x1F13D, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER N - {0x1F13E, 0x0, propertyUNASSIGNED}, // - {0x1F13F, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER P - {0x1F140, 0x1F141, propertyUNASSIGNED}, // .. - {0x1F142, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER S - {0x1F143, 0x1F145, propertyUNASSIGNED}, // .. - {0x1F146, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER W - {0x1F147, 0x1F149, propertyUNASSIGNED}, // .. - {0x1F14A, 0x1F14E, propertyDISALLOWED}, // SQUARED HV..SQUARED PPV - {0x1F14F, 0x1F156, propertyUNASSIGNED}, // .. - {0x1F157, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER H - {0x1F158, 0x1F15E, propertyUNASSIGNED}, // .. - {0x1F15F, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER P - {0x1F160, 0x1F178, propertyUNASSIGNED}, // .. - {0x1F179, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER J - {0x1F17A, 0x0, propertyUNASSIGNED}, // - {0x1F17B, 0x1F17C, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER L..NEG - {0x1F17D, 0x1F17E, propertyUNASSIGNED}, // .. - {0x1F17F, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER P - {0x1F180, 0x1F189, propertyUNASSIGNED}, // .. - {0x1F18A, 0x1F18D, propertyDISALLOWED}, // CROSSED NEGATIVE SQUARED LATIN CAPITAL LETTE - {0x1F18E, 0x1F18F, propertyUNASSIGNED}, // .. - {0x1F190, 0x0, propertyDISALLOWED}, // SQUARE DJ - {0x1F191, 0x1F1FF, propertyUNASSIGNED}, // .. - {0x1F200, 0x0, propertyDISALLOWED}, // SQUARE HIRAGANA HOKA - {0x1F201, 0x1F20F, propertyUNASSIGNED}, // .. - {0x1F210, 0x1F231, propertyDISALLOWED}, // SQUARED CJK UNIFIED IDEOGRAPH-624B..SQUARED - {0x1F232, 0x1F23F, propertyUNASSIGNED}, // .. - {0x1F240, 0x1F248, propertyDISALLOWED}, // TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRA - {0x1F249, 0x1FFFD, propertyUNASSIGNED}, // .. - {0x1FFFE, 0x1FFFF, propertyDISALLOWED}, // .. - {0x20000, 0x2A6D6, propertyPVALID}, // .... - {0x2A700, 0x2B734, propertyPVALID}, // .... - {0x2F800, 0x2FA1D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPA - {0x2FA1E, 0x2FFFD, propertyUNASSIGNED}, // .. - {0x2FFFE, 0x2FFFF, propertyDISALLOWED}, // .. - {0x30000, 0x3FFFD, propertyUNASSIGNED}, // .. - {0x3FFFE, 0x3FFFF, propertyDISALLOWED}, // .. - {0x40000, 0x4FFFD, propertyUNASSIGNED}, // .. - {0x4FFFE, 0x4FFFF, propertyDISALLOWED}, // .. - {0x50000, 0x5FFFD, propertyUNASSIGNED}, // .. - {0x5FFFE, 0x5FFFF, propertyDISALLOWED}, // .. - {0x60000, 0x6FFFD, propertyUNASSIGNED}, // .. - {0x6FFFE, 0x6FFFF, propertyDISALLOWED}, // .. - {0x70000, 0x7FFFD, propertyUNASSIGNED}, // .. - {0x7FFFE, 0x7FFFF, propertyDISALLOWED}, // .. - {0x80000, 0x8FFFD, propertyUNASSIGNED}, // .. - {0x8FFFE, 0x8FFFF, propertyDISALLOWED}, // .. - {0x90000, 0x9FFFD, propertyUNASSIGNED}, // .. - {0x9FFFE, 0x9FFFF, propertyDISALLOWED}, // .. 
- {0xA0000, 0xAFFFD, propertyUNASSIGNED}, // .. - {0xAFFFE, 0xAFFFF, propertyDISALLOWED}, // .. - {0xB0000, 0xBFFFD, propertyUNASSIGNED}, // .. - {0xBFFFE, 0xBFFFF, propertyDISALLOWED}, // .. - {0xC0000, 0xCFFFD, propertyUNASSIGNED}, // .. - {0xCFFFE, 0xCFFFF, propertyDISALLOWED}, // .. - {0xD0000, 0xDFFFD, propertyUNASSIGNED}, // .. - {0xDFFFE, 0xDFFFF, propertyDISALLOWED}, // .. - {0xE0000, 0x0, propertyUNASSIGNED}, // - {0xE0001, 0x0, propertyDISALLOWED}, // LANGUAGE TAG - {0xE0002, 0xE001F, propertyUNASSIGNED}, // .. - {0xE0020, 0xE007F, propertyDISALLOWED}, // TAG SPACE..CANCEL TAG - {0xE0080, 0xE00FF, propertyUNASSIGNED}, // .. - {0xE0100, 0xE01EF, propertyDISALLOWED}, // VARIATION SELECTOR-17..VARIATION SELECTOR-25 - {0xE01F0, 0xEFFFD, propertyUNASSIGNED}, // .. - {0xEFFFE, 0x10FFFF, propertyDISALLOWED}, // .. -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/idn/example_test.go b/Godeps/_workspace/src/github.com/miekg/dns/idn/example_test.go deleted file mode 100644 index 8833cd91de..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/idn/example_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package idn_test - -import ( - "fmt" - "github.com/miekg/dns/idn" -) - -func ExampleToPunycode() { - name := "インターネット.テスト" - fmt.Printf("%s -> %s", name, idn.ToPunycode(name)) - // Output: インターネット.テスト -> xn--eckucmux0ukc.xn--zckzah -} - -func ExampleFromPunycode() { - name := "xn--mgbaja8a1hpac.xn--mgbachtv" - fmt.Printf("%s -> %s", name, idn.FromPunycode(name)) - // Output: xn--mgbaja8a1hpac.xn--mgbachtv -> الانترنت.اختبار -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go b/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go deleted file mode 100644 index 09363821b1..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go +++ /dev/null @@ -1,311 +0,0 @@ -// Package idn implements encoding from and to punycode as specified by RFC 3492. -package idn - -import ( - "bytes" - "strings" - "unicode" - - "github.com/miekg/dns" -) - -// Implementation idea from the RFC itself and from IDNA::Punycode created by -// Tatsuhiko Miyagawa and released under the Perl Artistic -// License in 2002. - -const ( - _MIN rune = 1 - _MAX rune = 26 - _SKEW rune = 38 - _BASE rune = 36 - _BIAS rune = 72 - _N rune = 128 - _DAMP rune = 700 - - _DELIMITER = '-' - _PREFIX = "xn--" ) - -// ToPunycode converts unicode domain names to DNS-appropriate punycode names. -// It returns an empty string for domain names that contain invalid unicode. -// This function expects domain names in lowercase. -func ToPunycode(s string) string { - tokens := dns.SplitDomainName(s) - switch { - case s == "": - return "" - case tokens == nil: // s == . - return "." - case s[len(s)-1] == '.': - tokens = append(tokens, "") - } - - for i := range tokens { - t := encode([]byte(tokens[i])) - if t == nil { - return "" - } - tokens[i] = string(t) - } - return strings.Join(tokens, ".") -} - -// FromPunycode returns the unicode domain name for the provided punycode string. -func FromPunycode(s string) string { - tokens := dns.SplitDomainName(s) - switch { - case s == "": - return "" - case tokens == nil: // s == . - return "." - case s[len(s)-1] == '.': - tokens = append(tokens, "") - } - for i := range tokens { - tokens[i] = string(decode([]byte(tokens[i]))) - } - return strings.Join(tokens, ".") -}
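Note how both converters handle fully qualified names: when the input ends in a dot, an empty label is appended so that the final "." survives the strings.Join round trip. A small usage sketch, consistent with the encode/decode test cases further down in this diff (the idn import path is the one deleted above):

    name := "テスト."                      // fully qualified unicode name
    ace := idn.ToPunycode(name)           // "xn--zckzah." — trailing dot kept
    back := idn.FromPunycode(ace)         // "テスト." — round trip preserves the dot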
-// digitval converts a single byte into the value that is used to calculate the decoded unicode character. -const errdigit = 0xffff - -func digitval(code rune) rune { - switch { - case code >= 'A' && code <= 'Z': - return code - 'A' - case code >= 'a' && code <= 'z': - return code - 'a' - case code >= '0' && code <= '9': - return code - '0' + 26 - } - return errdigit -} - -// lettercode finds the BASE36 byte (a-z0-9) for the calculated digit value. -func lettercode(digit rune) rune { - switch { - case digit >= 0 && digit <= 25: - return digit + 'a' - case digit >= 26 && digit <= 35: - return digit - 26 + '0' - } - panic("dns: not reached") -} - -// adapt calculates the next bias to be used for the next iteration's delta. -func adapt(delta rune, numpoints int, firsttime bool) rune { - if firsttime { - delta /= _DAMP - } else { - delta /= 2 - } - - var k rune - for delta = delta + delta/rune(numpoints); delta > (_BASE-_MIN)*_MAX/2; k += _BASE { - delta /= _BASE - _MIN - } - - return k + ((_BASE-_MIN+1)*delta)/(delta+_SKEW) -} - -// next finds the minimal rune (the one with the lowest codepoint value) that is equal to or above the boundary. -func next(b []rune, boundary rune) rune { - if len(b) == 0 { - panic("dns: invalid set of runes to determine next one") - } - m := b[0] - for _, x := range b[1:] { - if x >= boundary && (m < boundary || x < m) { - m = x - } - } - return m -} - -// preprune converts a unicode rune to lower case. It does not yet -// support everything described in the RFCs. -func preprune(r rune) rune { - if unicode.IsUpper(r) { - r = unicode.ToLower(r) - } - return r -} - -// tfunc helps calculate each character's weight. -func tfunc(k, bias rune) rune { - switch { - case k <= bias: - return _MIN - case k >= bias+_MAX: - return _MAX - } - return k - bias -} - -// encode transforms Unicode input bytes (that represent a DNS label) into a -// punycode bytestream. It returns nil if there is an invalid -// character in the label.
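The inner loop of encode below is the RFC 3492 variable-length integer emitter. Isolated as a standalone sketch for readability (the helper name encodeDelta is hypothetical and not part of the deleted file; it reuses _BASE, tfunc and lettercode from above and mirrors the loop inside encode):

    // encodeDelta emits the delta q as a variable-length integer,
    // one base-36 digit at a time, using the current bias.
    func encodeDelta(q, bias rune, out *bytes.Buffer) {
    	for k := _BASE; ; k += _BASE {
    		t := tfunc(k, bias)
    		if q < t { // q fits in the final digit
    			break
    		}
    		// emit digit t + (q-t) mod (base-t), then divide q down
    		out.WriteRune(lettercode(t + (q-t)%(_BASE-t)))
    		q = (q - t) / (_BASE - t)
    	}
    	out.WriteRune(lettercode(q)) // final, most significant digit
    }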
-func encode(input []byte) []byte { - n, bias := _N, _BIAS - - b := bytes.Runes(input) - for i := range b { - if !isValidRune(b[i]) { - return nil - } - - b[i] = preprune(b[i]) - } - - basic := make([]byte, 0, len(b)) - for _, ltr := range b { - if ltr <= 0x7f { - basic = append(basic, byte(ltr)) - } - } - basiclen := len(basic) - fulllen := len(b) - if basiclen == fulllen { - return basic - } - - var out bytes.Buffer - - out.WriteString(_PREFIX) - if basiclen > 0 { - out.Write(basic) - out.WriteByte(_DELIMITER) - } - - var ( - ltr, nextltr rune - delta, q rune // delta calculation (see rfc) - t, k, cp rune // weight and codepoint calculation - ) - - s := &bytes.Buffer{} - for h := basiclen; h < fulllen; n, delta = n+1, delta+1 { - nextltr = next(b, n) - s.Truncate(0) - s.WriteRune(nextltr) - delta, n = delta+(nextltr-n)*rune(h+1), nextltr - - for _, ltr = range b { - if ltr < n { - delta++ - } - if ltr == n { - q = delta - for k = _BASE; ; k += _BASE { - t = tfunc(k, bias) - if q < t { - break - } - cp = t + ((q - t) % (_BASE - t)) - out.WriteRune(lettercode(cp)) - q = (q - t) / (_BASE - t) - } - - out.WriteRune(lettercode(q)) - - bias = adapt(delta, h+1, h == basiclen) - h, delta = h+1, 0 - } - } - } - return out.Bytes() -} - -// decode transforms punycode input bytes (that represent a DNS label) into a Unicode bytestream. -func decode(b []byte) []byte { - src := b // b will be advanced and we need to keep the original - - n, bias := _N, _BIAS - if !bytes.HasPrefix(b, []byte(_PREFIX)) { - return b - } - out := make([]rune, 0, len(b)) - b = b[len(_PREFIX):] - for pos := len(b) - 1; pos >= 0; pos-- { - // only the last delimiter is of interest - if b[pos] == _DELIMITER { - out = append(out, bytes.Runes(b[:pos])...) - b = b[pos+1:] // trim source string - break - } - } - if len(b) == 0 { - return src - } - var ( - i, oldi, w rune - ch byte - t, digit rune - ln int - ) - - for i = 0; len(b) > 0; i++ { - oldi, w = i, 1 - for k := _BASE; len(b) > 0; k += _BASE { - ch, b = b[0], b[1:] - digit = digitval(rune(ch)) - if digit == errdigit { - return src - } - i += digit * w - - t = tfunc(k, bias) - if digit < t { - break - } - - w *= _BASE - t - } - ln = len(out) + 1 - bias = adapt(i-oldi, ln, oldi == 0) - n += i / rune(ln) - i = i % rune(ln) - // insert - out = append(out, 0) - copy(out[i+1:], out[i:]) - out[i] = n - } - - var ret bytes.Buffer - for _, r := range out { - ret.WriteRune(r) - } - return ret.Bytes() -} - -// isValidRune checks if the character is valid. We look up the -// character's property in the code points list. For now we aren't checking the special -// rules for contextual properties. -func isValidRune(r rune) bool { - return findProperty(r) == propertyPVALID -} - -// findProperty looks up the code point property of the given -// character.
It uses a binary search, as we have a slice of -// ordered ranges (O(log n) lookups). -func findProperty(r rune) property { - imin, imax := 0, len(codePoints)-1 - - for imax >= imin { - imid := (imin + imax) / 2 - - codePoint := codePoints[imid] - if (codePoint.start == r && codePoint.end == 0) || (codePoint.start <= r && codePoint.end >= r) { - return codePoint.state - } - - if (codePoint.end > 0 && codePoint.end < r) || (codePoint.end == 0 && codePoint.start < r) { - imin = imid + 1 - } else { - imax = imid - 1 - } - } - - return propertyUnknown -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode_test.go b/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode_test.go deleted file mode 100644 index f8b355ca72..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package idn - -import ( - "strings" - "testing" -) - -var testcases = [][2]string{ - {"", ""}, - {"a", "a"}, - {"A-B", "a-b"}, - {"A-B-C", "a-b-c"}, - {"AbC", "abc"}, - {"я", "xn--41a"}, - {"zя", "xn--z-0ub"}, - {"яZ", "xn--z-zub"}, - {"а-я", "xn----7sb8g"}, - {"إختبار", "xn--kgbechtv"}, - {"آزمایشی", "xn--hgbk6aj7f53bba"}, - {"测试", "xn--0zwm56d"}, - {"測試", "xn--g6w251d"}, - {"испытание", "xn--80akhbyknj4f"}, - {"परीक्षा", "xn--11b5bs3a9aj6g"}, - {"δοκιμή", "xn--jxalpdlp"}, - {"테스트", "xn--9t4b11yi5a"}, - {"טעסט", "xn--deba0ad"}, - {"テスト", "xn--zckzah"}, - {"பரிட்சை", "xn--hlcj6aya9esc7a"}, - {"mamão-com-açúcar", "xn--mamo-com-acar-yeb1e6q"}, - {"σ", "xn--4xa"}, -} - -func TestEncodeDecodePunycode(t *testing.T) { - for _, tst := range testcases { - enc := encode([]byte(tst[0])) - if string(enc) != tst[1] { - t.Errorf("%s encoded as %s but should be %s", tst[0], enc, tst[1]) - } - dec := decode([]byte(tst[1])) - if string(dec) != strings.ToLower(tst[0]) { - t.Errorf("%s decoded as %s but should be %s", tst[1], dec, strings.ToLower(tst[0])) - } - } -} - -func TestToFromPunycode(t *testing.T) { - for _, tst := range testcases { - // assert unicode.com == punycode.com - full := ToPunycode(tst[0] + ".com") - if full != tst[1]+".com" { - t.Errorf("invalid result from string conversion to punycode, %s but should be %s.com", full, tst[1]) - } - // assert punycode.punycode == unicode.unicode - decoded := FromPunycode(tst[1] + "." + tst[1]) - if decoded != strings.ToLower(tst[0]+"."+tst[0]) { - t.Errorf("invalid result from string conversion from punycode, %s but should be %s.%s", decoded, tst[0], tst[0]) - } - } -} - -func TestEncodeDecodeFinalPeriod(t *testing.T) { - for _, tst := range testcases { - // assert unicode.com. == punycode.com. - full := ToPunycode(tst[0] + ".") - if full != tst[1]+"." { - t.Errorf("invalid result from string conversion to punycode when period added at the end, %#v but should be %#v", full, tst[1]+".") - } - // assert punycode.com. == unicode.com. - decoded := FromPunycode(tst[1] + ".") - if decoded != strings.ToLower(tst[0]+".") { - t.Errorf("invalid result from string conversion from punycode when period added, %#v but should be %#v", decoded, tst[0]+".") - } - full = ToPunycode(tst[0]) - if full != tst[1] { - t.Errorf("invalid result from string conversion to punycode when no period added at the end, %#v but should be %#v", full, tst[1]) - } - // assert punycode == unicode
- decoded = FromPunycode(tst[1]) - if decoded != strings.ToLower(tst[0]) { - t.Errorf("invalid result from string conversion from punycode when no period added, %#v but should be %#v", decoded, tst[0]) - } - } -} - -var invalidACEs = []string{ - "xn--*", - "xn--", - "xn---", -} - -func TestInvalidPunycode(t *testing.T) { - for _, d := range invalidACEs { - s := FromPunycode(d) - if s != d { - t.Errorf("Changed invalid name %s to %#v", d, s) - } - } -} - -// You can verify which labels are valid or not by comparing against the Verisign -// website: http://mct.verisign-grs.com/ -var invalidUnicodes = []string{ - "Σ", - "ЯZ", - "Испытание", -} - -func TestInvalidUnicodes(t *testing.T) { - for _, d := range invalidUnicodes { - s := ToPunycode(d) - if s != "" { - t.Errorf("Changed invalid name %s to %#v", d, s) - } - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/labels.go b/Godeps/_workspace/src/github.com/miekg/dns/labels.go deleted file mode 100644 index 758e5783de..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/labels.go +++ /dev/null @@ -1,162 +0,0 @@ -package dns - -// Holds a bunch of helper functions for dealing with labels. - -// SplitDomainName splits a name string into its labels. -// www.miek.nl. returns []string{"www", "miek", "nl"} -// The root label (.) returns nil. Note that using -// strings.Split(s, ".") will work in most cases, but does not handle -// escaped dots (\.) for instance. -func SplitDomainName(s string) (labels []string) { - if len(s) == 0 { - return nil - } - fqdnEnd := 0 // offset of the final '.' or the length of the name - idx := Split(s) - begin := 0 - if s[len(s)-1] == '.' { - fqdnEnd = len(s) - 1 - } else { - fqdnEnd = len(s) - } - - switch len(idx) { - case 0: - return nil - case 1: - // no-op - default: - end := 0 - for i := 1; i < len(idx); i++ { - end = idx[i] - labels = append(labels, s[begin:end-1]) - begin = end - } - } - - labels = append(labels, s[begin:fqdnEnd]) - return labels -} - -// CompareDomainName compares the names s1 and s2 and -// returns how many labels they have in common starting from the *right*. -// The comparison stops at the first inequality. The names are not downcased -// before the comparison. -// -// www.miek.nl. and miek.nl. have two labels in common: miek and nl -// www.miek.nl. and www.bla.nl. have one label in common: nl -func CompareDomainName(s1, s2 string) (n int) { - s1 = Fqdn(s1) - s2 = Fqdn(s2) - l1 := Split(s1) - l2 := Split(s2) - - // the first check: root label - if l1 == nil || l2 == nil { - return - } - - j1 := len(l1) - 1 // end - i1 := len(l1) - 2 // start - j2 := len(l2) - 1 - i2 := len(l2) - 2 - // the second check can be done here: last/only label - // before we fall through into the for-loop below - if s1[l1[j1]:] == s2[l2[j2]:] { - n++ - } else { - return - } - for { - if i1 < 0 || i2 < 0 { - break - } - if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] { - n++ - } else { - break - } - j1-- - i1-- - j2-- - i2-- - } - return -} - -// CountLabel counts the number of labels in the string s. -func CountLabel(s string) (labels int) { - if s == "." { - return - } - off := 0 - end := false - for { - off, end = NextLabel(s, off) - labels++ - if end { - return - } - } - panic("dns: not reached") -} - -// Split splits a name s into its label indexes. -// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. -// The root name (.) returns nil. Also see dns.SplitDomainName. -func Split(s string) []int { - if s == "."
{ - return nil - } - idx := make([]int, 1, 3) - off := 0 - end := false - - for { - off, end = NextLabel(s, off) - if end { - return idx - } - idx = append(idx, off) - } - panic("dns: not reached") -} - -// NextLabel returns the index of the start of the next label in the -// string s starting at offset. -// The bool end is true when the end of the string has been reached. -func NextLabel(s string, offset int) (i int, end bool) { - quote := false - for i = offset; i < len(s)-1; i++ { - switch s[i] { - case '\\': - quote = !quote - default: - quote = false - case '.': - if quote { - quote = !quote - continue - } - return i + 1, false - } - } - return i + 1, true -} - -// PrevLabel returns the index of the label when starting from the right and -// jumping n labels to the left. -// The bool start is true when the start of the string has been overshot. -func PrevLabel(s string, n int) (i int, start bool) { - if n == 0 { - return len(s), false - } - lab := Split(s) - if lab == nil { - return 0, true - } - if n > len(lab) { - return 0, true - } - return lab[len(lab)-n], false -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/labels_test.go b/Godeps/_workspace/src/github.com/miekg/dns/labels_test.go deleted file mode 100644 index 2a3f3d05d9..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/labels_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package dns - -import ( - "testing" -) - -func TestCompareDomainName(t *testing.T) { - s1 := "www.miek.nl." - s2 := "miek.nl." - s3 := "www.bla.nl." - s4 := "nl.www.bla." - s5 := "nl" - s6 := "miek.nl" - - if CompareDomainName(s1, s2) != 2 { - t.Errorf("%s with %s should be %d", s1, s2, 2) - } - if CompareDomainName(s1, s3) != 1 { - t.Errorf("%s with %s should be %d", s1, s3, 1) - } - if CompareDomainName(s3, s4) != 0 { - t.Errorf("%s with %s should be %d", s3, s4, 0) - } - // Non qualified tests - if CompareDomainName(s1, s5) != 1 { - t.Errorf("%s with %s should be %d", s1, s5, 1) - } - if CompareDomainName(s1, s6) != 2 { - t.Errorf("%s with %s should be %d", s1, s6, 2) - } - - if CompareDomainName(s1, ".") != 0 { - t.Errorf("%s with %s should be %d", s1, ".", 0) - } - if CompareDomainName(".", ".") != 0 { - t.Errorf("%s with %s should be %d", ".", ".", 0) - } -} - -func TestSplit(t *testing.T) { - splitter := map[string]int{ - "www.miek.nl.": 3, - "www.miek.nl": 3, - "www..miek.nl": 4, - `www\.miek.nl.`: 2, - `www\\.miek.nl.`: 3, - ".": 0, - "nl.": 1, - "nl": 1, - "com.": 1, - ".com.": 2, - } - for s, i := range splitter { - if x := len(Split(s)); x != i { - t.Errorf("labels should be %d, got %d: %s %v", i, x, s, Split(s)) - } else { - t.Logf("%s %v", s, Split(s)) - } - } -} - -func TestSplit2(t *testing.T) { - splitter := map[string][]int{ - "www.miek.nl.": []int{0, 4, 9}, - "www.miek.nl": []int{0, 4, 9}, - "nl": []int{0}, - } - for s, i := range splitter { - x := Split(s) - switch len(i) { - case 1: - if x[0] != i[0] { - t.Errorf("labels should be %v, got %v: %s", i, x, s) - } - default: - if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] { - t.Errorf("labels should be %v, got %v: %s", i, x, s) - } - } - } -} - -func TestPrevLabel(t *testing.T) { - type prev struct { - string - int - } - prever := map[prev]int{ - prev{"www.miek.nl.", 0}: 12, - prev{"www.miek.nl.", 1}: 9, - prev{"www.miek.nl.", 2}: 4, - - prev{"www.miek.nl", 0}: 11, - prev{"www.miek.nl", 1}: 9, - prev{"www.miek.nl", 2}: 4, - - prev{"www.miek.nl.", 5}: 0, - prev{"www.miek.nl", 5}: 0, - - prev{"www.miek.nl.", 3}: 0, - prev{"www.miek.nl", 3}: 0, - } - for s, i := range prever { -
x, ok := PrevLabel(s.string, s.int) - if i != x { - t.Errorf("label should be %d, got %d, %t: preving %d, %s", i, x, ok, s.int, s.string) - } - } -} - -func TestCountLabel(t *testing.T) { - splitter := map[string]int{ - "www.miek.nl.": 3, - "www.miek.nl": 3, - "nl": 1, - ".": 0, - } - for s, i := range splitter { - x := CountLabel(s) - if x != i { - t.Errorf("CountLabel should have %d, got %d", i, x) - } - } -} - -func TestSplitDomainName(t *testing.T) { - labels := map[string][]string{ - "miek.nl": []string{"miek", "nl"}, - ".": nil, - "www.miek.nl.": []string{"www", "miek", "nl"}, - "www.miek.nl": []string{"www", "miek", "nl"}, - "www..miek.nl": []string{"www", "", "miek", "nl"}, - `www\.miek.nl`: []string{`www\.miek`, "nl"}, - `www\\.miek.nl`: []string{`www\\`, "miek", "nl"}, - } -domainLoop: - for domain, splits := range labels { - parts := SplitDomainName(domain) - if len(parts) != len(splits) { - t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits) - continue domainLoop - } - for i := range parts { - if parts[i] != splits[i] { - t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits) - continue domainLoop - } - } - } -} - -func TestIsDomainName(t *testing.T) { - type ret struct { - ok bool - lab int - } - names := map[string]*ret{ - "..": &ret{false, 1}, - "@.": &ret{true, 1}, - "www.example.com": &ret{true, 3}, - "www.e%ample.com": &ret{true, 3}, - "www.example.com.": &ret{true, 3}, - "mi\\k.nl.": &ret{true, 2}, - "mi\\k.nl": &ret{true, 2}, - } - for d, ok := range names { - l, k := IsDomainName(d) - if ok.ok != k || ok.lab != l { - t.Errorf(" got %v %d for %s ", k, l, d) - t.Errorf("have %v %d for %s ", ok.ok, ok.lab, d) - } - } -} - -func BenchmarkSplitLabels(b *testing.B) { - for i := 0; i < b.N; i++ { - Split("www.example.com") - } -} - -func BenchmarkLenLabels(b *testing.B) { - for i := 0; i < b.N; i++ { - CountLabel("www.example.com") - } -} - -func BenchmarkCompareLabels(b *testing.B) { - for i := 0; i < b.N; i++ { - CompareDomainName("www.example.com", "aa.example.com") - } -} - -func BenchmarkIsSubDomain(b *testing.B) { - for i := 0; i < b.N; i++ { - IsSubDomain("www.example.com", "aa.example.com") - IsSubDomain("example.com", "aa.example.com") - IsSubDomain("miek.nl", "aa.example.com") - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/msg.go b/Godeps/_workspace/src/github.com/miekg/dns/msg.go deleted file mode 100644 index 34896a75c7..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/msg.go +++ /dev/null @@ -1,2010 +0,0 @@ -// DNS packet assembly, see RFC 1035. Converting from - Unpack() - -// and to - Pack() - wire format. -// All the packers and unpackers take a (msg []byte, off int) -// and return (off1 int, ok bool). If they return ok==false, they -// also return off1==len(msg), so that the next unpacker will -// also fail. This lets us avoid checks of ok until the end of a -// packing sequence. - -package dns - -import ( - "encoding/base32" - "encoding/base64" - "encoding/hex" - "math/big" - "math/rand" - "net" - "reflect" - "strconv" - "time" -) - -const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer - -var ( - // ErrAlg indicates an error with the (DNSSEC) algorithm. - ErrAlg error = &Error{err: "bad algorithm"} - // ErrAuth indicates an error in the TSIG authentication. - ErrAuth error = &Error{err: "bad authentication"} - // ErrBuf indicates that the buffer used it too small for the message. 
- ErrBuf error = &Error{err: "buffer size too small"} - // ErrConn indicates that a connection has both a TCP and UDP socket. - ErrConn error = &Error{err: "conn holds both UDP and TCP connection"} - // ErrConnEmpty indicates a connection is being used before it is initialized. - ErrConnEmpty error = &Error{err: "conn has no connection"} - // ErrExtendedRcode ... - ErrExtendedRcode error = &Error{err: "bad extended rcode"} - // ErrFqdn indicates that a domain name does not have a closing dot. - ErrFqdn error = &Error{err: "domain must be fully qualified"} - // ErrId indicates there is a mismatch with the message's ID. - ErrId error = &Error{err: "id mismatch"} - ErrKeyAlg error = &Error{err: "bad key algorithm"} - ErrKey error = &Error{err: "bad key"} - ErrKeySize error = &Error{err: "bad key size"} - ErrNoSig error = &Error{err: "no signature found"} - ErrPrivKey error = &Error{err: "bad private key"} - ErrRcode error = &Error{err: "bad rcode"} - ErrRdata error = &Error{err: "bad rdata"} - ErrRRset error = &Error{err: "bad rrset"} - ErrSecret error = &Error{err: "no secrets defined"} - ErrShortRead error = &Error{err: "short read"} - // ErrSig indicates that a signature cannot be cryptographically validated. - ErrSig error = &Error{err: "bad signature"} - // ErrSigGen indicates a failure to generate a signature. - ErrSigGen error = &Error{err: "bad signature generation"} - // ErrSOA indicates that no SOA RR was seen when doing zone transfers. - ErrSoa error = &Error{err: "no SOA"} - // ErrTime indicates a timing error in TSIG authentication. - ErrTime error = &Error{err: "bad time"} -) - -// Id, by default, returns a 16-bit random number to be used as a -// message id. The randomness provided should be good enough. Since this is a -// variable, the function can be reassigned to a custom function. -// For instance, to make it return a static value: -// -// dns.Id = func() uint16 { return 3 } -var Id func() uint16 = id - -// MsgHdr is a manually-unpacked version of (id, bits). -type MsgHdr struct { - Id uint16 - Response bool - Opcode int - Authoritative bool - Truncated bool - RecursionDesired bool - RecursionAvailable bool - Zero bool - AuthenticatedData bool - CheckingDisabled bool - Rcode int -} - -// Msg contains the layout of a DNS message. -type Msg struct { - MsgHdr - Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. This is not part of the official DNS packet format. - Question []Question // Holds the RR(s) of the question section. - Answer []RR // Holds the RR(s) of the answer section. - Ns []RR // Holds the RR(s) of the authority section. - Extra []RR // Holds the RR(s) of the additional section. -} - -// TypeToString is a map of strings for each RR wire type.
-var TypeToString = map[uint16]string{ - TypeA: "A", - TypeAAAA: "AAAA", - TypeAFSDB: "AFSDB", - TypeANY: "ANY", // Meta RR - TypeATMA: "ATMA", - TypeAXFR: "AXFR", // Meta RR - TypeCAA: "CAA", - TypeCDNSKEY: "CDNSKEY", - TypeCDS: "CDS", - TypeCERT: "CERT", - TypeCNAME: "CNAME", - TypeDHCID: "DHCID", - TypeDLV: "DLV", - TypeDNAME: "DNAME", - TypeDNSKEY: "DNSKEY", - TypeDS: "DS", - TypeEID: "EID", - TypeEUI48: "EUI48", - TypeEUI64: "EUI64", - TypeGID: "GID", - TypeGPOS: "GPOS", - TypeHINFO: "HINFO", - TypeHIP: "HIP", - TypeIPSECKEY: "IPSECKEY", - TypeISDN: "ISDN", - TypeIXFR: "IXFR", // Meta RR - TypeKEY: "KEY", - TypeKX: "KX", - TypeL32: "L32", - TypeL64: "L64", - TypeLOC: "LOC", - TypeLP: "LP", - TypeMB: "MB", - TypeMD: "MD", - TypeMF: "MF", - TypeMG: "MG", - TypeMINFO: "MINFO", - TypeMR: "MR", - TypeMX: "MX", - TypeNAPTR: "NAPTR", - TypeNID: "NID", - TypeNINFO: "NINFO", - TypeNIMLOC: "NIMLOC", - TypeNS: "NS", - TypeNSAP: "NSAP", - TypeNSAPPTR: "NSAP-PTR", - TypeNSEC3: "NSEC3", - TypeNSEC3PARAM: "NSEC3PARAM", - TypeNSEC: "NSEC", - TypeNULL: "NULL", - TypeOPT: "OPT", - TypeOPENPGPKEY: "OPENPGPKEY", - TypePTR: "PTR", - TypeRKEY: "RKEY", - TypeRP: "RP", - TypeRRSIG: "RRSIG", - TypeRT: "RT", - TypeSIG: "SIG", - TypeSOA: "SOA", - TypeSPF: "SPF", - TypeSRV: "SRV", - TypeSSHFP: "SSHFP", - TypeTA: "TA", - TypeTALINK: "TALINK", - TypeTKEY: "TKEY", // Meta RR - TypeTLSA: "TLSA", - TypeTSIG: "TSIG", // Meta RR - TypeTXT: "TXT", - TypePX: "PX", - TypeUID: "UID", - TypeUINFO: "UINFO", - TypeUNSPEC: "UNSPEC", - TypeURI: "URI", - TypeWKS: "WKS", - TypeX25: "X25", -} - -// StringToType is the reverse of TypeToString, needed for string parsing. -var StringToType = reverseInt16(TypeToString) - -// StringToClass is the reverse of ClassToString, needed for string parsing. -var StringToClass = reverseInt16(ClassToString) - -// Map of opcodes strings. -var StringToOpcode = reverseInt(OpcodeToString) - -// Map of rcodes strings. -var StringToRcode = reverseInt(RcodeToString) - -// ClassToString is a maps Classes to strings for each CLASS wire type. -var ClassToString = map[uint16]string{ - ClassINET: "IN", - ClassCSNET: "CS", - ClassCHAOS: "CH", - ClassHESIOD: "HS", - ClassNONE: "NONE", - ClassANY: "ANY", -} - -// OpcodeToString maps Opcodes to strings. -var OpcodeToString = map[int]string{ - OpcodeQuery: "QUERY", - OpcodeIQuery: "IQUERY", - OpcodeStatus: "STATUS", - OpcodeNotify: "NOTIFY", - OpcodeUpdate: "UPDATE", -} - -// RcodeToString maps Rcodes to strings. -var RcodeToString = map[int]string{ - RcodeSuccess: "NOERROR", - RcodeFormatError: "FORMERR", - RcodeServerFailure: "SERVFAIL", - RcodeNameError: "NXDOMAIN", - RcodeNotImplemented: "NOTIMPL", - RcodeRefused: "REFUSED", - RcodeYXDomain: "YXDOMAIN", // From RFC 2136 - RcodeYXRrset: "YXRRSET", - RcodeNXRrset: "NXRRSET", - RcodeNotAuth: "NOTAUTH", - RcodeNotZone: "NOTZONE", - RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 - // RcodeBadVers: "BADVERS", - RcodeBadKey: "BADKEY", - RcodeBadTime: "BADTIME", - RcodeBadMode: "BADMODE", - RcodeBadName: "BADNAME", - RcodeBadAlg: "BADALG", - RcodeBadTrunc: "BADTRUNC", -} - -// Rather than write the usual handful of routines to pack and -// unpack every message that can appear on the wire, we use -// reflection to write a generic pack/unpack for structs and then -// use it. Thus, if in the future we need to define new message -// structs, no new pack/unpack/printing code needs to be written. - -// Domain names are a sequence of counted strings -// split at the dots. 
They end with a zero-length string. - -// PackDomainName packs a domain name s into msg[off:]. -// If compression is wanted compress must be true and the compression -// map needs to hold a mapping between domain names and offsets -// pointing into msg[]. -func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - off1, _, err = packDomainName(s, msg, off, compression, compress) - return -} - -func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { - // special case if msg == nil - lenmsg := 256 - if msg != nil { - lenmsg = len(msg) - } - ls := len(s) - if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. - return off, 0, nil - } - // If not fully qualified, error out, but only if msg == nil #ugly - switch { - case msg == nil: - if s[ls-1] != '.' { - s += "." - ls++ - } - case msg != nil: - if s[ls-1] != '.' { - return lenmsg, 0, ErrFqdn - } - } - // Each dot ends a segment of the name. - // We trade each dot byte for a length byte. - // Except for escaped dots (\.), which are normal dots. - // There is also a trailing zero. - - // Compression - nameoffset := -1 - pointer := -1 - // Emit sequence of counted strings, chopping at dots. - begin := 0 - bs := []byte(s) - roBs, bsFresh, escapedDot := s, true, false - for i := 0; i < ls; i++ { - if bs[i] == '\\' { - for j := i; j < ls-1; j++ { - bs[j] = bs[j+1] - } - ls-- - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - // check for \DDD - if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - bs[i] = dddToByte(bs[i:]) - for j := i + 1; j < ls-2; j++ { - bs[j] = bs[j+2] - } - ls -= 2 - } else if bs[i] == 't' { - bs[i] = '\t' - } else if bs[i] == 'r' { - bs[i] = '\r' - } else if bs[i] == 'n' { - bs[i] = '\n' - } - escapedDot = bs[i] == '.' - bsFresh = false - continue - } - - if bs[i] == '.' { - if i > 0 && bs[i-1] == '.' && !escapedDot { - // two dots back to back is not legal - return lenmsg, labels, ErrRdata - } - if i-begin >= 1<<6 { // top two bits of length must be clear - return lenmsg, labels, ErrRdata - } - // off can already (we're in a loop) be bigger than len(msg) - // this happens when a name isn't fully qualified - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - if msg != nil { - msg[off] = byte(i - begin) - } - offset := off - off++ - for j := begin; j < i; j++ { - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - if msg != nil { - msg[off] = bs[j] - } - off++ - } - if compress && !bsFresh { - roBs = string(bs) - bsFresh = true - } - // Dont try to compress '.' - if compress && roBs[begin:] != "." { - if p, ok := compression[roBs[begin:]]; !ok { - // Only offsets smaller than this can be used. - if offset < maxCompressionOffset { - compression[roBs[begin:]] = offset - } - } else { - // The first hit is the longest matching dname - // keep the pointer offset we get back and store - // the offset of the current name, because that's - // where we need to insert the pointer later - - // If compress is true, we're allowed to compress this dname - if pointer == -1 && compress { - pointer = p // Where to point to - nameoffset = offset // Where to point from - break - } - } - } - labels++ - begin = i + 1 - } - escapedDot = false - } - // Root label is special - if len(bs) == 1 && bs[0] == '.' 
{ - return off, labels, nil - } - // If we did compression and we find something add the pointer here - if pointer != -1 { - // We have two bytes (14 bits) to put the pointer in - // if msg == nil, we will never do compression - msg[nameoffset], msg[nameoffset+1] = packUint16(uint16(pointer ^ 0xC000)) - off = nameoffset + 1 - goto End - } - if msg != nil { - msg[off] = 0 - } -End: - off++ - return off, labels, nil -} - -// Unpack a domain name. -// In addition to the simple sequences of counted strings above, -// domain names are allowed to refer to strings elsewhere in the -// packet, to avoid repeating common suffixes when returning -// many entries in a single domain. The pointers are marked -// by a length byte with the top two bits set. Ignoring those -// two bits, that byte and the next give a 14 bit offset from msg[0] -// where we should pick up the trail. -// Note that if we jump elsewhere in the packet, -// we return off1 == the offset after the first pointer we found, -// which is where the next record will start. -// In theory, the pointers are only allowed to jump backward. -// We let them jump anywhere and stop jumping after a while. - -// UnpackDomainName unpacks a domain name into a string. -func UnpackDomainName(msg []byte, off int) (string, int, error) { - s := make([]byte, 0, 64) - off1 := 0 - lenmsg := len(msg) - ptr := 0 // number of pointers followed -Loop: - for { - if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c := int(msg[off]) - off++ - switch c & 0xC0 { - case 0x00: - if c == 0x00 { - // end of name - if len(s) == 0 { - return ".", off, nil - } - break Loop - } - // literal string - if off+c > lenmsg { - return "", lenmsg, ErrBuf - } - for j := off; j < off+c; j++ { - switch b := msg[j]; b { - case '.', '(', ')', ';', ' ', '@': - fallthrough - case '"', '\\': - s = append(s, '\\', b) - case '\t': - s = append(s, '\\', 't') - case '\r': - s = append(s, '\\', 'r') - default: - if b < 32 || b >= 127 { // unprintable use \DDD - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - } else { - s = append(s, b) - } - } - } - s = append(s, '.') - off += c - case 0xC0: - // pointer to somewhere else in msg. - // remember location after first ptr, - // since that's how many bytes we consumed. - // also, don't follow too many pointers -- - // maybe there's a loop. 
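// For illustration, the pointer decoding performed below, assuming the
// two wire bytes 0xC0 0x0C (values chosen for this sketch):
//
//	c, c1 := 0xC0, 0x0C
//	off := (c^0xC0)<<8 | c1 // (0x00)<<8 | 0x0C == 12
//
// The top two bits mark the pointer; the low 14 bits are an offset from
// msg[0], so unpacking resumes at byte 12, the first byte after the
// fixed 12-byte header, where the question name usually sits.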
- if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c1 := msg[off] - off++ - if ptr == 0 { - off1 = off - } - if ptr++; ptr > 10 { - return "", lenmsg, &Error{err: "too many compression pointers"} - } - off = (c^0xC0)<<8 | int(c1) - default: - // 0x80 and 0x40 are reserved - return "", lenmsg, ErrRdata - } - } - if ptr == 0 { - off1 = off - } - return string(s), off1, nil -} - -func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { - var err error - if len(txt) == 0 { - if offset >= len(msg) { - return offset, ErrBuf - } - msg[offset] = 0 - return offset, nil - } - for i := range txt { - if len(txt[i]) > len(tmp) { - return offset, ErrBuf - } - offset, err = packTxtString(txt[i], msg, offset, tmp) - if err != nil { - return offset, err - } - } - return offset, err -} - -func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { - lenByteOffset := offset - if offset >= len(msg) { - return offset, ErrBuf - } - offset++ - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else if bs[i] == 't' { - msg[offset] = '\t' - } else if bs[i] == 'r' { - msg[offset] = '\r' - } else if bs[i] == 'n' { - msg[offset] = '\n' - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - l := offset - lenByteOffset - 1 - if l > 255 { - return offset, &Error{err: "string exceeded 255 bytes in txt"} - } - msg[lenByteOffset] = byte(l) - return offset, nil -} - -func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { - if offset >= len(msg) { - return offset, ErrBuf - } - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - return offset, nil -} - -func unpackTxt(msg []byte, offset, rdend int) ([]string, int, error) { - var err error - var ss []string - var s string - for offset < rdend && err == nil { - s, offset, err = unpackTxtString(msg, offset) - if err == nil { - ss = append(ss, s) - } - } - return ss, offset, err -} - -func unpackTxtString(msg []byte, offset int) (string, int, error) { - if offset+1 > len(msg) { - return "", offset, &Error{err: "overflow unpacking txt"} - } - l := int(msg[offset]) - if offset+l+1 > len(msg) { - return "", offset, &Error{err: "overflow unpacking txt"} - } - s := make([]byte, 0, l) - for _, b := range msg[offset+1 : offset+1+l] { - switch b { - case '"', '\\': - s = append(s, '\\', b) - case '\t': - s = append(s, `\t`...) - case '\r': - s = append(s, `\r`...) - case '\n': - s = append(s, `\n`...) - default: - if b < 32 || b > 127 { // unprintable - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - } else { - s = append(s, b) - } - } - } - offset += 1 + l - return string(s), offset, nil -} - -// Pack a reflect.StructValue into msg. 
Struct members can only be uint8, uint16, uint32, string, -// slices and other (often anonymous) structs. -func packStructValue(val reflect.Value, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - var txtTmp []byte - lenmsg := len(msg) - numfield := val.NumField() - for i := 0; i < numfield; i++ { - typefield := val.Type().Field(i) - if typefield.Tag == `dns:"-"` { - continue - } - switch fv := val.Field(i); fv.Kind() { - default: - return lenmsg, &Error{err: "bad kind packing"} - case reflect.Interface: - // PrivateRR is the only RR implementation that has interface field. - // therefore it's expected that this interface would be PrivateRdata - switch data := fv.Interface().(type) { - case PrivateRdata: - n, err := data.Pack(msg[off:]) - if err != nil { - return lenmsg, err - } - off += n - default: - return lenmsg, &Error{err: "bad kind interface packing"} - } - case reflect.Slice: - switch typefield.Tag { - default: - return lenmsg, &Error{"bad tag packing slice: " + typefield.Tag.Get("dns")} - case `dns:"domain-name"`: - for j := 0; j < val.Field(i).Len(); j++ { - element := val.Field(i).Index(j).String() - off, err = PackDomainName(element, msg, off, compression, false && compress) - if err != nil { - return lenmsg, err - } - } - case `dns:"txt"`: - if txtTmp == nil { - txtTmp = make([]byte, 256*4+1) - } - off, err = packTxt(fv.Interface().([]string), msg, off, txtTmp) - if err != nil { - return lenmsg, err - } - case `dns:"opt"`: // edns - for j := 0; j < val.Field(i).Len(); j++ { - element := val.Field(i).Index(j).Interface() - b, e := element.(EDNS0).pack() - if e != nil { - return lenmsg, &Error{err: "overflow packing opt"} - } - // Option code - msg[off], msg[off+1] = packUint16(element.(EDNS0).Option()) - // Length - msg[off+2], msg[off+3] = packUint16(uint16(len(b))) - off += 4 - if off+len(b) > lenmsg { - copy(msg[off:], b) - off = lenmsg - continue - } - // Actual data - copy(msg[off:off+len(b)], b) - off += len(b) - } - case `dns:"a"`: - if val.Type().String() == "dns.IPSECKEY" { - // Field(2) is GatewayType, must be 1 - if val.Field(2).Uint() != 1 { - continue - } - } - // It must be a slice of 4, even if it is 16, we encode - // only the first 4 - if off+net.IPv4len > lenmsg { - return lenmsg, &Error{err: "overflow packing a"} - } - switch fv.Len() { - case net.IPv6len: - msg[off] = byte(fv.Index(12).Uint()) - msg[off+1] = byte(fv.Index(13).Uint()) - msg[off+2] = byte(fv.Index(14).Uint()) - msg[off+3] = byte(fv.Index(15).Uint()) - off += net.IPv4len - case net.IPv4len: - msg[off] = byte(fv.Index(0).Uint()) - msg[off+1] = byte(fv.Index(1).Uint()) - msg[off+2] = byte(fv.Index(2).Uint()) - msg[off+3] = byte(fv.Index(3).Uint()) - off += net.IPv4len - case 0: - // Allowed, for dynamic updates - default: - return lenmsg, &Error{err: "overflow packing a"} - } - case `dns:"aaaa"`: - if val.Type().String() == "dns.IPSECKEY" { - // Field(2) is GatewayType, must be 2 - if val.Field(2).Uint() != 2 { - continue - } - } - if fv.Len() == 0 { - break - } - if fv.Len() > net.IPv6len || off+fv.Len() > lenmsg { - return lenmsg, &Error{err: "overflow packing aaaa"} - } - for j := 0; j < net.IPv6len; j++ { - msg[off] = byte(fv.Index(j).Uint()) - off++ - } - case `dns:"wks"`: - // TODO(miek): this is wrong should be lenrd - if off == lenmsg { - break // dyn. 
updates - } - if val.Field(i).Len() == 0 { - break - } - var bitmapbyte uint16 - for j := 0; j < val.Field(i).Len(); j++ { - serv := uint16((fv.Index(j).Uint())) - bitmapbyte = uint16(serv / 8) - if int(bitmapbyte) > lenmsg { - return lenmsg, &Error{err: "overflow packing wks"} - } - bit := uint16(serv) - bitmapbyte*8 - msg[bitmapbyte] = byte(1 << (7 - bit)) - } - off += int(bitmapbyte) - case `dns:"nsec"`: // NSEC/NSEC3 - // This is the uint16 type bitmap - if val.Field(i).Len() == 0 { - // Do absolutely nothing - break - } - - lastwindow := uint16(0) - length := uint16(0) - if off+2 > lenmsg { - return lenmsg, &Error{err: "overflow packing nsecx"} - } - for j := 0; j < val.Field(i).Len(); j++ { - t := uint16((fv.Index(j).Uint())) - window := uint16(t / 256) - if lastwindow != window { - // New window, jump to the new offset - off += int(length) + 3 - if off > lenmsg { - return lenmsg, &Error{err: "overflow packing nsecx bitmap"} - } - } - length = (t - window*256) / 8 - bit := t - (window * 256) - (length * 8) - if off+2+int(length) > lenmsg { - return lenmsg, &Error{err: "overflow packing nsecx bitmap"} - } - - // Setting the window # - msg[off] = byte(window) - // Setting the octets length - msg[off+1] = byte(length + 1) - // Setting the bit value for the type in the right octet - msg[off+2+int(length)] |= byte(1 << (7 - bit)) - lastwindow = window - } - off += 2 + int(length) - off++ - if off > lenmsg { - return lenmsg, &Error{err: "overflow packing nsecx bitmap"} - } - } - case reflect.Struct: - off, err = packStructValue(fv, msg, off, compression, compress) - if err != nil { - return lenmsg, err - } - case reflect.Uint8: - if off+1 > lenmsg { - return lenmsg, &Error{err: "overflow packing uint8"} - } - msg[off] = byte(fv.Uint()) - off++ - case reflect.Uint16: - if off+2 > lenmsg { - return lenmsg, &Error{err: "overflow packing uint16"} - } - i := fv.Uint() - msg[off] = byte(i >> 8) - msg[off+1] = byte(i) - off += 2 - case reflect.Uint32: - if off+4 > lenmsg { - return lenmsg, &Error{err: "overflow packing uint32"} - } - i := fv.Uint() - msg[off] = byte(i >> 24) - msg[off+1] = byte(i >> 16) - msg[off+2] = byte(i >> 8) - msg[off+3] = byte(i) - off += 4 - case reflect.Uint64: - switch typefield.Tag { - default: - if off+8 > lenmsg { - return lenmsg, &Error{err: "overflow packing uint64"} - } - i := fv.Uint() - msg[off] = byte(i >> 56) - msg[off+1] = byte(i >> 48) - msg[off+2] = byte(i >> 40) - msg[off+3] = byte(i >> 32) - msg[off+4] = byte(i >> 24) - msg[off+5] = byte(i >> 16) - msg[off+6] = byte(i >> 8) - msg[off+7] = byte(i) - off += 8 - case `dns:"uint48"`: - // Used in TSIG, where it stops at 48 bits, so we discard the upper 16 - if off+6 > lenmsg { - return lenmsg, &Error{err: "overflow packing uint64 as uint48"} - } - i := fv.Uint() - msg[off] = byte(i >> 40) - msg[off+1] = byte(i >> 32) - msg[off+2] = byte(i >> 24) - msg[off+3] = byte(i >> 16) - msg[off+4] = byte(i >> 8) - msg[off+5] = byte(i) - off += 6 - } - case reflect.String: - // There are multiple string encodings. - // The tag distinguishes ordinary strings from domain names. 
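// For illustration, a rough sketch of the struct tags this switch
// dispatches on; the MX type declared in types.go of this package looks
// approximately like:
//
//	type MX struct {
//		Hdr        RR_Header
//		Preference uint16
//		Mx         string `dns:"cdomain-name"` // compressible name
//	}
//
// A string field with an empty tag falls through to the txt-string case
// at the bottom of this switch.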
- s := fv.String() - switch typefield.Tag { - default: - return lenmsg, &Error{"bad tag packing string: " + typefield.Tag.Get("dns")} - case `dns:"base64"`: - b64, e := fromBase64([]byte(s)) - if e != nil { - return lenmsg, e - } - copy(msg[off:off+len(b64)], b64) - off += len(b64) - case `dns:"domain-name"`: - if val.Type().String() == "dns.IPSECKEY" { - // Field(2) is GatewayType, 1 and 2 or used for addresses - x := val.Field(2).Uint() - if x == 1 || x == 2 { - continue - } - } - if off, err = PackDomainName(s, msg, off, compression, false && compress); err != nil { - return lenmsg, err - } - case `dns:"cdomain-name"`: - if off, err = PackDomainName(s, msg, off, compression, true && compress); err != nil { - return lenmsg, err - } - case `dns:"size-base32"`: - // This is purely for NSEC3 atm, the previous byte must - // holds the length of the encoded string. As NSEC3 - // is only defined to SHA1, the hashlength is 20 (160 bits) - msg[off-1] = 20 - fallthrough - case `dns:"base32"`: - b32, e := fromBase32([]byte(s)) - if e != nil { - return lenmsg, e - } - copy(msg[off:off+len(b32)], b32) - off += len(b32) - case `dns:"size-hex"`: - fallthrough - case `dns:"hex"`: - // There is no length encoded here - h, e := hex.DecodeString(s) - if e != nil { - return lenmsg, e - } - if off+hex.DecodedLen(len(s)) > lenmsg { - return lenmsg, &Error{err: "overflow packing hex"} - } - copy(msg[off:off+hex.DecodedLen(len(s))], h) - off += hex.DecodedLen(len(s)) - case `dns:"size"`: - // the size is already encoded in the RR, we can safely use the - // length of string. String is RAW (not encoded in hex, nor base64) - copy(msg[off:off+len(s)], s) - off += len(s) - case `dns:"octet"`: - bytesTmp := make([]byte, 256) - off, err = packOctetString(fv.String(), msg, off, bytesTmp) - if err != nil { - return lenmsg, err - } - case `dns:"txt"`: - fallthrough - case "": - if txtTmp == nil { - txtTmp = make([]byte, 256*4+1) - } - off, err = packTxtString(fv.String(), msg, off, txtTmp) - if err != nil { - return lenmsg, err - } - } - } - } - return off, nil -} - -func structValue(any interface{}) reflect.Value { - return reflect.ValueOf(any).Elem() -} - -// PackStruct packs any structure to wire format. -func PackStruct(any interface{}, msg []byte, off int) (off1 int, err error) { - off, err = packStructValue(structValue(any), msg, off, nil, false) - return off, err -} - -func packStructCompress(any interface{}, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - off, err = packStructValue(structValue(any), msg, off, compression, compress) - return off, err -} - -// TODO(miek): Fix use of rdlength here - -// Unpack a reflect.StructValue from msg. -// Same restrictions as packStructValue. -func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err error) { - var lenrd int - lenmsg := len(msg) - for i := 0; i < val.NumField(); i++ { - if lenrd != 0 && lenrd == off { - break - } - if off > lenmsg { - return lenmsg, &Error{"bad offset unpacking"} - } - switch fv := val.Field(i); fv.Kind() { - default: - return lenmsg, &Error{err: "bad kind unpacking"} - case reflect.Interface: - // PrivateRR is the only RR implementation that has interface field. 
- // therefore it's expected that this interface would be PrivateRdata - switch data := fv.Interface().(type) { - case PrivateRdata: - n, err := data.Unpack(msg[off:lenrd]) - if err != nil { - return lenmsg, err - } - off += n - default: - return lenmsg, &Error{err: "bad kind interface unpacking"} - } - case reflect.Slice: - switch val.Type().Field(i).Tag { - default: - return lenmsg, &Error{"bad tag unpacking slice: " + val.Type().Field(i).Tag.Get("dns")} - case `dns:"domain-name"`: - // HIP record slice of name (or none) - var servers []string - var s string - for off < lenrd { - s, off, err = UnpackDomainName(msg, off) - if err != nil { - return lenmsg, err - } - servers = append(servers, s) - } - fv.Set(reflect.ValueOf(servers)) - case `dns:"txt"`: - if off == lenmsg || lenrd == off { - break - } - var txt []string - txt, off, err = unpackTxt(msg, off, lenrd) - if err != nil { - return lenmsg, err - } - fv.Set(reflect.ValueOf(txt)) - case `dns:"opt"`: // edns0 - if off == lenrd { - // This is an EDNS0 (OPT Record) with no rdata - // We can safely return here. - break - } - var edns []EDNS0 - Option: - code := uint16(0) - if off+2 > lenmsg { - return lenmsg, &Error{err: "overflow unpacking opt"} - } - code, off = unpackUint16(msg, off) - optlen, off1 := unpackUint16(msg, off) - if off1+int(optlen) > lenrd { - return lenmsg, &Error{err: "overflow unpacking opt"} - } - switch code { - case EDNS0NSID: - e := new(EDNS0_NSID) - if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil { - return lenmsg, err - } - edns = append(edns, e) - off = off1 + int(optlen) - case EDNS0SUBNET, EDNS0SUBNETDRAFT: - e := new(EDNS0_SUBNET) - if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil { - return lenmsg, err - } - edns = append(edns, e) - off = off1 + int(optlen) - if code == EDNS0SUBNETDRAFT { - e.DraftOption = true - } - case EDNS0UL: - e := new(EDNS0_UL) - if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil { - return lenmsg, err - } - edns = append(edns, e) - off = off1 + int(optlen) - case EDNS0LLQ: - e := new(EDNS0_LLQ) - if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil { - return lenmsg, err - } - edns = append(edns, e) - off = off1 + int(optlen) - case EDNS0DAU: - e := new(EDNS0_DAU) - if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil { - return lenmsg, err - } - edns = append(edns, e) - off = off1 + int(optlen) - case EDNS0DHU: - e := new(EDNS0_DHU) - if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil { - return lenmsg, err - } - edns = append(edns, e) - off = off1 + int(optlen) - case EDNS0N3U: - e := new(EDNS0_N3U) - if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil { - return lenmsg, err - } - edns = append(edns, e) - off = off1 + int(optlen) - default: - e := new(EDNS0_LOCAL) - e.Code = code - if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil { - return lenmsg, err - } - edns = append(edns, e) - off = off1 + int(optlen) - } - if off < lenrd { - goto Option - } - fv.Set(reflect.ValueOf(edns)) - case `dns:"a"`: - if val.Type().String() == "dns.IPSECKEY" { - // Field(2) is GatewayType, must be 1 - if val.Field(2).Uint() != 1 { - continue - } - } - if off == lenrd { - break // dyn. 
update - } - if off+net.IPv4len > lenrd || off+net.IPv4len > lenmsg { - return lenmsg, &Error{err: "overflow unpacking a"} - } - fv.Set(reflect.ValueOf(net.IPv4(msg[off], msg[off+1], msg[off+2], msg[off+3]))) - off += net.IPv4len - case `dns:"aaaa"`: - if val.Type().String() == "dns.IPSECKEY" { - // Field(2) is GatewayType, must be 2 - if val.Field(2).Uint() != 2 { - continue - } - } - if off == lenrd { - break - } - if off+net.IPv6len > lenrd || off+net.IPv6len > lenmsg { - return lenmsg, &Error{err: "overflow unpacking aaaa"} - } - fv.Set(reflect.ValueOf(net.IP{msg[off], msg[off+1], msg[off+2], msg[off+3], msg[off+4], - msg[off+5], msg[off+6], msg[off+7], msg[off+8], msg[off+9], msg[off+10], - msg[off+11], msg[off+12], msg[off+13], msg[off+14], msg[off+15]})) - off += net.IPv6len - case `dns:"wks"`: - // Rest of the record is the bitmap - var serv []uint16 - j := 0 - for off < lenrd { - if off+1 > lenmsg { - return lenmsg, &Error{err: "overflow unpacking wks"} - } - b := msg[off] - // Check the bits one by one, and set the type - if b&0x80 == 0x80 { - serv = append(serv, uint16(j*8+0)) - } - if b&0x40 == 0x40 { - serv = append(serv, uint16(j*8+1)) - } - if b&0x20 == 0x20 { - serv = append(serv, uint16(j*8+2)) - } - if b&0x10 == 0x10 { - serv = append(serv, uint16(j*8+3)) - } - if b&0x8 == 0x8 { - serv = append(serv, uint16(j*8+4)) - } - if b&0x4 == 0x4 { - serv = append(serv, uint16(j*8+5)) - } - if b&0x2 == 0x2 { - serv = append(serv, uint16(j*8+6)) - } - if b&0x1 == 0x1 { - serv = append(serv, uint16(j*8+7)) - } - j++ - off++ - } - fv.Set(reflect.ValueOf(serv)) - case `dns:"nsec"`: // NSEC/NSEC3 - if off == lenrd { - break - } - // Rest of the record is the type bitmap - if off+2 > lenrd || off+2 > lenmsg { - return lenmsg, &Error{err: "overflow unpacking nsecx"} - } - var nsec []uint16 - length := 0 - window := 0 - for off+2 < lenrd { - window = int(msg[off]) - length = int(msg[off+1]) - //println("off, windows, length, end", off, window, length, endrr) - if length == 0 { - // A length window of zero is strange. If there - // the window should not have been specified. Bail out - // println("dns: length == 0 when unpacking NSEC") - return lenmsg, &Error{err: "overflow unpacking nsecx"} - } - if length > 32 { - return lenmsg, &Error{err: "overflow unpacking nsecx"} - } - - // Walk the bytes in the window - and check the bit settings... 
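// For illustration: in this window/bitmap encoding (RFC 4034, section
// 4.1.2) a type T lives in window T/256, octet (T%256)/8, at bit
// 7-(T%256)%8 counting from the most significant bit. A block with
// window 0, length 1 and bitmap byte 0x40 therefore encodes exactly
// type 1 (A): bit 6 of octet 0 is set, and the bit tests below
// reconstruct it as 0*256 + 0*8 + 1.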
- off += 2 - for j := 0; j < length; j++ { - if off+j+1 > lenmsg { - return lenmsg, &Error{err: "overflow unpacking nsecx"} - } - b := msg[off+j] - // Check the bits one by one, and set the type - if b&0x80 == 0x80 { - nsec = append(nsec, uint16(window*256+j*8+0)) - } - if b&0x40 == 0x40 { - nsec = append(nsec, uint16(window*256+j*8+1)) - } - if b&0x20 == 0x20 { - nsec = append(nsec, uint16(window*256+j*8+2)) - } - if b&0x10 == 0x10 { - nsec = append(nsec, uint16(window*256+j*8+3)) - } - if b&0x8 == 0x8 { - nsec = append(nsec, uint16(window*256+j*8+4)) - } - if b&0x4 == 0x4 { - nsec = append(nsec, uint16(window*256+j*8+5)) - } - if b&0x2 == 0x2 { - nsec = append(nsec, uint16(window*256+j*8+6)) - } - if b&0x1 == 0x1 { - nsec = append(nsec, uint16(window*256+j*8+7)) - } - } - off += length - } - fv.Set(reflect.ValueOf(nsec)) - } - case reflect.Struct: - off, err = unpackStructValue(fv, msg, off) - if err != nil { - return lenmsg, err - } - if val.Type().Field(i).Name == "Hdr" { - lenrd = off + int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint()) - } - case reflect.Uint8: - if off == lenmsg { - break - } - if off+1 > lenmsg { - return lenmsg, &Error{err: "overflow unpacking uint8"} - } - fv.SetUint(uint64(uint8(msg[off]))) - off++ - case reflect.Uint16: - if off == lenmsg { - break - } - var i uint16 - if off+2 > lenmsg { - return lenmsg, &Error{err: "overflow unpacking uint16"} - } - i, off = unpackUint16(msg, off) - fv.SetUint(uint64(i)) - case reflect.Uint32: - if off == lenmsg { - break - } - if off+4 > lenmsg { - return lenmsg, &Error{err: "overflow unpacking uint32"} - } - fv.SetUint(uint64(uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3]))) - off += 4 - case reflect.Uint64: - switch val.Type().Field(i).Tag { - default: - if off+8 > lenmsg { - return lenmsg, &Error{err: "overflow unpacking uint64"} - } - fv.SetUint(uint64(uint64(msg[off])<<56 | uint64(msg[off+1])<<48 | uint64(msg[off+2])<<40 | - uint64(msg[off+3])<<32 | uint64(msg[off+4])<<24 | uint64(msg[off+5])<<16 | uint64(msg[off+6])<<8 | uint64(msg[off+7]))) - off += 8 - case `dns:"uint48"`: - // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) - if off+6 > lenmsg { - return lenmsg, &Error{err: "overflow unpacking uint64 as uint48"} - } - fv.SetUint(uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | - uint64(msg[off+4])<<8 | uint64(msg[off+5]))) - off += 6 - } - case reflect.String: - var s string - if off == lenmsg { - break - } - switch val.Type().Field(i).Tag { - default: - return lenmsg, &Error{"bad tag unpacking string: " + val.Type().Field(i).Tag.Get("dns")} - case `dns:"octet"`: - strend := lenrd - if strend > lenmsg { - return lenmsg, &Error{err: "overflow unpacking octet"} - } - s = string(msg[off:strend]) - off = strend - case `dns:"hex"`: - hexend := lenrd - if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) { - hexend = off + int(val.FieldByName("HitLength").Uint()) - } - if hexend > lenrd || hexend > lenmsg { - return lenmsg, &Error{err: "overflow unpacking hex"} - } - s = hex.EncodeToString(msg[off:hexend]) - off = hexend - case `dns:"base64"`: - // Rest of the RR is base64 encoded value - b64end := lenrd - if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) { - b64end = off + int(val.FieldByName("PublicKeyLength").Uint()) - } - if b64end > lenrd || b64end > lenmsg { - return lenmsg, &Error{err: "overflow unpacking base64"} - } - s = 
toBase64(msg[off:b64end]) - off = b64end - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - if val.Type().String() == "dns.IPSECKEY" { - // Field(2) is GatewayType, 1 and 2 or used for addresses - x := val.Field(2).Uint() - if x == 1 || x == 2 { - continue - } - } - if off == lenmsg { - // zero rdata foo, OK for dyn. updates - break - } - s, off, err = UnpackDomainName(msg, off) - if err != nil { - return lenmsg, err - } - case `dns:"size-base32"`: - var size int - switch val.Type().Name() { - case "NSEC3": - switch val.Type().Field(i).Name { - case "NextDomain": - name := val.FieldByName("HashLength") - size = int(name.Uint()) - } - } - if off+size > lenmsg { - return lenmsg, &Error{err: "overflow unpacking base32"} - } - s = toBase32(msg[off : off+size]) - off += size - case `dns:"size-hex"`: - // a "size" string, but it must be encoded in hex in the string - var size int - switch val.Type().Name() { - case "NSEC3": - switch val.Type().Field(i).Name { - case "Salt": - name := val.FieldByName("SaltLength") - size = int(name.Uint()) - case "NextDomain": - name := val.FieldByName("HashLength") - size = int(name.Uint()) - } - case "TSIG": - switch val.Type().Field(i).Name { - case "MAC": - name := val.FieldByName("MACSize") - size = int(name.Uint()) - case "OtherData": - name := val.FieldByName("OtherLen") - size = int(name.Uint()) - } - } - if off+size > lenmsg { - return lenmsg, &Error{err: "overflow unpacking hex"} - } - s = hex.EncodeToString(msg[off : off+size]) - off += size - case `dns:"txt"`: - fallthrough - case "": - s, off, err = unpackTxtString(msg, off) - } - fv.SetString(s) - } - } - return off, nil -} - -// Helpers for dealing with escaped bytes -func isDigit(b byte) bool { return b >= '0' && b <= '9' } - -func dddToByte(s []byte) byte { - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) -} - -// UnpackStruct unpacks a binary message from offset off to the interface -// value given. -func UnpackStruct(any interface{}, msg []byte, off int) (int, error) { - return unpackStructValue(structValue(any), msg, off) -} - -// Helper function for packing and unpacking -func intToBytes(i *big.Int, length int) []byte { - buf := i.Bytes() - if len(buf) < length { - b := make([]byte, length) - copy(b[length-len(buf):], buf) - return b - } - return buf -} - -func unpackUint16(msg []byte, off int) (uint16, int) { - return uint16(msg[off])<<8 | uint16(msg[off+1]), off + 2 -} - -func packUint16(i uint16) (byte, byte) { - return byte(i >> 8), byte(i) -} - -func toBase32(b []byte) string { - return base32.HexEncoding.EncodeToString(b) -} - -func fromBase32(s []byte) (buf []byte, err error) { - buflen := base32.HexEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base32.HexEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -func toBase64(b []byte) string { - return base64.StdEncoding.EncodeToString(b) -} - -func fromBase64(s []byte) (buf []byte, err error) { - buflen := base64.StdEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base64.StdEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -// PackRR packs a resource record rr into msg[off:]. -// See PackDomainName for documentation about the compression. 
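// A minimal round-trip sketch for PackRR and UnpackRR; the RR text and
// the 512-byte buffer are illustrative choices, not requirements:
//
//	rr, err := NewRR("miek.nl. 3600 IN A 127.0.0.1")
//	if err == nil {
//		buf := make([]byte, 512)
//		off, err := PackRR(rr, buf, 0, nil, false)
//		if err == nil {
//			rr2, _, _ := UnpackRR(buf[:off], 0)
//			_ = rr2 // rr2.String() should equal rr.String()
//		}
//	}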
-func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - if rr == nil { - return len(msg), &Error{err: "nil rr"} - } - - off1, err = packStructCompress(rr, msg, off, compression, compress) - if err != nil { - return len(msg), err - } - if rawSetRdlength(msg, off, off1) { - return off1, nil - } - return off, ErrRdata -} - -// UnpackRR unpacks msg[off:] into an RR. -func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { - // unpack just the header, to find the rr type and length - var h RR_Header - off0 := off - if off, err = UnpackStruct(&h, msg, off); err != nil { - return nil, len(msg), err - } - end := off + int(h.Rdlength) - // make an rr of that type and re-unpack. - mk, known := typeToRR[h.Rrtype] - if !known { - rr = new(RFC3597) - } else { - rr = mk() - } - off, err = UnpackStruct(rr, msg, off0) - if off != end { - return &h, end, &Error{err: "bad rdlength"} - } - return rr, off, err -} - -// Reverse a map -func reverseInt8(m map[uint8]string) map[string]uint8 { - n := make(map[string]uint8) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt16(m map[uint16]string) map[string]uint16 { - n := make(map[string]uint16) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt(m map[int]string) map[string]int { - n := make(map[string]int) - for u, s := range m { - n[s] = u - } - return n -} - -// Convert a MsgHdr to a string, with dig-like headers: -// -//;; opcode: QUERY, status: NOERROR, id: 48404 -// -//;; flags: qr aa rd ra; -func (h *MsgHdr) String() string { - if h == nil { - return " MsgHdr" - } - - s := ";; opcode: " + OpcodeToString[h.Opcode] - s += ", status: " + RcodeToString[h.Rcode] - s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" - - s += ";; flags:" - if h.Response { - s += " qr" - } - if h.Authoritative { - s += " aa" - } - if h.Truncated { - s += " tc" - } - if h.RecursionDesired { - s += " rd" - } - if h.RecursionAvailable { - s += " ra" - } - if h.Zero { // Hmm - s += " z" - } - if h.AuthenticatedData { - s += " ad" - } - if h.CheckingDisabled { - s += " cd" - } - - s += ";" - return s -} - -// Pack packs a Msg: it is converted to to wire format. -// If the dns.Compress is true the message will be in compressed wire format. -func (dns *Msg) Pack() (msg []byte, err error) { - return dns.PackBuffer(nil) -} - -// PackBuffer packs a Msg, using the given buffer buf. If buf is too small -// a new buffer is allocated. -func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { - var dh Header - var compression map[string]int - if dns.Compress { - compression = make(map[string]int) // Compression pointer mappings - } - - if dns.Rcode < 0 || dns.Rcode > 0xFFF { - return nil, ErrRcode - } - if dns.Rcode > 0xF { - // Regular RCODE field is 4 bits - opt := dns.IsEdns0() - if opt == nil { - return nil, ErrExtendedRcode - } - opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) - dns.Rcode &= 0xF - } - - // Convert convenient Msg into wire-like Header. - dh.Id = dns.Id - dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) - if dns.Response { - dh.Bits |= _QR - } - if dns.Authoritative { - dh.Bits |= _AA - } - if dns.Truncated { - dh.Bits |= _TC - } - if dns.RecursionDesired { - dh.Bits |= _RD - } - if dns.RecursionAvailable { - dh.Bits |= _RA - } - if dns.Zero { - dh.Bits |= _Z - } - if dns.AuthenticatedData { - dh.Bits |= _AD - } - if dns.CheckingDisabled { - dh.Bits |= _CD - } - - // Prepare variable sized arrays. 
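// For reference, the 16-bit dh.Bits word assembled above follows the
// RFC 1035 header layout (AD and CD were added later by DNSSEC):
//
//	bit 0                                       15
//	|QR|  Opcode   |AA|TC|RD|RA| Z|AD|CD| Rcode   |
//
// e.g. an authoritative answer to a recursive query has QR, AA and RD set.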
- question := dns.Question - answer := dns.Answer - ns := dns.Ns - extra := dns.Extra - - dh.Qdcount = uint16(len(question)) - dh.Ancount = uint16(len(answer)) - dh.Nscount = uint16(len(ns)) - dh.Arcount = uint16(len(extra)) - - // We need the uncompressed length here, because we first pack it and then compress it. - msg = buf - compress := dns.Compress - dns.Compress = false - if packLen := dns.Len() + 1; len(msg) < packLen { - msg = make([]byte, packLen) - } - dns.Compress = compress - - // Pack it in: header and then the pieces. - off := 0 - off, err = packStructCompress(&dh, msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - for i := 0; i < len(question); i++ { - off, err = packStructCompress(&question[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - for i := 0; i < len(answer); i++ { - off, err = PackRR(answer[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - for i := 0; i < len(ns); i++ { - off, err = PackRR(ns[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - for i := 0; i < len(extra); i++ { - off, err = PackRR(extra[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - return msg[:off], nil -} - -// Unpack unpacks a binary message to a Msg structure. -func (dns *Msg) Unpack(msg []byte) (err error) { - // Header. - var dh Header - off := 0 - if off, err = UnpackStruct(&dh, msg, off); err != nil { - return err - } - dns.Id = dh.Id - dns.Response = (dh.Bits & _QR) != 0 - dns.Opcode = int(dh.Bits>>11) & 0xF - dns.Authoritative = (dh.Bits & _AA) != 0 - dns.Truncated = (dh.Bits & _TC) != 0 - dns.RecursionDesired = (dh.Bits & _RD) != 0 - dns.RecursionAvailable = (dh.Bits & _RA) != 0 - dns.Zero = (dh.Bits & _Z) != 0 - dns.AuthenticatedData = (dh.Bits & _AD) != 0 - dns.CheckingDisabled = (dh.Bits & _CD) != 0 - dns.Rcode = int(dh.Bits & 0xF) - - // Arrays. - dns.Question = make([]Question, dh.Qdcount) - dns.Answer = make([]RR, dh.Ancount) - dns.Ns = make([]RR, dh.Nscount) - dns.Extra = make([]RR, dh.Arcount) - - for i := 0; i < len(dns.Question); i++ { - off, err = UnpackStruct(&dns.Question[i], msg, off) - if err != nil { - return err - } - } - // If we see a TC bit being set we return here, without - // an error, because technically it isn't an error. So return - // without parsing the potentially corrupt packet and hitting an error. - // TODO(miek): this isn't the best strategy! - if dns.Truncated { - dns.Answer = nil - dns.Ns = nil - dns.Extra = nil - return nil - } - for i := 0; i < len(dns.Answer); i++ { - dns.Answer[i], off, err = UnpackRR(msg, off) - if err != nil { - return err - } - } - for i := 0; i < len(dns.Ns); i++ { - dns.Ns[i], off, err = UnpackRR(msg, off) - if err != nil { - return err - } - } - for i := 0; i < len(dns.Extra); i++ { - dns.Extra[i], off, err = UnpackRR(msg, off) - if err != nil { - return err - } - } - if off != len(msg) { - // TODO(miek) make this an error? - // use PackOpt to let people tell how detailed the error reporting should be? - // println("dns: extra bytes in dns packet", off, "<", len(msg)) - } - return nil -} - -// Convert a complete message to a string with dig-like output. 
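// A sketch of building a query message by hand and rendering it with the
// String methods below; the owner name is illustrative, and the Question
// fields are assumed to be Name, Qtype and Qclass as declared elsewhere
// in this package:
//
//	m := new(Msg)
//	m.Id = Id()
//	m.RecursionDesired = true
//	m.Question = []Question{{"miek.nl.", TypeA, ClassINET}}
//	fmt.Println(m.String()) // needs "fmt" imported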
-func (dns *Msg) String() string { - if dns == nil { - return " MsgHdr" - } - s := dns.MsgHdr.String() + " " - s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " - s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " - s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " - s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" - if len(dns.Question) > 0 { - s += "\n;; QUESTION SECTION:\n" - for i := 0; i < len(dns.Question); i++ { - s += dns.Question[i].String() + "\n" - } - } - if len(dns.Answer) > 0 { - s += "\n;; ANSWER SECTION:\n" - for i := 0; i < len(dns.Answer); i++ { - if dns.Answer[i] != nil { - s += dns.Answer[i].String() + "\n" - } - } - } - if len(dns.Ns) > 0 { - s += "\n;; AUTHORITY SECTION:\n" - for i := 0; i < len(dns.Ns); i++ { - if dns.Ns[i] != nil { - s += dns.Ns[i].String() + "\n" - } - } - } - if len(dns.Extra) > 0 { - s += "\n;; ADDITIONAL SECTION:\n" - for i := 0; i < len(dns.Extra); i++ { - if dns.Extra[i] != nil { - s += dns.Extra[i].String() + "\n" - } - } - } - return s -} - -// Len returns the message length when in (un)compressed wire format. -// If dns.Compress is true compression it is taken into account. Len() -// is provided to be a faster way to get the size of the resulting packet, -// than packing it, measuring the size and discarding the buffer. -func (dns *Msg) Len() int { - // We always return one more than needed. - l := 12 // Message header is always 12 bytes - var compression map[string]int - if dns.Compress { - compression = make(map[string]int) - } - for i := 0; i < len(dns.Question); i++ { - l += dns.Question[i].len() - if dns.Compress { - compressionLenHelper(compression, dns.Question[i].Name) - } - } - for i := 0; i < len(dns.Answer); i++ { - l += dns.Answer[i].len() - if dns.Compress { - k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name) - if ok { - l += 1 - k - } - compressionLenHelper(compression, dns.Answer[i].Header().Name) - k, ok = compressionLenSearchType(compression, dns.Answer[i]) - if ok { - l += 1 - k - } - compressionLenHelperType(compression, dns.Answer[i]) - } - } - for i := 0; i < len(dns.Ns); i++ { - l += dns.Ns[i].len() - if dns.Compress { - k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name) - if ok { - l += 1 - k - } - compressionLenHelper(compression, dns.Ns[i].Header().Name) - k, ok = compressionLenSearchType(compression, dns.Ns[i]) - if ok { - l += 1 - k - } - compressionLenHelperType(compression, dns.Ns[i]) - } - } - for i := 0; i < len(dns.Extra); i++ { - l += dns.Extra[i].len() - if dns.Compress { - k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name) - if ok { - l += 1 - k - } - compressionLenHelper(compression, dns.Extra[i].Header().Name) - k, ok = compressionLenSearchType(compression, dns.Extra[i]) - if ok { - l += 1 - k - } - compressionLenHelperType(compression, dns.Extra[i]) - } - } - return l -} - -// Put the parts of the name in the compression map. -func compressionLenHelper(c map[string]int, s string) { - pref := "" - lbs := Split(s) - for j := len(lbs) - 1; j >= 0; j-- { - pref = s[lbs[j]:] - if _, ok := c[pref]; !ok { - c[pref] = len(pref) - } - } -} - -// Look for each part in the compression map and returns its length, -// keep on searching so we get the longest match. 
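// For illustration: after compressionLenHelper(c, "www.miek.nl.") the
// map c holds the suffixes "nl.", "miek.nl." and "www.miek.nl.". A later
// compressionLenSearch(c, "a.miek.nl.") misses on the full name, steps
// one label forward with NextLabel, hits "miek.nl." and returns
// (8, true); Len() above then credits 8-1 = 7 bytes, the saving from
// replacing that 9-byte wire suffix with a 2-byte compression pointer.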
-func compressionLenSearch(c map[string]int, s string) (int, bool) { - off := 0 - end := false - if s == "" { // don't bork on bogus data - return 0, false - } - for { - if _, ok := c[s[off:]]; ok { - return len(s[off:]), true - } - if end { - break - } - off, end = NextLabel(s, off) - } - return 0, false -} - -// TODO(miek): should add all types, because the all can be *used* for compression. -func compressionLenHelperType(c map[string]int, r RR) { - switch x := r.(type) { - case *NS: - compressionLenHelper(c, x.Ns) - case *MX: - compressionLenHelper(c, x.Mx) - case *CNAME: - compressionLenHelper(c, x.Target) - case *PTR: - compressionLenHelper(c, x.Ptr) - case *SOA: - compressionLenHelper(c, x.Ns) - compressionLenHelper(c, x.Mbox) - case *MB: - compressionLenHelper(c, x.Mb) - case *MG: - compressionLenHelper(c, x.Mg) - case *MR: - compressionLenHelper(c, x.Mr) - case *MF: - compressionLenHelper(c, x.Mf) - case *MD: - compressionLenHelper(c, x.Md) - case *RT: - compressionLenHelper(c, x.Host) - case *MINFO: - compressionLenHelper(c, x.Rmail) - compressionLenHelper(c, x.Email) - case *AFSDB: - compressionLenHelper(c, x.Hostname) - } -} - -// Only search on compressing these types. -func compressionLenSearchType(c map[string]int, r RR) (int, bool) { - switch x := r.(type) { - case *NS: - return compressionLenSearch(c, x.Ns) - case *MX: - return compressionLenSearch(c, x.Mx) - case *CNAME: - return compressionLenSearch(c, x.Target) - case *PTR: - return compressionLenSearch(c, x.Ptr) - case *SOA: - k, ok := compressionLenSearch(c, x.Ns) - k1, ok1 := compressionLenSearch(c, x.Mbox) - if !ok && !ok1 { - return 0, false - } - return k + k1, true - case *MB: - return compressionLenSearch(c, x.Mb) - case *MG: - return compressionLenSearch(c, x.Mg) - case *MR: - return compressionLenSearch(c, x.Mr) - case *MF: - return compressionLenSearch(c, x.Mf) - case *MD: - return compressionLenSearch(c, x.Md) - case *RT: - return compressionLenSearch(c, x.Host) - case *MINFO: - k, ok := compressionLenSearch(c, x.Rmail) - k1, ok1 := compressionLenSearch(c, x.Email) - if !ok && !ok1 { - return 0, false - } - return k + k1, true - case *AFSDB: - return compressionLenSearch(c, x.Hostname) - } - return 0, false -} - -// id returns a 16 bits random number to be used as a -// message id. The random provided should be good enough. -func id() uint16 { - return uint16(rand.Int()) ^ uint16(time.Now().Nanosecond()) -} - -// Copy returns a new RR which is a deep-copy of r. -func Copy(r RR) RR { - r1 := r.copy() - return r1 -} - -// Copy returns a new *Msg which is a deep-copy of dns. -func (dns *Msg) Copy() *Msg { - return dns.CopyTo(new(Msg)) -} - -// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
-func (dns *Msg) CopyTo(r1 *Msg) *Msg { - r1.MsgHdr = dns.MsgHdr - r1.Compress = dns.Compress - - if len(dns.Question) > 0 { - r1.Question = make([]Question, len(dns.Question)) - copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy - } - - rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) - var rri int - - if len(dns.Answer) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Answer); i++ { - rrArr[rri] = dns.Answer[i].copy() - rri++ - } - r1.Answer = rrArr[rrbegin:rri:rri] - } - - if len(dns.Ns) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Ns); i++ { - rrArr[rri] = dns.Ns[i].copy() - rri++ - } - r1.Ns = rrArr[rrbegin:rri:rri] - } - - if len(dns.Extra) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Extra); i++ { - rrArr[rri] = dns.Extra[i].copy() - rri++ - } - r1.Extra = rrArr[rrbegin:rri:rri] - } - - return r1 -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go b/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go deleted file mode 100644 index d2392c6ec6..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go +++ /dev/null @@ -1,112 +0,0 @@ -package dns - -import ( - "crypto/sha1" - "hash" - "io" - "strings" -) - -type saltWireFmt struct { - Salt string `dns:"size-hex"` -} - -// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in -// uppercase. -func HashName(label string, ha uint8, iter uint16, salt string) string { - saltwire := new(saltWireFmt) - saltwire.Salt = salt - wire := make([]byte, DefaultMsgSize) - n, err := PackStruct(saltwire, wire, 0) - if err != nil { - return "" - } - wire = wire[:n] - name := make([]byte, 255) - off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) - if err != nil { - return "" - } - name = name[:off] - var s hash.Hash - switch ha { - case SHA1: - s = sha1.New() - default: - return "" - } - - // k = 0 - name = append(name, wire...) - io.WriteString(s, string(name)) - nsec3 := s.Sum(nil) - // k > 0 - for k := uint16(0); k < iter; k++ { - s.Reset() - nsec3 = append(nsec3, wire...) - io.WriteString(s, string(nsec3)) - nsec3 = s.Sum(nil) - } - return toBase32(nsec3) -} - -// Denialer is an interface that should be implemented by types that are used to deny -// answers in DNSSEC. -type Denialer interface { - // Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3. - Cover(name string) bool - // Match will check if the owner name matches the (unhashed) name for this NSEC or NSEC3. - Match(name string) bool -} - -// Cover implements the Denialer interface. -func (rr *NSEC) Cover(name string) bool { - return true -} - -// Match implements the Denialer interface. -func (rr *NSEC) Match(name string) bool { - return true -} - -// Cover implements the Denialer interface. -func (rr *NSEC3) Cover(name string) bool { - // FIXME(miek): check if the zones match - // FIXME(miek): check if we're not dealing with parent nsec3 - hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - labels := Split(rr.Hdr.Name) - if len(labels) < 2 { - return false - } - hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot - if hash == rr.NextDomain { - return false // empty interval - } - if hash > rr.NextDomain { // last name, points to apex - // hname > hash - // hname > rr.NextDomain - // TODO(miek) - } - if hname <= hash { - return false - } - if hname >= rr.NextDomain { - return false - } - return true -} - -// Match implements the Denialer interface.
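// For illustration, hash values as exercised by nsecx_test.go below:
//
//	HashName("dnsex.nl.", SHA1, 0, "DEAD") // "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J"
//
// Match hashes the query name the same way and compares the result with
// the first label of this NSEC3's owner name.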
-func (rr *NSEC3) Match(name string) bool { - // FIXME(miek): Check if we are in the same zone - hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - labels := Split(rr.Hdr.Name) - if len(labels) < 2 { - return false - } - hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the . - if hash == hname { - return true - } - return false -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/nsecx_test.go b/Godeps/_workspace/src/github.com/miekg/dns/nsecx_test.go deleted file mode 100644 index 93e0c63fce..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/nsecx_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package dns - -import ( - "testing" -) - -func TestPackNsec3(t *testing.T) { - nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD") - if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" { - t.Error(nsec3) - } - - nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD") - if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" { - t.Error(nsec3) - } -} - -func TestNsec3(t *testing.T) { - // examples taken from .nl - nsec3, _ := NewRR("39p91242oslggest5e6a7cci4iaeqvnk.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6 NS DS RRSIG") - if !nsec3.(*NSEC3).Cover("snasajsksasasa.nl.") { // 39p94jrinub66hnpem8qdpstrec86pg3 - t.Error("39p94jrinub66hnpem8qdpstrec86pg3. should be covered by 39p91242oslggest5e6a7cci4iaeqvnk.nl. - 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6") - } - nsec3, _ = NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") - if !nsec3.(*NSEC3).Match("nl.") { // sk4e8fj94u78smusb40o1n0oltbblu2r.nl. - t.Error("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go b/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go deleted file mode 100644 index 1f25172701..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go +++ /dev/null @@ -1,1508 +0,0 @@ -package dns - -import ( - "bytes" - "crypto/rsa" - "encoding/hex" - "fmt" - "math/rand" - "net" - "reflect" - "strconv" - "strings" - "testing" - "testing/quick" - "time" -) - -func TestDotInName(t *testing.T) { - buf := make([]byte, 20) - PackDomainName("aa\\.bb.nl.", buf, 0, nil, false) - // index 3 must be a real dot - if buf[3] != '.' { - t.Error("dot should be a real dot") - } - - if buf[6] != 2 { - t.Error("this must have the value 2") - } - dom, _, _ := UnpackDomainName(buf, 0) - // printing it should yield the backspace again - if dom != "aa\\.bb.nl." { - t.Error("dot should have been escaped: ", dom) - } -} - -func TestDotLastInLabel(t *testing.T) { - sample := "aa\\..au." - buf := make([]byte, 20) - _, err := PackDomainName(sample, buf, 0, nil, false) - if err != nil { - t.Fatalf("unexpected error packing domain: %v", err) - } - dom, _, _ := UnpackDomainName(buf, 0) - if dom != sample { - t.Fatalf("unpacked domain `%s' doesn't match packed domain", dom) - } -} - -func TestTooLongDomainName(t *testing.T) { - l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt." - dom := l + l + l + l + l + l + l - _, err := NewRR(dom + " IN A 127.0.0.1") - if err == nil { - t.Error("should be too long") - } else { - t.Logf("error is %v", err) - } - _, err = NewRR("..com. 
IN A 127.0.0.1") - if err == nil { - t.Error("should fail") - } else { - t.Logf("error is %v", err) - } -} - -func TestDomainName(t *testing.T) { - tests := []string{"r\\.gieben.miek.nl.", "www\\.www.miek.nl.", - "www.*.miek.nl.", "www.*.miek.nl.", - } - dbuff := make([]byte, 40) - - for _, ts := range tests { - if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil { - t.Error("not a valid domain name") - continue - } - n, _, err := UnpackDomainName(dbuff, 0) - if err != nil { - t.Error("failed to unpack packed domain name") - continue - } - if ts != n { - t.Errorf("must be equal: in: %s, out: %s", ts, n) - } - } -} - -func TestDomainNameAndTXTEscapes(t *testing.T) { - tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', '\t', '\r', '\n', 0, 255} - for _, b := range tests { - rrbytes := []byte{ - 1, b, 0, // owner - byte(TypeTXT >> 8), byte(TypeTXT), - byte(ClassINET >> 8), byte(ClassINET), - 0, 0, 0, 1, // TTL - 0, 2, 1, b, // Data - } - rr1, _, err := UnpackRR(rrbytes, 0) - if err != nil { - panic(err) - } - s := rr1.String() - rr2, err := NewRR(s) - if err != nil { - t.Errorf("Error parsing unpacked RR's string: %v", err) - t.Errorf(" Bytes: %v", rrbytes) - t.Errorf("String: %v", s) - } - repacked := make([]byte, len(rrbytes)) - if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil { - t.Errorf("error packing parsed RR: %v", err) - t.Errorf(" original Bytes: %v", rrbytes) - t.Errorf("unpacked Struct: %v", rr1) - t.Errorf(" parsed Struct: %v", rr2) - } - if !bytes.Equal(repacked, rrbytes) { - t.Error("packed bytes don't match original bytes") - t.Errorf(" original bytes: %v", rrbytes) - t.Errorf(" packed bytes: %v", repacked) - t.Errorf("unpacked struct: %v", rr1) - t.Errorf(" parsed struct: %v", rr2) - } - } -} - -func TestTXTEscapeParsing(t *testing.T) { - test := [][]string{ - {`";"`, `";"`}, - {`\;`, `";"`}, - {`"\t"`, `"\t"`}, - {`"\r"`, `"\r"`}, - {`"\ "`, `" "`}, - {`"\;"`, `";"`}, - {`"\;\""`, `";\""`}, - {`"\(a\)"`, `"(a)"`}, - {`"\(a)"`, `"(a)"`}, - {`"(a\)"`, `"(a)"`}, - {`"(a)"`, `"(a)"`}, - {`"\048"`, `"0"`}, - {`"\` + "\n" + `"`, `"\n"`}, - {`"\` + "\r" + `"`, `"\r"`}, - {`"\` + "\x11" + `"`, `"\017"`}, - {`"\'"`, `"'"`}, - } - for _, s := range test { - rr, err := NewRR(fmt.Sprintf("example.com. IN TXT %v", s[0])) - if err != nil { - t.Errorf("Could not parse %v TXT: %s", s[0], err) - continue - } - - txt := sprintTxt(rr.(*TXT).Txt) - if txt != s[1] { - t.Errorf("Mismatch after parsing `%v` TXT record: `%v` != `%v`", s[0], txt, s[1]) - } - } -} - -func GenerateDomain(r *rand.Rand, size int) []byte { - dnLen := size % 70 // artificially limit size so there's less to intrepret if a failure occurs - var dn []byte - done := false - for i := 0; i < dnLen && !done; { - max := dnLen - i - if max > 63 { - max = 63 - } - lLen := max - if lLen != 0 { - lLen = int(r.Int31()) % max - } - done = lLen == 0 - if done { - continue - } - l := make([]byte, lLen+1) - l[0] = byte(lLen) - for j := 0; j < lLen; j++ { - l[j+1] = byte(rand.Int31()) - } - dn = append(dn, l...) 
- i += 1 + lLen - } - return append(dn, 0) -} - -func TestDomainQuick(t *testing.T) { - r := rand.New(rand.NewSource(0)) - f := func(l int) bool { - db := GenerateDomain(r, l) - ds, _, err := UnpackDomainName(db, 0) - if err != nil { - panic(err) - } - buf := make([]byte, 255) - off, err := PackDomainName(ds, buf, 0, nil, false) - if err != nil { - t.Errorf("error packing domain: %v", err) - t.Errorf(" bytes: %v", db) - t.Errorf("string: %v", ds) - return false - } - if !bytes.Equal(db, buf[:off]) { - t.Errorf("repacked domain doesn't match original:") - t.Errorf("src bytes: %v", db) - t.Errorf(" string: %v", ds) - t.Errorf("out bytes: %v", buf[:off]) - return false - } - return true - } - if err := quick.Check(f, nil); err != nil { - t.Error(err) - } -} - -func GenerateTXT(r *rand.Rand, size int) []byte { - rdLen := size % 300 // artificially limit size so there's less to intrepret if a failure occurs - var rd []byte - for i := 0; i < rdLen; { - max := rdLen - 1 - if max > 255 { - max = 255 - } - sLen := max - if max != 0 { - sLen = int(r.Int31()) % max - } - s := make([]byte, sLen+1) - s[0] = byte(sLen) - for j := 0; j < sLen; j++ { - s[j+1] = byte(rand.Int31()) - } - rd = append(rd, s...) - i += 1 + sLen - } - return rd -} - -// Ok, 2 things. 1) this test breaks with the new functionality of splitting up larger txt -// chunks into 255 byte pieces. 2) I don't like the random nature of this thing, because I can't -// place the quotes where they need to be. -// So either add some code the places the quotes in just the right spots, make this non random -// or do something else. -// Disabled for now. (miek) -func testTXTRRQuick(t *testing.T) { - s := rand.NewSource(0) - r := rand.New(s) - typeAndClass := []byte{ - byte(TypeTXT >> 8), byte(TypeTXT), - byte(ClassINET >> 8), byte(ClassINET), - 0, 0, 0, 1, // TTL - } - f := func(l int) bool { - owner := GenerateDomain(r, l) - rdata := GenerateTXT(r, l) - rrbytes := make([]byte, 0, len(owner)+2+2+4+2+len(rdata)) - rrbytes = append(rrbytes, owner...) - rrbytes = append(rrbytes, typeAndClass...) - rrbytes = append(rrbytes, byte(len(rdata)>>8)) - rrbytes = append(rrbytes, byte(len(rdata))) - rrbytes = append(rrbytes, rdata...) 
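- // rrbytes is now a complete RR in wire format: owner name,
- // 2-byte type, 2-byte class, 4-byte TTL, 2-byte RDLENGTH, then RDATA.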
- rr, _, err := UnpackRR(rrbytes, 0) - if err != nil { - panic(err) - } - buf := make([]byte, len(rrbytes)*3) - off, err := PackRR(rr, buf, 0, nil, false) - if err != nil { - t.Errorf("pack Error: %v\nRR: %v", err, rr) - return false - } - buf = buf[:off] - if !bytes.Equal(buf, rrbytes) { - t.Errorf("packed bytes don't match original bytes") - t.Errorf("src bytes: %v", rrbytes) - t.Errorf(" struct: %v", rr) - t.Errorf("out bytes: %v", buf) - return false - } - if len(rdata) == 0 { - // string'ing won't produce any data to parse - return true - } - rrString := rr.String() - rr2, err := NewRR(rrString) - if err != nil { - t.Errorf("error parsing own output: %v", err) - t.Errorf("struct: %v", rr) - t.Errorf("string: %v", rrString) - return false - } - if rr2.String() != rrString { - t.Errorf("parsed rr.String() doesn't match original string") - t.Errorf("original: %v", rrString) - t.Errorf(" parsed: %v", rr2.String()) - return false - } - - buf = make([]byte, len(rrbytes)*3) - off, err = PackRR(rr2, buf, 0, nil, false) - if err != nil { - t.Errorf("error packing parsed rr: %v", err) - t.Errorf("unpacked Struct: %v", rr) - t.Errorf(" string: %v", rrString) - t.Errorf(" parsed Struct: %v", rr2) - return false - } - buf = buf[:off] - if !bytes.Equal(buf, rrbytes) { - t.Errorf("parsed packed bytes don't match original bytes") - t.Errorf(" source bytes: %v", rrbytes) - t.Errorf("unpacked struct: %v", rr) - t.Errorf(" string: %v", rrString) - t.Errorf(" parsed struct: %v", rr2) - t.Errorf(" repacked bytes: %v", buf) - return false - } - return true - } - c := &quick.Config{MaxCountScale: 10} - if err := quick.Check(f, c); err != nil { - t.Error(err) - } -} - -func TestParseDirectiveMisc(t *testing.T) { - tests := map[string]string{ - "$ORIGIN miek.nl.\na IN NS b": "a.miek.nl.\t3600\tIN\tNS\tb.miek.nl.", - "$TTL 2H\nmiek.nl. IN NS b.": "miek.nl.\t7200\tIN\tNS\tb.", - "miek.nl. 1D IN NS b.": "miek.nl.\t86400\tIN\tNS\tb.", - `name. IN SOA a6.nstld.com. hostmaster.nic.name. ( - 203362132 ; serial - 5m ; refresh (5 minutes) - 5m ; retry (5 minutes) - 2w ; expire (2 weeks) - 300 ; minimum (5 minutes) -)`: "name.\t3600\tIN\tSOA\ta6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300", - ". 3600000 IN NS ONE.MY-ROOTS.NET.": ".\t3600000\tIN\tNS\tONE.MY-ROOTS.NET.", - "ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestNSEC(t *testing.T) { - nsectests := map[string]string{ - "nl. IN NSEC3PARAM 1 0 5 30923C44C6CBBB8F": "nl.\t3600\tIN\tNSEC3PARAM\t1 0 5 30923C44C6CBBB8F", - "p2209hipbpnm681knjnu0m1febshlv4e.nl. IN NSEC3 1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM": "p2209hipbpnm681knjnu0m1febshlv4e.nl.\t3600\tIN\tNSEC3\t1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM", - "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC", - "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC TYPE65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534", - "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. 
A RRSIG NSEC TYPE65534", - } - for i, o := range nsectests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseLOC(t *testing.T) { - lt := map[string]string{ - "SW1A2AA.find.me.uk. LOC 51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 30 12.748 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", - "SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseDS(t *testing.T) { - dt := map[string]string{ - "example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F", - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestQuotes(t *testing.T) { - tests := map[string]string{ - `t.example.com. IN TXT "a bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a bc\"", - `t.example.com. IN TXT "a - bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\n bc\"", - `t.example.com. IN TXT ""`: "t.example.com.\t3600\tIN\tTXT\t\"\"", - `t.example.com. IN TXT "a"`: "t.example.com.\t3600\tIN\tTXT\t\"a\"", - `t.example.com. IN TXT "aa"`: "t.example.com.\t3600\tIN\tTXT\t\"aa\"", - `t.example.com. IN TXT "aaa" ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", - `t.example.com. IN TXT "abc" "DEF"`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", - `t.example.com. IN TXT "abc" ( "DEF" )`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", - `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa \"", - `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"", - `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"", - `t.example.com. IN TXT aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", - "cid.urn.arpa. NAPTR 100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.", - "cid.urn.arpa. NAPTR 100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.", - "cid.urn.arpa. NAPTR 100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.", - "cid.urn.arpa. 
NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseClass(t *testing.T) { - tests := map[string]string{ - "t.example.com. IN A 127.0.0.1": "t.example.com. 3600 IN A 127.0.0.1", - "t.example.com. CS A 127.0.0.1": "t.example.com. 3600 CS A 127.0.0.1", - "t.example.com. CH A 127.0.0.1": "t.example.com. 3600 CH A 127.0.0.1", - // ClassANY can not occur in zone files - // "t.example.com. ANY A 127.0.0.1": "t.example.com. 3600 ANY A 127.0.0.1", - "t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestBrace(t *testing.T) { - tests := map[string]string{ - "(miek.nl.) 3600 IN A 127.0.1.1": "miek.nl.\t3600\tIN\tA\t127.0.1.1", - "miek.nl. (3600) IN MX (10) elektron.atoom.net.": "miek.nl.\t3600\tIN\tMX\t10 elektron.atoom.net.", - `miek.nl. IN ( - 3600 A 127.0.0.1)`: "miek.nl.\t3600\tIN\tA\t127.0.0.1", - "(miek.nl.) (A) (127.0.2.1)": "miek.nl.\t3600\tIN\tA\t127.0.2.1", - "miek.nl A 127.0.3.1": "miek.nl.\t3600\tIN\tA\t127.0.3.1", - "_ssh._tcp.local. 60 IN (PTR) stora._ssh._tcp.local.": "_ssh._tcp.local.\t60\tIN\tPTR\tstora._ssh._tcp.local.", - "miek.nl. NS ns.miek.nl": "miek.nl.\t3600\tIN\tNS\tns.miek.nl.", - `(miek.nl.) ( - (IN) - (AAAA) - (::1) )`: "miek.nl.\t3600\tIN\tAAAA\t::1", - `(miek.nl.) ( - (IN) - (AAAA) - (::1))`: "miek.nl.\t3600\tIN\tAAAA\t::1", - "miek.nl. IN AAAA ::2": "miek.nl.\t3600\tIN\tAAAA\t::2", - `((m)(i)ek.(n)l.) (SOA) (soa.) (soa.) ( - 2009032802 ; serial - 21600 ; refresh (6 hours) - 7(2)00 ; retry (2 hours) - 604()800 ; expire (1 week) - 3600 ; minimum (1 hour) - )`: "miek.nl.\t3600\tIN\tSOA\tsoa. soa. 2009032802 21600 7200 604800 3600", - "miek\\.nl. IN A 127.0.0.10": "miek\\.nl.\t3600\tIN\tA\t127.0.0.10", - "miek.nl. IN A 127.0.0.11": "miek.nl.\t3600\tIN\tA\t127.0.0.11", - "miek.nl. A 127.0.0.12": "miek.nl.\t3600\tIN\tA\t127.0.0.12", - `miek.nl. 86400 IN SOA elektron.atoom.net. miekg.atoom.net. ( - 2009032802 ; serial - 21600 ; refresh (6 hours) - 7200 ; retry (2 hours) - 604800 ; expire (1 week) - 3600 ; minimum (1 hour) - )`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600", - } - for i, o := range tests { - rr, err := NewRR(i) - if err != nil { - t.Errorf("failed to parse RR: %v\n\t%s", err, i) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseFailure(t *testing.T) { - tests := []string{"miek.nl. IN A 327.0.0.1", - "miek.nl. IN AAAA ::x", - "miek.nl. IN MX a0 miek.nl.", - "miek.nl aap IN MX mx.miek.nl.", - "miek.nl 200 IN mxx 10 mx.miek.nl.", - "miek.nl. inn MX 10 mx.miek.nl.", - // "miek.nl. IN CNAME ", // actually valid nowadays, zero size rdata - "miek.nl. IN CNAME ..", - "miek.nl. PA MX 10 miek.nl.", - "miek.nl. 
) IN MX 10 miek.nl.", - } - - for _, s := range tests { - _, err := NewRR(s) - if err == nil { - t.Errorf("should have triggered an error: \"%s\"", s) - } - } -} - -func TestZoneParsing(t *testing.T) { - // parse_test.db - db := ` -a.example.com. IN A 127.0.0.1 -8db7._openpgpkey.example.com. IN OPENPGPKEY mQCNAzIG -$ORIGIN a.example.com. -test IN A 127.0.0.1 - IN SSHFP 1 2 ( - BC6533CDC95A79078A39A56EA7635984ED655318ADA9 - B6159E30723665DA95BB ) -$ORIGIN b.example.com. -test IN CNAME test.a.example.com. -` - start := time.Now().UnixNano() - to := ParseZone(strings.NewReader(db), "", "parse_test.db") - var i int - for x := range to { - i++ - if x.Error != nil { - t.Error(x.Error) - continue - } - t.Log(x.RR) - } - delta := time.Now().UnixNano() - start - t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9)) -} - -func ExampleZone() { - zone := `$ORIGIN . -$TTL 3600 ; 1 hour -name IN SOA a6.nstld.com. hostmaster.nic.name. ( - 203362132 ; serial - 300 ; refresh (5 minutes) - 300 ; retry (5 minutes) - 1209600 ; expire (2 weeks) - 300 ; minimum (5 minutes) - ) -$TTL 10800 ; 3 hours -name. 10800 IN NS name. - IN NS g6.nstld.com. - 7200 NS h6.nstld.com. - 3600 IN NS j6.nstld.com. - IN 3600 NS k6.nstld.com. - NS l6.nstld.com. - NS a6.nstld.com. - NS c6.nstld.com. - NS d6.nstld.com. - NS f6.nstld.com. - NS m6.nstld.com. -( - NS m7.nstld.com. -) -$ORIGIN name. -0-0onlus NS ns7.ehiweb.it. - NS ns8.ehiweb.it. -0-g MX 10 mx01.nic - MX 10 mx02.nic - MX 10 mx03.nic - MX 10 mx04.nic -$ORIGIN 0-g.name -moutamassey NS ns01.yahoodomains.jp. - NS ns02.yahoodomains.jp. -` - to := ParseZone(strings.NewReader(zone), "", "testzone") - for x := range to { - fmt.Println(x.RR) - } - // Output: - // name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300 - // name. 10800 IN NS name. - // name. 10800 IN NS g6.nstld.com. - // name. 7200 IN NS h6.nstld.com. - // name. 3600 IN NS j6.nstld.com. - // name. 3600 IN NS k6.nstld.com. - // name. 10800 IN NS l6.nstld.com. - // name. 10800 IN NS a6.nstld.com. - // name. 10800 IN NS c6.nstld.com. - // name. 10800 IN NS d6.nstld.com. - // name. 10800 IN NS f6.nstld.com. - // name. 10800 IN NS m6.nstld.com. - // name. 10800 IN NS m7.nstld.com. - // 0-0onlus.name. 10800 IN NS ns7.ehiweb.it. - // 0-0onlus.name. 10800 IN NS ns8.ehiweb.it. - // 0-g.name. 10800 IN MX 10 mx01.nic.name. - // 0-g.name. 10800 IN MX 10 mx02.nic.name. - // 0-g.name. 10800 IN MX 10 mx03.nic.name. - // 0-g.name. 10800 IN MX 10 mx04.nic.name. - // moutamassey.0-g.name.name. 10800 IN NS ns01.yahoodomains.jp. - // moutamassey.0-g.name.name. 10800 IN NS ns02.yahoodomains.jp. -} - -func ExampleHIP() { - h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578 - AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p -9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ -b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D - rvs.example.com. )` - if hip, err := NewRR(h); err == nil { - fmt.Println(hip.String()) - } - // Output: - // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com. -} - -func TestHIP(t *testing.T) { - h := `www.example.com. 
IN HIP ( 2 200100107B1A74DF365639CC39F1D578 - AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p -9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ -b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D - rvs1.example.com. - rvs2.example.com. )` - rr, err := NewRR(h) - if err != nil { - t.Fatalf("failed to parse RR: %v", err) - } - t.Logf("RR: %s", rr) - msg := new(Msg) - msg.Answer = []RR{rr, rr} - bytes, err := msg.Pack() - if err != nil { - t.Fatalf("failed to pack msg: %v", err) - } - if err := msg.Unpack(bytes); err != nil { - t.Fatalf("failed to unpack msg: %v", err) - } - if len(msg.Answer) != 2 { - t.Fatalf("2 answers expected: %v", msg) - } - for i, rr := range msg.Answer { - rr := rr.(*HIP) - t.Logf("RR: %s", rr) - if l := len(rr.RendezvousServers); l != 2 { - t.Fatalf("2 servers expected, only %d in record %d:\n%v", l, i, msg) - } - for j, s := range []string{"rvs1.example.com.", "rvs2.example.com."} { - if rr.RendezvousServers[j] != s { - t.Fatalf("expected server %d of record %d to be %s:\n%v", j, i, s, msg) - } - } - } -} - -func ExampleSOA() { - s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100" - if soa, err := NewRR(s); err == nil { - fmt.Println(soa.String()) - } - // Output: - // example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100 -} - -func TestLineNumberError(t *testing.T) { - s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100" - if _, err := NewRR(s); err != nil { - if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" { - t.Error("not expecting this error: ", err) - } - } -} - -// Test with no known RR on the line -func TestLineNumberError2(t *testing.T) { - tests := map[string]string{ - "example.com. 1000 SO master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100": "dns: expecting RR type or class, not this...: \"SO\" at line: 1:21", - "example.com 1000 IN TALINK a.example.com. b..example.com.": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:57", - "example.com 1000 IN TALINK ( a.example.com. b..example.com. )": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:60", - `example.com 1000 IN TALINK ( a.example.com. - bb..example.com. )`: "dns: bad TALINK NextName: \"bb..example.com.\" at line: 2:18", - // This is a bug, it should report an error on line 1, but the new is already processed. - `example.com 1000 IN TALINK ( a.example.com. b...example.com. - )`: "dns: bad TALINK NextName: \"b...example.com.\" at line: 2:1"} - - for in, errStr := range tests { - _, err := NewRR(in) - if err == nil { - t.Error("err is nil") - } else { - if err.Error() != errStr { - t.Errorf("%s: error should be %s is %v", in, errStr, err) - } - } - } -} - -// Test if the calculations are correct -func TestRfc1982(t *testing.T) { - // If the current time and the timestamp are more than 68 years apart - // it means the date has wrapped. 
0 is 1970 - - // fall in the current 68 year span - strtests := []string{"20120525134203", "19700101000000", "20380119031408"} - for _, v := range strtests { - if x, _ := StringToTime(v); v != TimeToString(x) { - t.Errorf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x) - } - } - - inttests := map[uint32]string{0: "19700101000000", - 1 << 31: "20380119031408", - 1<<32 - 1: "21060207062815", - } - for i, v := range inttests { - if TimeToString(i) != v { - t.Errorf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i)) - } - } - - // Future tests, these dates get parsed to a date within the current 136 year span - future := map[string]string{"22680119031408": "20631123173144", - "19010101121212": "20370206184028", - "19210101121212": "20570206184028", - "19500101121212": "20860206184028", - "19700101000000": "19700101000000", - "19690101000000": "21050207062816", - "29210101121212": "21040522212236", - } - for from, to := range future { - x, _ := StringToTime(from) - y := TimeToString(x) - if y != to { - t.Errorf("1982 arithmetic future failure %s:%s (%s)", from, to, y) - } - } -} - -func TestEmpty(t *testing.T) { - for _ = range ParseZone(strings.NewReader(""), "", "") { - t.Errorf("should be empty") - } -} - -func TestLowercaseTokens(t *testing.T) { - var testrecords = []string{ - "example.org. 300 IN a 1.2.3.4", - "example.org. 300 in A 1.2.3.4", - "example.org. 300 in a 1.2.3.4", - "example.org. 300 a 1.2.3.4", - "example.org. 300 A 1.2.3.4", - "example.org. IN a 1.2.3.4", - "example.org. in A 1.2.3.4", - "example.org. in a 1.2.3.4", - "example.org. a 1.2.3.4", - "example.org. A 1.2.3.4", - "example.org. a 1.2.3.4", - "$ORIGIN example.org.\n a 1.2.3.4", - "$Origin example.org.\n a 1.2.3.4", - "$origin example.org.\n a 1.2.3.4", - "example.org. Class1 Type1 1.2.3.4", - } - for _, testrr := range testrecords { - _, err := NewRR(testrr) - if err != nil { - t.Errorf("failed to parse %#v, got %v", testrr, err) - } - } -} - -func ExampleGenerate() { - // From the manual: http://www.bind9.net/manual/bind/9.3.2/Bv9ARM.ch06.html#id2566761 - zone := "$GENERATE 1-2 0 NS SERVER$.EXAMPLE.\n$GENERATE 1-8 $ CNAME $.0" - to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "") - for x := range to { - if x.Error == nil { - fmt.Println(x.RR.String()) - } - } - // Output: - // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER1.EXAMPLE. - // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER2.EXAMPLE. - // 1.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 1.0.0.0.192.IN-ADDR.ARPA. - // 2.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 2.0.0.0.192.IN-ADDR.ARPA. - // 3.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 3.0.0.0.192.IN-ADDR.ARPA. - // 4.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 4.0.0.0.192.IN-ADDR.ARPA. - // 5.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 5.0.0.0.192.IN-ADDR.ARPA. - // 6.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 6.0.0.0.192.IN-ADDR.ARPA. - // 7.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 7.0.0.0.192.IN-ADDR.ARPA. - // 8.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 8.0.0.0.192.IN-ADDR.ARPA. 
-} - -func TestSRVPacking(t *testing.T) { - msg := Msg{} - - things := []string{"1.2.3.4:8484", - "45.45.45.45:8484", - "84.84.84.84:8484", - } - - for i, n := range things { - h, p, err := net.SplitHostPort(n) - if err != nil { - continue - } - port := 8484 - tmp, err := strconv.Atoi(p) - if err == nil { - port = tmp - } - - rr := &SRV{ - Hdr: RR_Header{Name: "somename.", - Rrtype: TypeSRV, - Class: ClassINET, - Ttl: 5}, - Priority: uint16(i), - Weight: 5, - Port: uint16(port), - Target: h + ".", - } - - msg.Answer = append(msg.Answer, rr) - } - - _, err := msg.Pack() - if err != nil { - t.Fatalf("couldn't pack %v: %v", msg, err) - } -} - -func TestParseBackslash(t *testing.T) { - if r, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil { - t.Errorf("could not create RR with \\000 in it") - } else { - t.Logf("parsed %s", r.String()) - } - if r, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil { - t.Errorf("could not create RR with \\000 in it") - } else { - t.Logf("parsed %s", r.String()) - } - if r, err := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); err != nil { - t.Errorf("could not create RR with \\ and \\@ in it") - } else { - t.Logf("parsed %s", r.String()) - } -} - -func TestILNP(t *testing.T) { - tests := []string{ - "host1.example.com.\t3600\tIN\tNID\t10 0014:4fff:ff20:ee64", - "host1.example.com.\t3600\tIN\tNID\t20 0015:5fff:ff21:ee65", - "host2.example.com.\t3600\tIN\tNID\t10 0016:6fff:ff22:ee66", - "host1.example.com.\t3600\tIN\tL32\t10 10.1.2.0", - "host1.example.com.\t3600\tIN\tL32\t20 10.1.4.0", - "host2.example.com.\t3600\tIN\tL32\t10 10.1.8.0", - "host1.example.com.\t3600\tIN\tL64\t10 2001:0DB8:1140:1000", - "host1.example.com.\t3600\tIN\tL64\t20 2001:0DB8:2140:2000", - "host2.example.com.\t3600\tIN\tL64\t10 2001:0DB8:4140:4000", - "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet1.example.com.", - "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet2.example.com.", - "host1.example.com.\t3600\tIN\tLP\t20 l32-subnet1.example.com.", - } - for _, t1 := range tests { - r, err := NewRR(t1) - if err != nil { - t.Fatalf("an error occurred: %v", err) - } else { - if t1 != r.String() { - t.Fatalf("strings should be equal %s %s", t1, r.String()) - } - } - } -} - -func TestNsapGposEidNimloc(t *testing.T) { - dt := map[string]string{ - "foo.bar.com. IN NSAP 21 47000580ffff000000321099991111222233334444": "foo.bar.com.\t3600\tIN\tNSAP\t0x47000580ffff000000321099991111222233334444", - "foo.bar.com. IN NSAP 0x47000580ffff000000321099991111222233334444": "foo.bar.com.\t3600\tIN\tNSAP\t0x47000580ffff000000321099991111222233334444", - "host.school.de IN NSAP 17 39276f3100111100002222333344449876": "host.school.de.\t3600\tIN\tNSAP\t0x39276f3100111100002222333344449876", - "444433332222111199990123000000ff. NSAP-PTR foo.bar.com.": "444433332222111199990123000000ff.\t3600\tIN\tNSAP-PTR\tfoo.bar.com.", - "lillee. IN GPOS -32.6882 116.8652 10.0": "lillee.\t3600\tIN\tGPOS\t-32.6882 116.8652 10.0", - "hinault. IN GPOS -22.6882 116.8652 250.0": "hinault.\t3600\tIN\tGPOS\t-22.6882 116.8652 250.0", - "VENERA. IN NIMLOC 75234159EAC457800920": "VENERA.\t3600\tIN\tNIMLOC\t75234159EAC457800920", - "VAXA. 
IN EID 3141592653589793": "VAXA.\t3600\tIN\tEID\t3141592653589793", - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestPX(t *testing.T) { - dt := map[string]string{ - "*.net2.it. IN PX 10 net2.it. PRMD-net2.ADMD-p400.C-it.": "*.net2.it.\t3600\tIN\tPX\t10 net2.it. PRMD-net2.ADMD-p400.C-it.", - "ab.net2.it. IN PX 10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.": "ab.net2.it.\t3600\tIN\tPX\t10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.", - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestComment(t *testing.T) { - // Comments we must see - comments := map[string]bool{"; this is comment 1": true, - "; this is comment 4": true, "; this is comment 6": true, - "; this is comment 7": true, "; this is comment 8": true} - zone := ` -foo. IN A 10.0.0.1 ; this is comment 1 -foo. IN A ( - 10.0.0.2 ; this is comment2 -) -; this is comment3 -foo. IN A 10.0.0.3 -foo. IN A ( 10.0.0.4 ); this is comment 4 - -foo. IN A 10.0.0.5 -; this is comment5 - -foo. IN A 10.0.0.6 - -foo. IN DNSKEY 256 3 5 AwEAAb+8l ; this is comment 6 -foo. IN NSEC miek.nl. TXT RRSIG NSEC; this is comment 7 -foo. IN TXT "THIS IS TEXT MAN"; this is comment 8 -` - for x := range ParseZone(strings.NewReader(zone), ".", "") { - if x.Error == nil { - if x.Comment != "" { - if _, ok := comments[x.Comment]; !ok { - t.Errorf("wrong comment %s", x.Comment) - } - } - } - } -} - -func TestEUIxx(t *testing.T) { - tests := map[string]string{ - "host.example. IN EUI48 00-00-5e-90-01-2a": "host.example.\t3600\tIN\tEUI48\t00-00-5e-90-01-2a", - "host.example. IN EUI64 00-00-5e-ef-00-00-00-2a": "host.example.\t3600\tIN\tEUI64\t00-00-5e-ef-00-00-00-2a", - } - for i, o := range tests { - r, err := NewRR(i) - if err != nil { - t.Errorf("failed to parse %s: %v", i, err) - } - if r.String() != o { - t.Errorf("want %s, got %s", o, r.String()) - } - } -} - -func TestUserRR(t *testing.T) { - tests := map[string]string{ - "host.example. IN UID 1234": "host.example.\t3600\tIN\tUID\t1234", - "host.example. IN GID 1234556": "host.example.\t3600\tIN\tGID\t1234556", - "host.example. IN UINFO \"Miek Gieben\"": "host.example.\t3600\tIN\tUINFO\t\"Miek Gieben\"", - } - for i, o := range tests { - r, err := NewRR(i) - if err != nil { - t.Errorf("failed to parse %s: %v", i, err) - } - if r.String() != o { - t.Errorf("want %s, got %s", o, r.String()) - } - } -} - -func TestTXT(t *testing.T) { - // Test single entry TXT record - rr, err := NewRR(`_raop._tcp.local. 60 IN TXT "single value"`) - if err != nil { - t.Error("failed to parse single value TXT record", err) - } else if rr, ok := rr.(*TXT); !ok { - t.Error("wrong type, record should be of type TXT") - } else { - if len(rr.Txt) != 1 { - t.Error("bad size of TXT value:", len(rr.Txt)) - } else if rr.Txt[0] != "single value" { - t.Error("bad single value") - } - if rr.String() != `_raop._tcp.local. 60 IN TXT "single value"` { - t.Error("bad representation of TXT record:", rr.String()) - } - if rr.len() != 28+1+12 { - t.Error("bad size of serialized record:", rr.len()) - } - } - - // Test multi entries TXT record - rr, err = NewRR(`_raop._tcp.local. 
60 IN TXT "a=1" "b=2" "c=3" "d=4"`) - if err != nil { - t.Error("failed to parse multi-values TXT record", err) - } else if rr, ok := rr.(*TXT); !ok { - t.Error("wrong type, record should be of type TXT") - } else { - if len(rr.Txt) != 4 { - t.Error("bad size of TXT multi-value:", len(rr.Txt)) - } else if rr.Txt[0] != "a=1" || rr.Txt[1] != "b=2" || rr.Txt[2] != "c=3" || rr.Txt[3] != "d=4" { - t.Error("bad values in TXT records") - } - if rr.String() != `_raop._tcp.local. 60 IN TXT "a=1" "b=2" "c=3" "d=4"` { - t.Error("bad representation of TXT multi value record:", rr.String()) - } - if rr.len() != 28+1+3+1+3+1+3+1+3 { - t.Error("bad size of serialized multi value record:", rr.len()) - } - } - - // Test empty-string in TXT record - rr, err = NewRR(`_raop._tcp.local. 60 IN TXT ""`) - if err != nil { - t.Error("failed to parse empty-string TXT record", err) - } else if rr, ok := rr.(*TXT); !ok { - t.Error("wrong type, record should be of type TXT") - } else { - if len(rr.Txt) != 1 { - t.Error("bad size of TXT empty-string value:", len(rr.Txt)) - } else if rr.Txt[0] != "" { - t.Error("bad value for empty-string TXT record") - } - if rr.String() != `_raop._tcp.local. 60 IN TXT ""` { - t.Error("bad representation of empty-string TXT record:", rr.String()) - } - if rr.len() != 28+1 { - t.Error("bad size of serialized record:", rr.len()) - } - } - - // Test TXT record with chunk larger than 255 bytes, they should be split up, by the parser - s := "" - for i := 0; i < 255; i++ { - s += "a" - } - s += "b" - rr, err = NewRR(`test.local. 60 IN TXT "` + s + `"`) - if err != nil { - t.Error("failed to parse empty-string TXT record", err) - } - if rr.(*TXT).Txt[1] != "b" { - t.Errorf("Txt should have two chunk, last one my be 'b', but is %s", rr.(*TXT).Txt[1]) - } - t.Log(rr.String()) -} - -func TestTypeXXXX(t *testing.T) { - _, err := NewRR("example.com IN TYPE1234 \\# 4 aabbccdd") - if err != nil { - t.Errorf("failed to parse TYPE1234 RR: %v", err) - } - _, err = NewRR("example.com IN TYPE655341 \\# 8 aabbccddaabbccdd") - if err == nil { - t.Errorf("this should not work, for TYPE655341") - } - _, err = NewRR("example.com IN TYPE1 \\# 4 0a000001") - if err == nil { - t.Errorf("this should not work") - } -} - -func TestPTR(t *testing.T) { - _, err := NewRR("144.2.0.192.in-addr.arpa. 900 IN PTR ilouse03146p0\\(.example.com.") - if err != nil { - t.Error("failed to parse ", err) - } -} - -func TestDigit(t *testing.T) { - tests := map[string]byte{ - "miek\\000.nl. 100 IN TXT \"A\"": 0, - "miek\\001.nl. 100 IN TXT \"A\"": 1, - "miek\\254.nl. 100 IN TXT \"A\"": 254, - "miek\\255.nl. 100 IN TXT \"A\"": 255, - "miek\\256.nl. 100 IN TXT \"A\"": 0, - "miek\\257.nl. 100 IN TXT \"A\"": 1, - "miek\\004.nl. 100 IN TXT \"A\"": 4, - } - for s, i := range tests { - r, err := NewRR(s) - buf := make([]byte, 40) - if err != nil { - t.Fatalf("failed to parse %v", err) - } - PackRR(r, buf, 0, nil, false) - t.Log(buf) - if buf[5] != i { - t.Fatalf("5 pos must be %d, is %d", i, buf[5]) - } - r1, _, _ := UnpackRR(buf, 0) - if r1.Header().Ttl != 100 { - t.Fatalf("TTL should %d, is %d", 100, r1.Header().Ttl) - } - } -} - -func TestParseRRSIGTimestamp(t *testing.T) { - tests := map[string]bool{ - `miek.nl. IN RRSIG SOA 8 2 43200 20140210031301 20140111031301 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, - `miek.nl. IN RRSIG SOA 8 2 43200 315565800 4102477800 12051 miek.nl. 
MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, - } - for r := range tests { - _, err := NewRR(r) - if err != nil { - t.Error(err) - } - } -} - -func TestTxtEqual(t *testing.T) { - rr1 := new(TXT) - rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - rr1.Txt = []string{"a\"a", "\"", "b"} - rr2, _ := NewRR(rr1.String()) - if rr1.String() != rr2.String() { - // This is not an error, but keep this test. - t.Errorf("these two TXT records should match:\n%s\n%s", rr1.String(), rr2.String()) - } - t.Logf("%s\n%s", rr1.String(), rr2.String()) -} - -func TestTxtLong(t *testing.T) { - rr1 := new(TXT) - rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} - // Make a long txt record, this breaks when sending the packet, - // but not earlier. - rr1.Txt = []string{"start-"} - for i := 0; i < 200; i++ { - rr1.Txt[0] += "start-" - } - str := rr1.String() - if len(str) < len(rr1.Txt[0]) { - t.Error("string conversion should work") - } -} - -// Basically, don't crash. -func TestMalformedPackets(t *testing.T) { - var packets = []string{ - "0021641c0000000100000000000078787878787878787878787303636f6d0000100001", - } - - // com = 63 6f 6d - for _, packet := range packets { - data, _ := hex.DecodeString(packet) - // for _, v := range data { - // t.Log(v) - // } - var msg Msg - msg.Unpack(data) - // println(msg.String()) - } -} - -type algorithm struct { - name uint8 - bits int -} - -func TestNewPrivateKey(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - algorithms := []algorithm{ - algorithm{ECDSAP256SHA256, 256}, - algorithm{ECDSAP384SHA384, 384}, - algorithm{RSASHA1, 1024}, - algorithm{RSASHA256, 2048}, - algorithm{DSA, 1024}, - } - - for _, algo := range algorithms { - key := new(DNSKEY) - key.Hdr.Rrtype = TypeDNSKEY - key.Hdr.Name = "miek.nl." 
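- // the fields below follow RFC 4034: Flags 256 sets the Zone Key bit
- // (a zone-signing key), Protocol is fixed at 3, and the algorithm
- // comes from the table driving this test.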
- key.Hdr.Class = ClassINET - key.Hdr.Ttl = 14400 - key.Flags = 256 - key.Protocol = 3 - key.Algorithm = algo.name - privkey, err := key.Generate(algo.bits) - if err != nil { - t.Fatal(err) - } - - newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey)) - if err != nil { - t.Error(key.String()) - t.Error(key.PrivateKeyString(privkey)) - t.Fatal(err) - } - - switch newPrivKey := newPrivKey.(type) { - case *RSAPrivateKey: - (*rsa.PrivateKey)(newPrivKey).Precompute() - } - - if !reflect.DeepEqual(privkey, newPrivKey) { - t.Errorf("[%v] Private keys differ:\n%#v\n%#v", AlgorithmToString[algo.name], privkey, newPrivKey) - } - } -} - -// special input test -func TestNewRRSpecial(t *testing.T) { - var ( - rr RR - err error - expect string - ) - - rr, err = NewRR("; comment") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("$ORIGIN foo.") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR(" ") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("\n") - expect = "" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr != nil { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } - - rr, err = NewRR("foo. A 1.1.1.1\nbar. A 2.2.2.2") - expect = "foo.\t3600\tIN\tA\t1.1.1.1" - if err != nil { - t.Errorf("unexpected err: %v", err) - } - if rr == nil || rr.String() != expect { - t.Errorf("unexpected result: [%s] != [%s]", rr, expect) - } -} - -func TestPrintfVerbsRdata(t *testing.T) { - x, _ := NewRR("www.miek.nl. IN MX 20 mx.miek.nl.") - if Field(x, 1) != "20" { - t.Errorf("should be 20") - } - if Field(x, 2) != "mx.miek.nl." { - t.Errorf("should be mx.miek.nl.") - } - - x, _ = NewRR("www.miek.nl. IN A 127.0.0.1") - if Field(x, 1) != "127.0.0.1" { - t.Errorf("should be 127.0.0.1") - } - - x, _ = NewRR("www.miek.nl. IN AAAA ::1") - if Field(x, 1) != "::1" { - t.Errorf("should be ::1") - } - - x, _ = NewRR("www.miek.nl. IN NSEC a.miek.nl. A NS SOA MX AAAA") - if Field(x, 1) != "a.miek.nl." { - t.Errorf("should be a.miek.nl.") - } - if Field(x, 2) != "A NS SOA MX AAAA" { - t.Errorf("should be A NS SOA MX AAAA") - } - - x, _ = NewRR("www.miek.nl. IN TXT \"first\" \"second\"") - if Field(x, 1) != "first second" { - t.Errorf("should be first second") - } - if Field(x, 0) != "" { - t.Errorf("should be empty") - } -} - -func TestParseIPSECKEY(t *testing.T) { - tests := []string{ - "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - "38.2.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", - - "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - "38.2.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", - - "38.2.0.192.in-addr.arpa. 
7200 IN IPSECKEY ( 10 1 2 192.0.2.3 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - "38.2.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 1 2 192.0.2.3 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", - - "38.1.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - "38.1.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", - - "0.d.4.0.3.0.e.f.f.f.3.f.0.1.2.0 7200 IN IPSECKEY ( 10 2 2 2001:0DB8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", - "0.d.4.0.3.0.e.f.f.f.3.f.0.1.2.0.\t7200\tIN\tIPSECKEY\t10 2 2 2001:db8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", - } - for i := 0; i < len(tests)-1; i++ { - t1 := tests[i] - e1 := tests[i+1] - r, err := NewRR(t1) - if err != nil { - t.Errorf("failed to parse IPSECKEY %v", err) - continue - } - if r.String() != e1 { - t.Errorf("these two IPSECKEY records should match:\n%s\n%s", r.String(), e1) - } - i++ - } -} - -func TestParseTokenOverflow(t *testing.T) { - _, err := NewRR("_443._tcp.example.org. IN TLSA 0 0 0 308205e8308204d0a00302010202100411de8f53b462f6a5a861b712ec6b59300d06092a864886f70d01010b05003070310b300906035504061302555331153013060355040a130c446967694365727420496e6331193017060355040b13107777772e64696769636572742e636f6d312f302d06035504031326446967694365727420534841322048696768204173737572616e636520536572766572204341301e170d3134313130363030303030305a170d3135313131333132303030305a3081a5310b3009060355040613025553311330110603550408130a43616c69666f726e6961311430120603550407130b4c6f7320416e67656c6573313c303a060355040a1333496e7465726e657420436f72706f726174696f6e20666f722041737369676e6564204e616d657320616e64204e756d6265727331133011060355040b130a546563686e6f6c6f6779311830160603550403130f7777772e6578616d706c652e6f726730820122300d06092a864886f70d01010105000382010f003082010a02820101009e663f52a3d18cb67cdfed547408a4e47e4036538988da2798da3b6655f7240d693ed1cb3fe6d6ad3a9e657ff6efa86b83b0cad24e5d31ff2bf70ec3b78b213f1b4bf61bdc669cbbc07d67154128ca92a9b3cbb4213a836fb823ddd4d7cc04918314d25f06086fa9970ba17e357cca9b458c27eb71760ab95e3f9bc898ae89050ae4d09ba2f7e4259d9ff1e072a6971b18355a8b9e53670c3d5dbdbd283f93a764e71b3a4140ca0746090c08510e2e21078d7d07844bf9c03865b531a0bf2ee766bc401f6451c5a1e6f6fb5d5c1d6a97a0abe91ae8b02e89241e07353909ccd5b41c46de207c06801e08f20713603827f2ae3e68cf15ef881d7e0608f70742e30203010001a382024630820242301f0603551d230418301680145168ff90af0207753cccd9656462a212b859723b301d0603551d0e04160414b000a7f422e9b1ce216117c4c46e7164c8e60c553081810603551d11047a3078820f7777772e6578616d706c652e6f7267820b6578616d706c652e636f6d820b6578616d706c652e656475820b6578616d706c652e6e6574820b6578616d706c652e6f7267820f7777772e6578616d706c652e636f6d820f7777772e6578616d706c652e656475820f7777772e6578616d706c652e6e6574300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b0601050507030230750603551d1f046e306c3034a032a030862e687474703a2f2f63726c332e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c3034a032a030862e687474703a2f2f63726c342e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c30420603551d20043b3039303706096086480186fd6c0101302a302806082b06010505070201161c68747470733a2f2f7777772e64696769636572742e636f6d2f43505330818306082b0601050507010104773075302406082b060105050730018618687474703a2f2f6f6373702e64696769636572742e636f6d304d06082b060105050730028641687474703a2f2f636163657274732e64696769636572742e636f6d2f44696769436572745348413248
6967684173737572616e636553657276657243412e637274300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101005eac2124dedb3978a86ff3608406acb542d3cb54cb83facd63aec88144d6a1bf15dbf1f215c4a73e241e582365cba9ea50dd306541653b3513af1a0756c1b2720e8d112b34fb67181efad9c4609bdc670fb025fa6e6d42188161b026cf3089a08369c2f3609fc84bcc3479140c1922ede430ca8dbac2b2a3cdacb305ba15dc7361c4c3a5e6daa99cb446cb221b28078a7a944efba70d96f31ac143d959bccd2fd50e30c325ea2624fb6b6dbe9344dbcf133bfbd5b4e892d635dbf31596451672c6b65ba5ac9b3cddea92b35dab1065cae3c8cb6bb450a62ea2f72ea7c6bdc7b65fa09b012392543734083c7687d243f8d0375304d99ccd2e148966a8637a6797") - if err == nil { - t.Fatalf("token overflow should return an error") - } - t.Logf("err: %s\n", err) -} - -func TestParseTLSA(t *testing.T) { - lt := []string{ - "_443._tcp.example.org.\t3600\tIN\tTLSA\t1 1 1 c22be239f483c08957bc106219cc2d3ac1a308dfbbdd0a365f17b9351234cf00", - "_443._tcp.example.org.\t3600\tIN\tTLSA\t2 1 2 4e85f45179e9cd6e0e68e2eb5be2e85ec9b92d91c609caf3ef0315213e3f92ece92c38397a607214de95c7fadc0ad0f1c604a469a0387959745032c0d51492f3", - "_443._tcp.example.org.\t3600\tIN\tTLSA\t3 0 2 69ec8d2277360b215d0cd956b0e2747108dff34b27d461a41c800629e38ee6c2d1230cc9e8e36711330adc6766e6ff7c5fbb37f106f248337c1a20ad682888d2", - } - for _, o := range lt { - rr, err := NewRR(o) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseSSHFP(t *testing.T) { - lt := []string{ - "test.example.org.\t300\tSSHFP\t1 2 (\n" + - "\t\t\t\t\tBC6533CDC95A79078A39A56EA7635984ED655318ADA9\n" + - "\t\t\t\t\tB6159E30723665DA95BB )", - "test.example.org.\t300\tSSHFP\t1 2 ( BC6533CDC 95A79078A39A56EA7635984ED655318AD A9B6159E3072366 5DA95BB )", - } - result := "test.example.org.\t300\tIN\tSSHFP\t1 2 BC6533CDC95A79078A39A56EA7635984ED655318ADA9B6159E30723665DA95BB" - for _, o := range lt { - rr, err := NewRR(o) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != result { - t.Errorf("`%s' should be equal to\n\n`%s', but is \n`%s'", o, result, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseHINFO(t *testing.T) { - dt := map[string]string{ - "example.net. HINFO A B": "example.net. 3600 IN HINFO \"A\" \"B\"", - "example.net. HINFO \"A\" \"B\"": "example.net. 3600 IN HINFO \"A\" \"B\"", - "example.net. HINFO A B C D E F": "example.net. 3600 IN HINFO \"A\" \"B C D E F\"", - "example.net. HINFO AB": "example.net. 3600 IN HINFO \"AB\" \"\"", - // "example.net. HINFO PC-Intel-700mhz \"Redhat Linux 7.1\"": "example.net. 3600 IN HINFO \"PC-Intel-700mhz\" \"Redhat Linux 7.1\"", - // This one is recommended in Pro Bind book http://www.zytrax.com/books/dns/ch8/hinfo.html - // but effectively, even Bind would replace it to correctly formed text when you AXFR - // TODO: remove this set of comments or figure support for quoted/unquoted combinations in endingToTxtSlice function - } - for i, o := range dt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestParseCAA(t *testing.T) { - lt := map[string]string{ - "example.net. 
CAA 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", - "example.net. CAA 0 issuewild \"symantec.com; stuff\"": "example.net.\t3600\tIN\tCAA\t0 issuewild \"symantec.com; stuff\"", - "example.net. CAA 128 tbs \"critical\"": "example.net.\t3600\tIN\tCAA\t128 tbs \"critical\"", - "example.net. CAA 2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"": "example.net.\t3600\tIN\tCAA\t2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"", - "example.net. TYPE257 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", - } - for i, o := range lt { - rr, err := NewRR(i) - if err != nil { - t.Error("failed to parse RR: ", err) - continue - } - if rr.String() != o { - t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) - } else { - t.Logf("RR is OK: `%s'", rr.String()) - } - } -} - -func TestPackCAA(t *testing.T) { - m := new(Msg) - record := new(CAA) - record.Hdr = RR_Header{Name: "example.com.", Rrtype: TypeCAA, Class: ClassINET, Ttl: 0} - record.Tag = "issue" - record.Value = "symantec.com" - record.Flag = 1 - - m.Answer = append(m.Answer, record) - bytes, err := m.Pack() - if err != nil { - t.Fatalf("failed to pack msg: %v", err) - } - if err := m.Unpack(bytes); err != nil { - t.Fatalf("failed to unpack msg: %v", err) - } - if len(m.Answer) != 1 { - t.Fatalf("incorrect number of answers unpacked") - } - rr := m.Answer[0].(*CAA) - if rr.Tag != "issue" { - t.Fatalf("invalid tag for unpacked answer") - } else if rr.Value != "symantec.com" { - t.Fatalf("invalid value for unpacked answer") - } else if rr.Flag != 1 { - t.Fatalf("invalid flag for unpacked answer") - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/privaterr.go b/Godeps/_workspace/src/github.com/miekg/dns/privaterr.go deleted file mode 100644 index a3baa06559..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/privaterr.go +++ /dev/null @@ -1,115 +0,0 @@ -package dns - -import ( - "fmt" - "strings" -) - -// PrivateRdata is an interface used for implementing "Private Use" RR types, see -// RFC 6895. This allows one to experiment with new RR types, without requesting an -// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. -type PrivateRdata interface { - // String returns the text presentaton of the Rdata of the Private RR. - String() string - // Parse parses the Rdata of the private RR. - Parse([]string) error - // Pack is used when packing a private RR into a buffer. - Pack([]byte) (int, error) - // Unpack is used when unpacking a private RR from a buffer. - // TODO(miek): diff. signature than Pack, see edns0.go for instance. - Unpack([]byte) (int, error) - // Copy copies the Rdata. - Copy(PrivateRdata) error - // Len returns the length in octets of the Rdata. - Len() int -} - -// PrivateRR represents an RR that uses a PrivateRdata user-defined type. -// It mocks normal RRs and implements dns.RR interface. -type PrivateRR struct { - Hdr RR_Header - Data PrivateRdata -} - -func mkPrivateRR(rrtype uint16) *PrivateRR { - // Panics if RR is not an instance of PrivateRR. 
- rrfunc, ok := typeToRR[rrtype]
- if !ok {
- panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
- }
-
- anyrr := rrfunc()
- switch rr := anyrr.(type) {
- case *PrivateRR:
- return rr
- }
- panic(fmt.Sprintf("dns: RR is not a PrivateRR, typeToRR[%d] generator returned %T", rrtype, anyrr))
-}
-
-func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
-func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
-
-// Private len and copy parts to satisfy RR interface.
-func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
-func (r *PrivateRR) copy() RR {
- // make new RR like this:
- rr := mkPrivateRR(r.Hdr.Rrtype)
- newh := r.Hdr.copyHeader()
- rr.Hdr = *newh
-
- err := r.Data.Copy(rr.Data)
- if err != nil {
- panic("dns: got value that could not be used to copy Private rdata")
- }
- return rr
-}
-
-// PrivateHandle registers a private resource record type. It requires the
-// string and numeric representation of the private RR type, and a generator function, as arguments.
-func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
- rtypestr = strings.ToUpper(rtypestr)
-
- typeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
- TypeToString[rtype] = rtypestr
- StringToType[rtypestr] = rtype
-
- setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
- rr := mkPrivateRR(h.Rrtype)
- rr.Hdr = h
-
- var l lex
- text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
- FETCH:
- for {
- // TODO(miek): we could also be returning _QUOTE, this might or might not
- // be an issue (basically parsing TXT becomes hard)
- switch l = <-c; l.value {
- case zNewline, zEOF:
- break FETCH
- case zString:
- text = append(text, l.token)
- }
- }
-
- err := rr.Data.Parse(text)
- if err != nil {
- return nil, &ParseError{f, err.Error(), l}, ""
- }
-
- return rr, nil, ""
- }
-
- typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
-}
-
-// PrivateHandleRemove removes definitions required to support private RR type.
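-//
-// PrivateHandle and PrivateHandleRemove are typically paired, as in the
-// tests below (TypeISBN is 0x0F01 there; RFC 6895 reserves 0xFF00-0xFFFE
-// for private use):
-//
-//	dns.PrivateHandle("ISBN", TypeISBN, NewISBN)
-//	defer dns.PrivateHandleRemove(TypeISBN)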
-func PrivateHandleRemove(rtype uint16) { - rtypestr, ok := TypeToString[rtype] - if ok { - delete(typeToRR, rtype) - delete(TypeToString, rtype) - delete(typeToparserFunc, rtype) - delete(StringToType, rtypestr) - } - return -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/privaterr_test.go b/Godeps/_workspace/src/github.com/miekg/dns/privaterr_test.go deleted file mode 100644 index d45084c689..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/privaterr_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package dns_test - -import ( - "strings" - "testing" - - "github.com/miekg/dns" -) - -const TypeISBN uint16 = 0x0F01 - -// A crazy new RR type :) -type ISBN struct { - x string // rdata with 10 or 13 numbers, dashes or spaces allowed -} - -func NewISBN() dns.PrivateRdata { return &ISBN{""} } - -func (rd *ISBN) Len() int { return len([]byte(rd.x)) } -func (rd *ISBN) String() string { return rd.x } - -func (rd *ISBN) Parse(txt []string) error { - rd.x = strings.TrimSpace(strings.Join(txt, " ")) - return nil -} - -func (rd *ISBN) Pack(buf []byte) (int, error) { - b := []byte(rd.x) - n := copy(buf, b) - if n != len(b) { - return n, dns.ErrBuf - } - return n, nil -} - -func (rd *ISBN) Unpack(buf []byte) (int, error) { - rd.x = string(buf) - return len(buf), nil -} - -func (rd *ISBN) Copy(dest dns.PrivateRdata) error { - isbn, ok := dest.(*ISBN) - if !ok { - return dns.ErrRdata - } - isbn.x = rd.x - return nil -} - -var testrecord = strings.Join([]string{"example.org.", "3600", "IN", "ISBN", "12-3 456789-0-123"}, "\t") - -func TestPrivateText(t *testing.T) { - dns.PrivateHandle("ISBN", TypeISBN, NewISBN) - defer dns.PrivateHandleRemove(TypeISBN) - - rr, err := dns.NewRR(testrecord) - if err != nil { - t.Fatal(err) - } - if rr.String() != testrecord { - t.Errorf("record string representation did not match original %#v != %#v", rr.String(), testrecord) - } else { - t.Log(rr.String()) - } -} - -func TestPrivateByteSlice(t *testing.T) { - dns.PrivateHandle("ISBN", TypeISBN, NewISBN) - defer dns.PrivateHandleRemove(TypeISBN) - - rr, err := dns.NewRR(testrecord) - if err != nil { - t.Fatal(err) - } - - buf := make([]byte, 100) - off, err := dns.PackRR(rr, buf, 0, nil, false) - if err != nil { - t.Errorf("got error packing ISBN: %v", err) - } - - custrr := rr.(*dns.PrivateRR) - if ln := custrr.Data.Len() + len(custrr.Header().Name) + 11; ln != off { - t.Errorf("offset is not matching to length of Private RR: %d!=%d", off, ln) - } - - rr1, off1, err := dns.UnpackRR(buf[:off], 0) - if err != nil { - t.Errorf("got error unpacking ISBN: %v", err) - } - - if off1 != off { - t.Errorf("Offset after unpacking differs: %d != %d", off1, off) - } - - if rr1.String() != testrecord { - t.Errorf("Record string representation did not match original %#v != %#v", rr1.String(), testrecord) - } else { - t.Log(rr1.String()) - } -} - -const TypeVERSION uint16 = 0x0F02 - -type VERSION struct { - x string -} - -func NewVersion() dns.PrivateRdata { return &VERSION{""} } - -func (rd *VERSION) String() string { return rd.x } -func (rd *VERSION) Parse(txt []string) error { - rd.x = strings.TrimSpace(strings.Join(txt, " ")) - return nil -} - -func (rd *VERSION) Pack(buf []byte) (int, error) { - b := []byte(rd.x) - n := copy(buf, b) - if n != len(b) { - return n, dns.ErrBuf - } - return n, nil -} - -func (rd *VERSION) Unpack(buf []byte) (int, error) { - rd.x = string(buf) - return len(buf), nil -} - -func (rd *VERSION) Copy(dest dns.PrivateRdata) error { - isbn, ok := dest.(*VERSION) - if !ok { - return dns.ErrRdata - } 
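- // the type assertion above guarantees dest is a *VERSION; copying the
- // single string field is a complete deep copy for this rdata.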
-	isbn.x = rd.x
-	return nil
-}
-
-func (rd *VERSION) Len() int {
-	return len([]byte(rd.x))
-}
-
-var smallzone = `$ORIGIN example.org.
-@ SOA sns.dns.icann.org. noc.dns.icann.org. (
-	2014091518 7200 3600 1209600 3600
-)
-	A 1.2.3.4
-ok ISBN 1231-92110-12
-go VERSION (
-	1.3.1 ; comment
-)
-www ISBN 1231-92110-16
-* CNAME @
-`
-
-func TestPrivateZoneParser(t *testing.T) {
-	dns.PrivateHandle("ISBN", TypeISBN, NewISBN)
-	dns.PrivateHandle("VERSION", TypeVERSION, NewVersion)
-	defer dns.PrivateHandleRemove(TypeISBN)
-	defer dns.PrivateHandleRemove(TypeVERSION)
-
-	r := strings.NewReader(smallzone)
-	for x := range dns.ParseZone(r, ".", "") {
-		if err := x.Error; err != nil {
-			t.Fatal(err)
-		}
-		t.Log(x.RR)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/rawmsg.go b/Godeps/_workspace/src/github.com/miekg/dns/rawmsg.go
deleted file mode 100644
index f138b7761d..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/rawmsg.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package dns
-
-// These raw* functions do not use reflection, they directly set the values
-// in the buffer. They are faster than their reflection counterparts.
-
-// rawSetId sets the message id in buf.
-func rawSetId(msg []byte, i uint16) bool {
-	if len(msg) < 2 {
-		return false
-	}
-	msg[0], msg[1] = packUint16(i)
-	return true
-}
-
-// rawSetQuestionLen sets the length of the question section.
-func rawSetQuestionLen(msg []byte, i uint16) bool {
-	if len(msg) < 6 {
-		return false
-	}
-	msg[4], msg[5] = packUint16(i)
-	return true
-}
-
-// rawSetAnswerLen sets the length of the answer section.
-func rawSetAnswerLen(msg []byte, i uint16) bool {
-	if len(msg) < 8 {
-		return false
-	}
-	msg[6], msg[7] = packUint16(i)
-	return true
-}
-
-// rawSetNsLen sets the length of the authority section.
-func rawSetNsLen(msg []byte, i uint16) bool {
-	if len(msg) < 10 {
-		return false
-	}
-	msg[8], msg[9] = packUint16(i)
-	return true
-}
-
-// rawSetExtraLen sets the length of the additional section.
-func rawSetExtraLen(msg []byte, i uint16) bool {
-	if len(msg) < 12 {
-		return false
-	}
-	msg[10], msg[11] = packUint16(i)
-	return true
-}
-
-// rawSetRdlength sets the rdlength in the header of
-// the RR. The offset 'off' must be positioned at the
-// start of the header of the RR, 'end' must be the
-// end of the RR.
-func rawSetRdlength(msg []byte, off, end int) bool {
-	l := len(msg)
-Loop:
-	for {
-		if off+1 > l {
-			return false
-		}
-		c := int(msg[off])
-		off++
-		switch c & 0xC0 {
-		case 0x00:
-			if c == 0x00 {
-				// End of the domainname
-				break Loop
-			}
-			if off+c > l {
-				return false
-			}
-			off += c
-
-		case 0xC0:
-			// pointer, next byte included, ends domainname
-			off++
-			break Loop
-		}
-	}
-	// The domainname has been seen, we are at the start of the fixed part in the header.
-	// Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length.
-	off += 2 + 2 + 4
-	if off+2 > l {
-		return false
-	}
-	// off+1 is the end of the header, 'end' is the end of the rr
-	// so 'end' - 'off+2' is the length of the rdata
-	rdatalen := end - (off + 2)
-	if rdatalen > 0xFFFF {
-		return false
-	}
-	msg[off], msg[off+1] = packUint16(uint16(rdatalen))
-	return true
-}
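The raw* setters above all hinge on the fixed 12-byte DNS header layout from RFC 1035: six 16-bit fields at fixed big-endian byte offsets (ID at 0, flags at 2, QDCOUNT at 4, ANCOUNT at 6, NSCOUNT at 8, ARCOUNT at 10). A minimal standalone sketch of the same offset arithmetic, using only the standard library instead of the package's packUint16 helper:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// RFC 1035 header: ID (bytes 0-1), flags (2-3), QDCOUNT (4-5),
	// ANCOUNT (6-7), NSCOUNT (8-9), ARCOUNT (10-11), all big endian.
	msg := make([]byte, 12)
	binary.BigEndian.PutUint16(msg[0:2], 0x1234) // what rawSetId writes
	binary.BigEndian.PutUint16(msg[4:6], 1)      // what rawSetQuestionLen writes
	binary.BigEndian.PutUint16(msg[10:12], 1)    // what rawSetExtraLen writes
	fmt.Printf("% x\n", msg)
}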
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/scanner.go b/Godeps/_workspace/src/github.com/miekg/dns/scanner.go
deleted file mode 100644
index c29bc2f388..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/scanner.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package dns
-
-// Implement a simple scanner, return a byte stream from an io reader.
-
-import (
-	"bufio"
-	"io"
-	"text/scanner"
-)
-
-type scan struct {
-	src      *bufio.Reader
-	position scanner.Position
-	eof      bool // Have we just seen an eof
-}
-
-func scanInit(r io.Reader) *scan {
-	s := new(scan)
-	s.src = bufio.NewReader(r)
-	s.position.Line = 1
-	return s
-}
-
-// tokenText returns the next byte from the input.
-func (s *scan) tokenText() (byte, error) {
-	c, err := s.src.ReadByte()
-	if err != nil {
-		return c, err
-	}
-	// delay the newline handling until the next token is delivered,
-	// fixes off-by-one errors when reporting a parse error.
-	if s.eof {
-		s.position.Line++
-		s.position.Column = 0
-		s.eof = false
-	}
-	if c == '\n' {
-		s.eof = true
-		return c, nil
-	}
-	s.position.Column++
-	return c, nil
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/server.go b/Godeps/_workspace/src/github.com/miekg/dns/server.go
deleted file mode 100644
index 5e4ec92ba9..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/server.go
+++ /dev/null
@@ -1,630 +0,0 @@
-// DNS server implementation.
-
-package dns
-
-import (
-	"bytes"
-	"io"
-	"net"
-	"sync"
-	"time"
-)
-
-// Handler is implemented by any value that implements ServeDNS.
-type Handler interface {
-	ServeDNS(w ResponseWriter, r *Msg)
-}
-
-// A ResponseWriter interface is used by a DNS handler to
-// construct a DNS response.
-type ResponseWriter interface {
-	// LocalAddr returns the net.Addr of the server
-	LocalAddr() net.Addr
-	// RemoteAddr returns the net.Addr of the client that sent the current request.
-	RemoteAddr() net.Addr
-	// WriteMsg writes a reply back to the client.
-	WriteMsg(*Msg) error
-	// Write writes a raw buffer back to the client.
-	Write([]byte) (int, error)
-	// Close closes the connection.
-	Close() error
-	// TsigStatus returns the status of the Tsig.
-	TsigStatus() error
-	// TsigTimersOnly sets the tsig timers only boolean.
-	TsigTimersOnly(bool)
-	// Hijack lets the caller take over the connection.
-	// After a call to Hijack(), the DNS package will not do anything with the connection.
-	Hijack()
-}
-
-type response struct {
-	hijacked       bool // connection has been hijacked by handler
-	tsigStatus     error
-	tsigTimersOnly bool
-	tsigRequestMAC string
-	tsigSecret     map[string]string // the tsig secrets
-	udp            *net.UDPConn      // i/o connection if UDP was used
-	tcp            *net.TCPConn      // i/o connection if TCP was used
-	udpSession     *SessionUDP       // oob data to get egress interface right
-	remoteAddr     net.Addr          // address of the client
-}
-
-// ServeMux is a DNS request multiplexer. It matches the
-// zone name of each incoming request against a list of
-// registered patterns and calls the handler for the pattern
-// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning
-// that queries for the DS record are redirected to the parent zone (if that
-// is also registered), otherwise the child gets the query.
-// ServeMux is also safe for concurrent access from multiple goroutines.
-type ServeMux struct {
-	z map[string]Handler
-	m *sync.RWMutex
-}
-
-// NewServeMux allocates and returns a new ServeMux.
-func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} }
-
-// DefaultServeMux is the default ServeMux used by Serve.
-var DefaultServeMux = NewServeMux()
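ServeMux.match (shown below) lowercases the query name and walks it label by label, so the most specific registered zone wins, "." acts as a catch-all, and DS queries prefer the parent zone. A hedged usage sketch of that dispatch behaviour, with made-up zone names:

package main

import "github.com/miekg/dns"

func main() {
	mux := dns.NewServeMux()
	// Longest registered suffix wins: a query for www.example.org. lands here.
	mux.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})
	// "." is the wildcard of last resort; answer SERVFAIL for anything else.
	mux.Handle(".", dns.HandlerFunc(dns.HandleFailed))
	// mux satisfies Handler and can be handed to a Server (see below).
	var _ dns.Handler = mux
}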
-// The HandlerFunc type is an adapter to allow the use of
-// ordinary functions as DNS handlers. If f is a function
-// with the appropriate signature, HandlerFunc(f) is a
-// Handler object that calls f.
-type HandlerFunc func(ResponseWriter, *Msg)
-
-// ServeDNS calls f(w, r).
-func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
-	f(w, r)
-}
-
-// HandleFailed is a HandlerFunc that returns SERVFAIL for every request it gets.
-func HandleFailed(w ResponseWriter, r *Msg) {
-	m := new(Msg)
-	m.SetRcode(r, RcodeServerFailure)
-	// does not matter if this write fails
-	w.WriteMsg(m)
-}
-
-func failedHandler() Handler { return HandlerFunc(HandleFailed) }
-
-// ListenAndServe starts a server on the address and network specified. It invokes
-// the handler for incoming queries.
-func ListenAndServe(addr string, network string, handler Handler) error {
-	server := &Server{Addr: addr, Net: network, Handler: handler}
-	return server.ListenAndServe()
-}
-
-// ActivateAndServe activates a server with a listener from systemd,
-// l and p should not both be non-nil.
-// If both l and p are non-nil, only p will be used.
-// It invokes the handler for incoming queries.
-func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
-	server := &Server{Listener: l, PacketConn: p, Handler: handler}
-	return server.ActivateAndServe()
-}
-
-func (mux *ServeMux) match(q string, t uint16) Handler {
-	mux.m.RLock()
-	defer mux.m.RUnlock()
-	var handler Handler
-	b := make([]byte, len(q)) // worst case, one label of length q
-	off := 0
-	end := false
-	for {
-		l := len(q[off:])
-		for i := 0; i < l; i++ {
-			b[i] = q[off+i]
-			if b[i] >= 'A' && b[i] <= 'Z' {
-				b[i] |= ('a' - 'A')
-			}
-		}
-		if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
-			if t != TypeDS {
-				return h
-			}
-			// Continue for DS to see if we have a parent too, if so delegate to the parent
-			handler = h
-		}
-		off, end = NextLabel(q, off)
-		if end {
-			break
-		}
-	}
-	// Wildcard match, if we have found nothing try the root zone as a last resort.
-	if h, ok := mux.z["."]; ok {
-		return h
-	}
-	return handler
-}
-
-// Handle adds a handler to the ServeMux for pattern.
-func (mux *ServeMux) Handle(pattern string, handler Handler) {
-	if pattern == "" {
-		panic("dns: invalid pattern " + pattern)
-	}
-	mux.m.Lock()
-	mux.z[Fqdn(pattern)] = handler
-	mux.m.Unlock()
-}
-
-// HandleFunc adds a handler function to the ServeMux for pattern.
-func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
-	mux.Handle(pattern, HandlerFunc(handler))
-}
-
-// HandleRemove deregisters the handler specific for pattern from the ServeMux.
-func (mux *ServeMux) HandleRemove(pattern string) {
-	if pattern == "" {
-		panic("dns: invalid pattern " + pattern)
-	}
-	// don't need a mutex here, because deleting is OK, even if the
-	// entry is not there.
-	delete(mux.z, Fqdn(pattern))
-}
-
-// ServeDNS dispatches the request to the handler whose
-// pattern most closely matches the request message. If DefaultServeMux
-// is used the correct thing for DS queries is done: a possible parent
-// is sought.
-// If no handler is found a standard SERVFAIL message is returned.
-// If the request message does not have exactly one question in the
-// question section a SERVFAIL is returned, unless Unsafe is true.
-func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
-	var h Handler
-	if len(request.Question) < 1 { // allow more than one question
-		h = failedHandler()
-	} else {
-		if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
-			h = failedHandler()
-		}
-	}
-	h.ServeDNS(w, request)
-}
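The package-level Handle/HandleFunc wrappers below register on DefaultServeMux, and the ListenAndServe helper above wraps a one-shot Server; a nil handler falls back to DefaultServeMux (see the Server struct below). A minimal sketch, with illustrative zone and address:

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Registers on DefaultServeMux; zone and address are illustrative.
	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})
	defer dns.HandleRemove("example.org.")
	// A nil handler means the Server falls back to DefaultServeMux.
	log.Fatal(dns.ListenAndServe("127.0.0.1:5353", "udp", nil))
}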
-// Handle registers the handler with the given pattern
-// in the DefaultServeMux. The documentation for
-// ServeMux explains how patterns are matched.
-func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
-
-// HandleRemove deregisters the handler with the given pattern
-// in the DefaultServeMux.
-func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
-
-// HandleFunc registers the handler function with the given pattern
-// in the DefaultServeMux.
-func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
-	DefaultServeMux.HandleFunc(pattern, handler)
-}
-
-// A Server defines parameters for running a DNS server.
-type Server struct {
-	// Address to listen on, ":dns" if empty.
-	Addr string
-	// If "tcp" it will invoke a TCP listener, otherwise a UDP one.
-	Net string
-	// TCP Listener to use, this is to aid in systemd's socket activation.
-	Listener net.Listener
-	// UDP "Listener" to use, this is to aid in systemd's socket activation.
-	PacketConn net.PacketConn
-	// Handler to invoke, dns.DefaultServeMux if nil.
-	Handler Handler
-	// Default buffer size to use to read incoming UDP messages. If not set
-	// it defaults to MinMsgSize (512 B).
-	UDPSize int
-	// The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
-	ReadTimeout time.Duration
-	// The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
-	WriteTimeout time.Duration
-	// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
-	IdleTimeout func() time.Duration
-	// Secret(s) for Tsig; maps TSIG key name to its base64-encoded secret.
-	TsigSecret map[string]string
-	// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
-	// the handler. It will specifically not check if the query has the QR bit not set.
-	Unsafe bool
-	// If NotifyStartedFunc is set, it is called once the server has started listening.
-	NotifyStartedFunc func()
-
-	// For graceful shutdown.
-	stopUDP chan bool
-	stopTCP chan bool
-	wgUDP   sync.WaitGroup
-	wgTCP   sync.WaitGroup
-
-	// make start/shutdown not racy
-	lock    sync.Mutex
-	started bool
-}
-
-// ListenAndServe starts a nameserver on the configured address in *Server.
-func (srv *Server) ListenAndServe() error {
-	srv.lock.Lock()
-	if srv.started {
-		srv.lock.Unlock()
-		return &Error{err: "server already started"}
-	}
-	srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
-	srv.started = true
-	srv.lock.Unlock()
-	addr := srv.Addr
-	if addr == "" {
-		addr = ":domain"
-	}
-	if srv.UDPSize == 0 {
-		srv.UDPSize = MinMsgSize
-	}
-	switch srv.Net {
-	case "tcp", "tcp4", "tcp6":
-		a, e := net.ResolveTCPAddr(srv.Net, addr)
-		if e != nil {
-			return e
-		}
-		l, e := net.ListenTCP(srv.Net, a)
-		if e != nil {
-			return e
-		}
-		srv.Listener = l
-		return srv.serveTCP(l)
-	case "udp", "udp4", "udp6":
-		a, e := net.ResolveUDPAddr(srv.Net, addr)
-		if e != nil {
-			return e
-		}
-		l, e := net.ListenUDP(srv.Net, a)
-		if e != nil {
-			return e
-		}
-		if e := setUDPSocketOptions(l); e != nil {
-			return e
-		}
-		srv.PacketConn = l
-		return srv.serveUDP(l)
-	}
-	return &Error{err: "bad network"}
-}
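Constructing a Server directly exposes the knobs above (UDP buffer size, read/write timeouts) and pairs with the graceful Shutdown defined below. A sketch assuming handlers were registered on DefaultServeMux as in the previous example; the address and timings are illustrative:

package main

import (
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	srv := &dns.Server{
		Addr:         "127.0.0.1:5353", // illustrative address
		Net:          "udp",
		Handler:      nil, // nil falls back to dns.DefaultServeMux
		UDPSize:      4096,
		ReadTimeout:  2 * time.Second,
		WriteTimeout: 2 * time.Second,
	}
	go func() {
		// Give the server a moment, then shut it down gracefully;
		// Shutdown waits for in-flight queries, up to the read timeout.
		time.Sleep(5 * time.Second)
		if err := srv.Shutdown(); err != nil {
			log.Println("shutdown:", err)
		}
	}()
	// Returns once Shutdown completes.
	log.Println(srv.ListenAndServe())
}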
-// ActivateAndServe starts a nameserver with the PacketConn or Listener
-// configured in *Server. Its main use is to start a server from systemd.
-func (srv *Server) ActivateAndServe() error {
-	srv.lock.Lock()
-	if srv.started {
-		srv.lock.Unlock()
-		return &Error{err: "server already started"}
-	}
-	srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
-	srv.started = true
-	srv.lock.Unlock()
-	if srv.PacketConn != nil {
-		if srv.UDPSize == 0 {
-			srv.UDPSize = MinMsgSize
-		}
-		if t, ok := srv.PacketConn.(*net.UDPConn); ok {
-			if e := setUDPSocketOptions(t); e != nil {
-				return e
-			}
-			return srv.serveUDP(t)
-		}
-	}
-	if srv.Listener != nil {
-		if t, ok := srv.Listener.(*net.TCPListener); ok {
-			return srv.serveTCP(t)
-		}
-	}
-	return &Error{err: "bad listeners"}
-}
-
-// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and
-// ActivateAndServe will return. All in-progress queries are completed before the server
-// is taken down. If the Shutdown takes longer than the read timeout an error
-// is returned.
-func (srv *Server) Shutdown() error {
-	srv.lock.Lock()
-	if !srv.started {
-		srv.lock.Unlock()
-		return &Error{err: "server not started"}
-	}
-	srv.started = false
-	srv.lock.Unlock()
-	net, addr := srv.Net, srv.Addr
-	switch {
-	case srv.Listener != nil:
-		a := srv.Listener.Addr()
-		net, addr = a.Network(), a.String()
-	case srv.PacketConn != nil:
-		a := srv.PacketConn.LocalAddr()
-		net, addr = a.Network(), a.String()
-	}
-
-	fin := make(chan bool)
-	switch net {
-	case "tcp", "tcp4", "tcp6":
-		go func() {
-			srv.stopTCP <- true
-			srv.wgTCP.Wait()
-			fin <- true
-		}()
-
-	case "udp", "udp4", "udp6":
-		go func() {
-			srv.stopUDP <- true
-			srv.wgUDP.Wait()
-			fin <- true
-		}()
-	}
-
-	c := &Client{Net: net}
-	go c.Exchange(new(Msg), addr) // extra query to help ReadXXX loop to pass
-
-	select {
-	case <-time.After(srv.getReadTimeout()):
-		return &Error{err: "server shutdown is pending"}
-	case <-fin:
-		return nil
-	}
-}
-
-// getReadTimeout is a helper func to use the system timeout if the server did not intend to change it.
-func (srv *Server) getReadTimeout() time.Duration {
-	rtimeout := dnsTimeout
-	if srv.ReadTimeout != 0 {
-		rtimeout = srv.ReadTimeout
-	}
-	return rtimeout
-}
-
-// serveTCP starts a TCP listener for the server.
-// Each request is handled in a separate goroutine.
-func (srv *Server) serveTCP(l *net.TCPListener) error {
-	defer l.Close()
-
-	if srv.NotifyStartedFunc != nil {
-		srv.NotifyStartedFunc()
-	}
-
-	handler := srv.Handler
-	if handler == nil {
-		handler = DefaultServeMux
-	}
-	rtimeout := srv.getReadTimeout()
-	// deadline is not used here
-	for {
-		rw, e := l.AcceptTCP()
-		if e != nil {
-			continue
-		}
-		m, e := srv.readTCP(rw, rtimeout)
-		select {
-		case <-srv.stopTCP:
-			return nil
-		default:
-		}
-		if e != nil {
-			continue
-		}
-		srv.wgTCP.Add(1)
-		go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
-	}
-	panic("dns: not reached")
-}
-
-// serveUDP starts a UDP listener for the server.
-// Each request is handled in a separate goroutine.
-func (srv *Server) serveUDP(l *net.UDPConn) error {
-	defer l.Close()
-
-	if srv.NotifyStartedFunc != nil {
-		srv.NotifyStartedFunc()
-	}
-
-	handler := srv.Handler
-	if handler == nil {
-		handler = DefaultServeMux
-	}
-	rtimeout := srv.getReadTimeout()
-	// deadline is not used here
-	for {
-		m, s, e := srv.readUDP(l, rtimeout)
-		select {
-		case <-srv.stopUDP:
-			return nil
-		default:
-		}
-		if e != nil {
-			continue
-		}
-		srv.wgUDP.Add(1)
-		go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
-	}
-	panic("dns: not reached")
-}
-
-// Serve a new connection.
-func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t *net.TCPConn) { - w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s} - q := 0 - defer func() { - if u != nil { - srv.wgUDP.Done() - } - if t != nil { - srv.wgTCP.Done() - } - }() -Redo: - req := new(Msg) - err := req.Unpack(m) - if err != nil { // Send a FormatError back - x := new(Msg) - x.SetRcodeFormatError(req) - w.WriteMsg(x) - goto Exit - } - if !srv.Unsafe && req.Response { - goto Exit - } - - w.tsigStatus = nil - if w.tsigSecret != nil { - if t := req.IsTsig(); t != nil { - secret := t.Hdr.Name - if _, ok := w.tsigSecret[secret]; !ok { - w.tsigStatus = ErrKeyAlg - } - w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false) - w.tsigTimersOnly = false - w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC - } - } - h.ServeDNS(w, req) // Writes back to the client - -Exit: - if w.hijacked { - return // client calls Close() - } - if u != nil { // UDP, "close" and return - w.Close() - return - } - idleTimeout := tcpIdleTimeout - if srv.IdleTimeout != nil { - idleTimeout = srv.IdleTimeout() - } - m, e := srv.readTCP(w.tcp, idleTimeout) - if e == nil { - q++ - // TODO(miek): make this number configurable? - if q > 128 { // close socket after this many queries - w.Close() - return - } - goto Redo - } - w.Close() - return -} - -func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) { - conn.SetReadDeadline(time.Now().Add(timeout)) - l := make([]byte, 2) - n, err := conn.Read(l) - if err != nil || n != 2 { - if err != nil { - return nil, err - } - return nil, ErrShortRead - } - length, _ := unpackUint16(l, 0) - if length == 0 { - return nil, ErrShortRead - } - m := make([]byte, int(length)) - n, err = conn.Read(m[:int(length)]) - if err != nil || n == 0 { - if err != nil { - return nil, err - } - return nil, ErrShortRead - } - i := n - for i < int(length) { - j, err := conn.Read(m[i:int(length)]) - if err != nil { - return nil, err - } - i += j - } - n = i - m = m[:n] - return m, nil -} - -func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - conn.SetReadDeadline(time.Now().Add(timeout)) - m := make([]byte, srv.UDPSize) - n, s, e := ReadFromSessionUDP(conn, m) - if e != nil || n == 0 { - if e != nil { - return nil, nil, e - } - return nil, nil, ErrShortRead - } - m = m[:n] - return m, s, nil -} - -// WriteMsg implements the ResponseWriter.WriteMsg method. -func (w *response) WriteMsg(m *Msg) (err error) { - var data []byte - if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) - if t := m.IsTsig(); t != nil { - data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) - if err != nil { - return err - } - _, err = w.Write(data) - return err - } - } - data, err = m.Pack() - if err != nil { - return err - } - _, err = w.Write(data) - return err -} - -// Write implements the ResponseWriter.Write method. -func (w *response) Write(m []byte) (int, error) { - switch { - case w.udp != nil: - n, err := WriteToSessionUDP(w.udp, m, w.udpSession) - return n, err - case w.tcp != nil: - lm := len(m) - if lm < 2 { - return 0, io.ErrShortBuffer - } - if lm > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - l := make([]byte, 2, 2+lm) - l[0], l[1] = packUint16(uint16(lm)) - m = append(l, m...) 
- - n, err := io.Copy(w.tcp, bytes.NewReader(m)) - return int(n), err - } - panic("not reached") -} - -// LocalAddr implements the ResponseWriter.LocalAddr method. -func (w *response) LocalAddr() net.Addr { - if w.tcp != nil { - return w.tcp.LocalAddr() - } - return w.udp.LocalAddr() -} - -// RemoteAddr implements the ResponseWriter.RemoteAddr method. -func (w *response) RemoteAddr() net.Addr { return w.remoteAddr } - -// TsigStatus implements the ResponseWriter.TsigStatus method. -func (w *response) TsigStatus() error { return w.tsigStatus } - -// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. -func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } - -// Hijack implements the ResponseWriter.Hijack method. -func (w *response) Hijack() { w.hijacked = true } - -// Close implements the ResponseWriter.Close method -func (w *response) Close() error { - // Can't close the udp conn, as that is actually the listener. - if w.tcp != nil { - e := w.tcp.Close() - w.tcp = nil - return e - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/server_test.go b/Godeps/_workspace/src/github.com/miekg/dns/server_test.go deleted file mode 100644 index dff0fb528b..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/server_test.go +++ /dev/null @@ -1,399 +0,0 @@ -package dns - -import ( - "fmt" - "net" - "runtime" - "sync" - "testing" -) - -func HelloServer(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - w.WriteMsg(m) -} - -func HelloServerBadId(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - m.Id += 1 - - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - w.WriteMsg(m) -} - -func AnotherHelloServer(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello example"}} - w.WriteMsg(m) -} - -func RunLocalUDPServer(laddr string) (*Server, string, error) { - pc, err := net.ListenPacket("udp", laddr) - if err != nil { - return nil, "", err - } - server := &Server{PacketConn: pc} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - pc.Close() - }() - - waitLock.Lock() - return server, pc.LocalAddr().String(), nil -} - -func RunLocalUDPServerUnsafe(laddr string) (*Server, string, error) { - pc, err := net.ListenPacket("udp", laddr) - if err != nil { - return nil, "", err - } - server := &Server{PacketConn: pc, Unsafe: true} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - pc.Close() - }() - - waitLock.Lock() - return server, pc.LocalAddr().String(), nil -} - -func RunLocalTCPServer(laddr string) (*Server, string, error) { - l, err := net.Listen("tcp", laddr) - if err != nil { - return nil, "", err - } - - server := &Server{Listener: l} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = waitLock.Unlock - - go func() { - server.ActivateAndServe() - l.Close() - }() - - waitLock.Lock() - return server, l.Addr().String(), nil -} - -func TestServing(t *testing.T) { - HandleFunc("miek.nl.", HelloServer) - 
HandleFunc("example.com.", AnotherHelloServer) - defer HandleRemove("miek.nl.") - defer HandleRemove("example.com.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - r, _, err := c.Exchange(m, addrstr) - if err != nil || len(r.Extra) == 0 { - t.Fatal("failed to exchange miek.nl", err) - } - txt := r.Extra[0].(*TXT).Txt[0] - if txt != "Hello world" { - t.Error("Unexpected result for miek.nl", txt, "!= Hello world") - } - - m.SetQuestion("example.com.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("failed to exchange example.com", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("Unexpected result for example.com", txt, "!= Hello example") - } - - // Test Mixes cased as noticed by Ask. - m.SetQuestion("eXaMplE.cOm.", TypeTXT) - r, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Error("failed to exchange eXaMplE.cOm", err) - } - txt = r.Extra[0].(*TXT).Txt[0] - if txt != "Hello example" { - t.Error("Unexpected result for example.com", txt, "!= Hello example") - } -} - -func BenchmarkServe(b *testing.B) { - b.StopTimer() - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - a := runtime.GOMAXPROCS(4) - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - b.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl", TypeSOA) - - b.StartTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, addrstr) - } - runtime.GOMAXPROCS(a) -} - -func benchmarkServe6(b *testing.B) { - b.StopTimer() - HandleFunc("miek.nl.", HelloServer) - defer HandleRemove("miek.nl.") - a := runtime.GOMAXPROCS(4) - s, addrstr, err := RunLocalUDPServer("[::1]:0") - if err != nil { - b.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl", TypeSOA) - - b.StartTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, addrstr) - } - runtime.GOMAXPROCS(a) -} - -func HelloServerCompress(w ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - m.Extra = make([]RR, 1) - m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} - m.Compress = true - w.WriteMsg(m) -} - -func BenchmarkServeCompress(b *testing.B) { - b.StopTimer() - HandleFunc("miek.nl.", HelloServerCompress) - defer HandleRemove("miek.nl.") - a := runtime.GOMAXPROCS(4) - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - b.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl", TypeSOA) - b.StartTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, addrstr) - } - runtime.GOMAXPROCS(a) -} - -func TestDotAsCatchAllWildcard(t *testing.T) { - mux := NewServeMux() - mux.Handle(".", HandlerFunc(HelloServer)) - mux.Handle("example.com.", HandlerFunc(AnotherHelloServer)) - - handler := mux.match("www.miek.nl.", TypeTXT) - if handler == nil { - t.Error("wildcard match failed") - } - - handler = mux.match("www.example.com.", TypeTXT) - if handler == nil { - t.Error("example.com match failed") - } - - handler = mux.match("a.www.example.com.", TypeTXT) - if handler == nil { - t.Error("a.www.example.com match failed") - } - - handler = mux.match("boe.", TypeTXT) - if handler == nil { 
- t.Error("boe. match failed") - } -} - -func TestCaseFolding(t *testing.T) { - mux := NewServeMux() - mux.Handle("_udp.example.com.", HandlerFunc(HelloServer)) - - handler := mux.match("_dns._udp.example.com.", TypeSRV) - if handler == nil { - t.Error("case sensitive characters folded") - } - - handler = mux.match("_DNS._UDP.EXAMPLE.COM.", TypeSRV) - if handler == nil { - t.Error("case insensitive characters not folded") - } -} - -func TestRootServer(t *testing.T) { - mux := NewServeMux() - mux.Handle(".", HandlerFunc(HelloServer)) - - handler := mux.match(".", TypeNS) - if handler == nil { - t.Error("root match failed") - } -} - -type maxRec struct { - max int - sync.RWMutex -} - -var M = new(maxRec) - -func HelloServerLargeResponse(resp ResponseWriter, req *Msg) { - m := new(Msg) - m.SetReply(req) - m.Authoritative = true - m1 := 0 - M.RLock() - m1 = M.max - M.RUnlock() - for i := 0; i < m1; i++ { - aRec := &A{ - Hdr: RR_Header{ - Name: req.Question[0].Name, - Rrtype: TypeA, - Class: ClassINET, - Ttl: 0, - }, - A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i+1)).To4(), - } - m.Answer = append(m.Answer, aRec) - } - resp.WriteMsg(m) -} - -func TestServingLargeResponses(t *testing.T) { - HandleFunc("example.", HelloServerLargeResponse) - defer HandleRemove("example.") - - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - // Create request - m := new(Msg) - m.SetQuestion("web.service.example.", TypeANY) - - c := new(Client) - c.Net = "udp" - M.Lock() - M.max = 2 - M.Unlock() - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } - // This must fail - M.Lock() - M.max = 20 - M.Unlock() - _, _, err = c.Exchange(m, addrstr) - if err == nil { - t.Error("failed to fail exchange, this should generate packet error") - } - // But this must work again - c.UDPSize = 7000 - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Errorf("failed to exchange: %v", err) - } -} - -func TestServingResponse(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - HandleFunc("miek.nl.", HelloServer) - s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - - c := new(Client) - m := new(Msg) - m.SetQuestion("miek.nl.", TypeTXT) - m.Response = false - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("failed to exchange", err) - } - m.Response = true - _, _, err = c.Exchange(m, addrstr) - if err == nil { - t.Fatal("exchanged response message") - } - - s.Shutdown() - s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() - - m.Response = true - _, _, err = c.Exchange(m, addrstr) - if err != nil { - t.Fatal("could exchanged response message in Unsafe mode") - } -} - -func TestShutdownTCP(t *testing.T) { - s, _, err := RunLocalTCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - err = s.Shutdown() - if err != nil { - t.Errorf("Could not shutdown test TCP server, %v", err) - } -} - -func TestShutdownUDP(t *testing.T) { - s, _, err := RunLocalUDPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - err = s.Shutdown() - if err != nil { - t.Errorf("Could not shutdown test UDP server, %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/sig0.go 
b/Godeps/_workspace/src/github.com/miekg/dns/sig0.go deleted file mode 100644 index 55d1a4ed49..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/sig0.go +++ /dev/null @@ -1,221 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "math/big" - "strings" - "time" -) - -// Sign signs a dns.Msg. It fills the signature with the appropriate data. -// The SIG record should have the SignerName, KeyTag, Algorithm, Inception -// and Expiration set. -func (rr *SIG) Sign(k PrivateKey, m *Msg) ([]byte, error) { - if k == nil { - return nil, ErrPrivKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return nil, ErrKey - } - rr.Header().Rrtype = TypeSIG - rr.Header().Class = ClassANY - rr.Header().Ttl = 0 - rr.Header().Name = "." - rr.OrigTtl = 0 - rr.TypeCovered = 0 - rr.Labels = 0 - - buf := make([]byte, m.Len()+rr.len()) - mbuf, err := m.PackBuffer(buf) - if err != nil { - return nil, err - } - if &buf[0] != &mbuf[0] { - return nil, ErrBuf - } - off, err := PackRR(rr, buf, len(mbuf), nil, false) - if err != nil { - return nil, err - } - buf = buf[:off:cap(buf)] - var hash crypto.Hash - switch rr.Algorithm { - case DSA, RSASHA1: - hash = crypto.SHA1 - case RSASHA256, ECDSAP256SHA256: - hash = crypto.SHA256 - case ECDSAP384SHA384: - hash = crypto.SHA384 - case RSASHA512: - hash = crypto.SHA512 - default: - return nil, ErrAlg - } - hasher := hash.New() - // Write SIG rdata - hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) - // Write message - hasher.Write(buf[:len(mbuf)]) - hashed := hasher.Sum(nil) - - sig, err := k.Sign(hashed, rr.Algorithm) - if err != nil { - return nil, err - } - rr.Signature = toBase64(sig) - buf = append(buf, sig...) - if len(buf) > int(^uint16(0)) { - return nil, ErrBuf - } - // Adjust sig data length - rdoff := len(mbuf) + 1 + 2 + 2 + 4 - rdlen, _ := unpackUint16(buf, rdoff) - rdlen += uint16(len(sig)) - buf[rdoff], buf[rdoff+1] = packUint16(rdlen) - // Adjust additional count - adc, _ := unpackUint16(buf, 10) - adc++ - buf[10], buf[11] = packUint16(adc) - return buf, nil -} - -// Verify validates the message buf using the key k. -// It's assumed that buf is a valid message from which rr was unpacked. 
-func (rr *SIG) Verify(k *KEY, buf []byte) error { - if k == nil { - return ErrKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - var hash crypto.Hash - switch rr.Algorithm { - case DSA, RSASHA1: - hash = crypto.SHA1 - case RSASHA256, ECDSAP256SHA256: - hash = crypto.SHA256 - case ECDSAP384SHA384: - hash = crypto.SHA384 - case RSASHA512: - hash = crypto.SHA512 - default: - return ErrAlg - } - hasher := hash.New() - - buflen := len(buf) - qdc, _ := unpackUint16(buf, 4) - anc, _ := unpackUint16(buf, 6) - auc, _ := unpackUint16(buf, 8) - adc, offset := unpackUint16(buf, 10) - var err error - for i := uint16(0); i < qdc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type and Class - offset += 2 + 2 - } - for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type, Class and TTL - offset += 2 + 2 + 4 - if offset+1 >= buflen { - continue - } - var rdlen uint16 - rdlen, offset = unpackUint16(buf, offset) - offset += int(rdlen) - } - if offset >= buflen { - return &Error{err: "overflowing unpacking signed message"} - } - - // offset should be just prior to SIG - bodyend := offset - // owner name SHOULD be root - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip Type, Class, TTL, RDLen - offset += 2 + 2 + 4 + 2 - sigstart := offset - // Skip Type Covered, Algorithm, Labels, Original TTL - offset += 2 + 1 + 1 + 4 - if offset+4+4 >= buflen { - return &Error{err: "overflow unpacking signed message"} - } - expire := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3]) - offset += 4 - incept := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3]) - offset += 4 - now := uint32(time.Now().Unix()) - if now < incept || now > expire { - return ErrTime - } - // Skip key tag - offset += 2 - var signername string - signername, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // If key has come from the DNS name compression might - // have mangled the case of the name - if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { - return &Error{err: "signer name doesn't match key name"} - } - sigend := offset - hasher.Write(buf[sigstart:sigend]) - hasher.Write(buf[:10]) - hasher.Write([]byte{ - byte((adc - 1) << 8), - byte(adc - 1), - }) - hasher.Write(buf[12:bodyend]) - - hashed := hasher.Sum(nil) - sig := buf[sigend:] - switch k.Algorithm { - case DSA: - pk := k.publicKeyDSA() - sig = sig[1:] - r := big.NewInt(0) - r.SetBytes(sig[:len(sig)/2]) - s := big.NewInt(0) - s.SetBytes(sig[len(sig)/2:]) - if pk != nil { - if dsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - case RSASHA1, RSASHA256, RSASHA512: - pk := k.publicKeyRSA() - if pk != nil { - return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) - } - case ECDSAP256SHA256, ECDSAP384SHA384: - pk := k.publicKeyECDSA() - r := big.NewInt(0) - r.SetBytes(sig[:len(sig)/2]) - s := big.NewInt(0) - s.SetBytes(sig[len(sig)/2:]) - if pk != nil { - if ecdsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - } - return ErrKeyAlg -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go b/Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go deleted file mode 100644 index cdd57ab819..0000000000 --- 
a/Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package dns - -import ( - "testing" - "time" -) - -func TestSIG0(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - m := new(Msg) - m.SetQuestion("example.org.", TypeSOA) - for _, alg := range []uint8{DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256, RSASHA512} { - algstr := AlgorithmToString[alg] - keyrr := new(KEY) - keyrr.Hdr.Name = algstr + "." - keyrr.Hdr.Rrtype = TypeKEY - keyrr.Hdr.Class = ClassINET - keyrr.Algorithm = alg - keysize := 1024 - switch alg { - case ECDSAP256SHA256: - keysize = 256 - case ECDSAP384SHA384: - keysize = 384 - } - pk, err := keyrr.Generate(keysize) - if err != nil { - t.Errorf("Failed to generate key for “%s”: %v", algstr, err) - continue - } - now := uint32(time.Now().Unix()) - sigrr := new(SIG) - sigrr.Hdr.Name = "." - sigrr.Hdr.Rrtype = TypeSIG - sigrr.Hdr.Class = ClassANY - sigrr.Algorithm = alg - sigrr.Expiration = now + 300 - sigrr.Inception = now - 300 - sigrr.KeyTag = keyrr.KeyTag() - sigrr.SignerName = keyrr.Hdr.Name - mb, err := sigrr.Sign(pk, m) - if err != nil { - t.Errorf("Failed to sign message using “%s”: %v", algstr, err) - continue - } - m := new(Msg) - if err := m.Unpack(mb); err != nil { - t.Errorf("Failed to unpack message signed using “%s”: %v", algstr, err) - continue - } - if len(m.Extra) != 1 { - t.Errorf("Missing SIG for message signed using “%s”", algstr) - continue - } - var sigrrwire *SIG - switch rr := m.Extra[0].(type) { - case *SIG: - sigrrwire = rr - default: - t.Errorf("Expected SIG RR, instead: %v", rr) - continue - } - for _, rr := range []*SIG{sigrr, sigrrwire} { - id := "sigrr" - if rr == sigrrwire { - id = "sigrrwire" - } - if err := rr.Verify(keyrr, mb); err != nil { - t.Errorf("Failed to verify “%s” signed SIG(%s): %v", algstr, id, err) - continue - } - } - mb[13]++ - if err := sigrr.Verify(keyrr, mb); err == nil { - t.Errorf("Verify succeeded on an altered message using “%s”", algstr) - continue - } - sigrr.Expiration = 2 - sigrr.Inception = 1 - mb, _ = sigrr.Sign(pk, m) - if err := sigrr.Verify(keyrr, mb); err == nil { - t.Errorf("Verify succeeded on an expired message using “%s”", algstr) - continue - } - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/singleinflight.go b/Godeps/_workspace/src/github.com/miekg/dns/singleinflight.go deleted file mode 100644 index 9573c7d0b8..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/singleinflight.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted for dns package usage by Miek Gieben. - -package dns - -import "sync" -import "time" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - val *Msg - rtt time.Duration - err error - dups int -} - -// singleflight represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type singleflight struct { - sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. 
-func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
-	g.Lock()
-	if g.m == nil {
-		g.m = make(map[string]*call)
-	}
-	if c, ok := g.m[key]; ok {
-		c.dups++
-		g.Unlock()
-		c.wg.Wait()
-		return c.val, c.rtt, c.err, true
-	}
-	c := new(call)
-	c.wg.Add(1)
-	g.m[key] = c
-	g.Unlock()
-
-	c.val, c.rtt, c.err = fn()
-	c.wg.Done()
-
-	g.Lock()
-	delete(g.m, key)
-	g.Unlock()
-
-	return c.val, c.rtt, c.err, c.dups > 0
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/tlsa.go b/Godeps/_workspace/src/github.com/miekg/dns/tlsa.go
deleted file mode 100644
index f027787df3..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/tlsa.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package dns
-
-import (
-	"crypto/sha256"
-	"crypto/sha512"
-	"crypto/x509"
-	"encoding/hex"
-	"errors"
-	"io"
-	"net"
-	"strconv"
-)
-
-// CertificateToDANE converts a certificate to a hex string as used in the TLSA record.
-func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
-	switch matchingType {
-	case 0:
-		switch selector {
-		case 0:
-			return hex.EncodeToString(cert.Raw), nil
-		case 1:
-			return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
-		}
-	case 1:
-		h := sha256.New()
-		switch selector {
-		case 0:
-			io.WriteString(h, string(cert.Raw))
-			return hex.EncodeToString(h.Sum(nil)), nil
-		case 1:
-			io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
-			return hex.EncodeToString(h.Sum(nil)), nil
-		}
-	case 2:
-		h := sha512.New()
-		switch selector {
-		case 0:
-			io.WriteString(h, string(cert.Raw))
-			return hex.EncodeToString(h.Sum(nil)), nil
-		case 1:
-			io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
-			return hex.EncodeToString(h.Sum(nil)), nil
-		}
-	}
-	return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector")
-}
-
-// Sign creates a TLSA record from an SSL certificate.
-func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
-	r.Hdr.Rrtype = TypeTLSA
-	r.Usage = uint8(usage)
-	r.Selector = uint8(selector)
-	r.MatchingType = uint8(matchingType)
-
-	r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Verify verifies a TLSA record against an SSL certificate. If it is OK,
-// a nil error is returned.
-func (r *TLSA) Verify(cert *x509.Certificate) error {
-	c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
-	if err != nil {
-		return err // Not also ErrSig?
-	}
-	if r.Certificate == c {
-		return nil
-	}
-	return ErrSig // ErrSig, really?
-}
-
-// TLSAName returns the owner name of a TLSA resource record as per the
-// rules specified in RFC 6698, Section 3 ("_port._network.name").
-func TLSAName(name, service, network string) (string, error) {
-	if !IsFqdn(name) {
-		return "", ErrFqdn
-	}
-	p, e := net.LookupPort(network, service)
-	if e != nil {
-		return "", e
-	}
-	return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/tsig.go b/Godeps/_workspace/src/github.com/miekg/dns/tsig.go
deleted file mode 100644
index d7bc25056c..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/tsig.go
+++ /dev/null
@@ -1,333 +0,0 @@
-package dns
-
-import (
-	"crypto/hmac"
-	"crypto/md5"
-	"crypto/sha1"
-	"crypto/sha256"
-	"crypto/sha512"
-	"encoding/hex"
-	"hash"
-	"io"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// HMAC hashing codes. These are transmitted as domain names.
-const (
-	HmacMD5    = "hmac-md5.sig-alg.reg.int."
-	HmacSHA1   = "hmac-sha1."
-	HmacSHA256 = "hmac-sha256."
-	HmacSHA512 = "hmac-sha512."
-)
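On the wire, a TSIG-signed exchange just means the client appends a TSIG RR using one of the algorithm names above, and both sides share the same base64 secret (compare Server.TsigSecret earlier in this diff). A hedged client-side sketch; Msg.SetTsig and Client.TsigSecret belong to this package but are not shown in this diff, and the key name, secret, and address are made up:

package main

import (
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	// Key name and secret are illustrative; the secret must be valid base64,
	// and the same map is configured on the server via Server.TsigSecret.
	const keyname = "axfr-key."
	secret := map[string]string{keyname: "dmVyeXNlY3JldGtleTEyMw=="}

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)
	// Attach the "stub" TSIG RR described below; the MAC is filled in by
	// TsigGenerate when the client packs the message.
	m.SetTsig(keyname, dns.HmacSHA256, 300, time.Now().Unix())

	c := &dns.Client{TsigSecret: secret}
	in, _, err := c.Exchange(m, "127.0.0.1:5353") // address illustrative
	if err != nil {
		log.Fatal(err)
	}
	log.Println(in.Rcode)
}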
-
-// TSIG is the RR that holds the transaction signature of a message.
-// See RFC 2845 and RFC 4635.
-type TSIG struct {
-	Hdr        RR_Header
-	Algorithm  string `dns:"domain-name"`
-	TimeSigned uint64 `dns:"uint48"`
-	Fudge      uint16
-	MACSize    uint16
-	MAC        string `dns:"size-hex"`
-	OrigId     uint16
-	Error      uint16
-	OtherLen   uint16
-	OtherData  string `dns:"size-hex"`
-}
-
-func (rr *TSIG) Header() *RR_Header {
-	return &rr.Hdr
-}
-
-// TSIG has no official presentation format, but this will suffice.
-
-func (rr *TSIG) String() string {
-	s := "\n;; TSIG PSEUDOSECTION:\n"
-	s += rr.Hdr.String() +
-		" " + rr.Algorithm +
-		" " + tsigTimeToString(rr.TimeSigned) +
-		" " + strconv.Itoa(int(rr.Fudge)) +
-		" " + strconv.Itoa(int(rr.MACSize)) +
-		" " + strings.ToUpper(rr.MAC) +
-		" " + strconv.Itoa(int(rr.OrigId)) +
-		" " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
-		" " + strconv.Itoa(int(rr.OtherLen)) +
-		" " + rr.OtherData
-	return s
-}
-
-func (rr *TSIG) len() int {
-	return rr.Hdr.len() + len(rr.Algorithm) + 1 + 6 +
-		4 + len(rr.MAC)/2 + 1 + 6 + len(rr.OtherData)/2 + 1
-}
-
-func (rr *TSIG) copy() RR {
-	return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
-}
-
-// The following values must be put in wire format, so that the MAC can be calculated.
-// RFC 2845, section 3.4.2. TSIG Variables.
-type tsigWireFmt struct {
-	// From RR_Header
-	Name  string `dns:"domain-name"`
-	Class uint16
-	Ttl   uint32
-	// Rdata of the TSIG
-	Algorithm  string `dns:"domain-name"`
-	TimeSigned uint64 `dns:"uint48"`
-	Fudge      uint16
-	// MACSize, MAC and OrigId excluded
-	Error     uint16
-	OtherLen  uint16
-	OtherData string `dns:"size-hex"`
-}
-
-// If we have the MAC, use this type to convert it to wiredata.
-// Section 3.4.3. Request MAC
-type macWireFmt struct {
-	MACSize uint16
-	MAC     string `dns:"size-hex"`
-}
-
-// 3.3. Time values used in TSIG calculations
-type timerWireFmt struct {
-	TimeSigned uint64 `dns:"uint48"`
-	Fudge      uint16
-}
-
-// TsigGenerate fills out the TSIG record attached to the message.
-// The message should contain a "stub" TSIG RR with the algorithm, key name
-// (owner name of the RR), time fudge (defaults to 300 seconds) and the
-// current time. The TSIG MAC is saved in that TSIG RR.
-// When TsigGenerate is called for the first time requestMAC is set to the empty string and
-// timersOnly is false.
-// If something goes wrong an error is returned, otherwise it is nil.
-func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { - if m.IsTsig() == nil { - panic("dns: TSIG not last RR in additional") - } - // If we barf here, the caller is to blame - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return nil, "", err - } - - rr := m.Extra[len(m.Extra)-1].(*TSIG) - m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg - mbuf, err := m.Pack() - if err != nil { - return nil, "", err - } - buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) - - t := new(TSIG) - var h hash.Hash - switch rr.Algorithm { - case HmacMD5: - h = hmac.New(md5.New, []byte(rawsecret)) - case HmacSHA1: - h = hmac.New(sha1.New, []byte(rawsecret)) - case HmacSHA256: - h = hmac.New(sha256.New, []byte(rawsecret)) - case HmacSHA512: - h = hmac.New(sha512.New, []byte(rawsecret)) - default: - return nil, "", ErrKeyAlg - } - io.WriteString(h, string(buf)) - t.MAC = hex.EncodeToString(h.Sum(nil)) - t.MACSize = uint16(len(t.MAC) / 2) // Size is half! - - t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} - t.Fudge = rr.Fudge - t.TimeSigned = rr.TimeSigned - t.Algorithm = rr.Algorithm - t.OrigId = m.Id - - tbuf := make([]byte, t.len()) - if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { - tbuf = tbuf[:off] // reset to actual size used - } else { - return nil, "", err - } - mbuf = append(mbuf, tbuf...) - rawSetExtraLen(mbuf, uint16(len(m.Extra)+1)) - return mbuf, t.MAC, nil -} - -// TsigVerify verifies the TSIG on a message. -// If the signature does not validate err contains the -// error, otherwise it is nil. -func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return err - } - // Strip the TSIG from the incoming msg - stripped, tsig, err := stripTsig(msg) - if err != nil { - return err - } - - msgMAC, err := hex.DecodeString(tsig.MAC) - if err != nil { - return err - } - - buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) - - // Fudge factor works both ways. A message can arrive before it was signed because - // of clock skew. - now := uint64(time.Now().Unix()) - ti := now - tsig.TimeSigned - if now < tsig.TimeSigned { - ti = tsig.TimeSigned - now - } - if uint64(tsig.Fudge) < ti { - return ErrTime - } - - var h hash.Hash - switch tsig.Algorithm { - case HmacMD5: - h = hmac.New(md5.New, rawsecret) - case HmacSHA1: - h = hmac.New(sha1.New, rawsecret) - case HmacSHA256: - h = hmac.New(sha256.New, rawsecret) - case HmacSHA512: - h = hmac.New(sha512.New, rawsecret) - default: - return ErrKeyAlg - } - h.Write(buf) - if !hmac.Equal(h.Sum(nil), msgMAC) { - return ErrSig - } - return nil -} - -// Create a wiredata buffer for the MAC calculation. -func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { - var buf []byte - if rr.TimeSigned == 0 { - rr.TimeSigned = uint64(time.Now().Unix()) - } - if rr.Fudge == 0 { - rr.Fudge = 300 // Standard (RFC) default. 
- } - - if requestMAC != "" { - m := new(macWireFmt) - m.MACSize = uint16(len(requestMAC) / 2) - m.MAC = requestMAC - buf = make([]byte, len(requestMAC)) // long enough - n, _ := PackStruct(m, buf, 0) - buf = buf[:n] - } - - tsigvar := make([]byte, DefaultMsgSize) - if timersOnly { - tsig := new(timerWireFmt) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - n, _ := PackStruct(tsig, tsigvar, 0) - tsigvar = tsigvar[:n] - } else { - tsig := new(tsigWireFmt) - tsig.Name = strings.ToLower(rr.Hdr.Name) - tsig.Class = ClassANY - tsig.Ttl = rr.Hdr.Ttl - tsig.Algorithm = strings.ToLower(rr.Algorithm) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - tsig.Error = rr.Error - tsig.OtherLen = rr.OtherLen - tsig.OtherData = rr.OtherData - n, _ := PackStruct(tsig, tsigvar, 0) - tsigvar = tsigvar[:n] - } - - if requestMAC != "" { - x := append(buf, msgbuf...) - buf = append(x, tsigvar...) - } else { - buf = append(msgbuf, tsigvar...) - } - return buf -} - -// Strip the TSIG from the raw message. -func stripTsig(msg []byte) ([]byte, *TSIG, error) { - // Copied from msg.go's Unpack() - // Header. - var dh Header - var err error - dns := new(Msg) - rr := new(TSIG) - off := 0 - tsigoff := 0 - if off, err = UnpackStruct(&dh, msg, off); err != nil { - return nil, nil, err - } - if dh.Arcount == 0 { - return nil, nil, ErrNoSig - } - // Rcode, see msg.go Unpack() - if int(dh.Bits&0xF) == RcodeNotAuth { - return nil, nil, ErrAuth - } - - // Arrays. - dns.Question = make([]Question, dh.Qdcount) - dns.Answer = make([]RR, dh.Ancount) - dns.Ns = make([]RR, dh.Nscount) - dns.Extra = make([]RR, dh.Arcount) - - for i := 0; i < len(dns.Question); i++ { - off, err = UnpackStruct(&dns.Question[i], msg, off) - if err != nil { - return nil, nil, err - } - } - for i := 0; i < len(dns.Answer); i++ { - dns.Answer[i], off, err = UnpackRR(msg, off) - if err != nil { - return nil, nil, err - } - } - for i := 0; i < len(dns.Ns); i++ { - dns.Ns[i], off, err = UnpackRR(msg, off) - if err != nil { - return nil, nil, err - } - } - for i := 0; i < len(dns.Extra); i++ { - tsigoff = off - dns.Extra[i], off, err = UnpackRR(msg, off) - if err != nil { - return nil, nil, err - } - if dns.Extra[i].Header().Rrtype == TypeTSIG { - rr = dns.Extra[i].(*TSIG) - // Adjust Arcount. - arcount, _ := unpackUint16(msg, 10) - msg[10], msg[11] = packUint16(arcount - 1) - break - } - } - if rr == nil { - return nil, nil, ErrNoSig - } - return msg[:tsigoff], rr, nil -} - -// Translate the TSIG time signed into a date. There is no -// need for RFC1982 calculations as this date is 48 bits. -func tsigTimeToString(t uint64) string { - ti := time.Unix(int64(t), 0).UTC() - return ti.Format("20060102150405") -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/types.go b/Godeps/_workspace/src/github.com/miekg/dns/types.go deleted file mode 100644 index 49813b1cf9..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/types.go +++ /dev/null @@ -1,1765 +0,0 @@ -package dns - -import ( - "encoding/base64" - "fmt" - "net" - "strconv" - "strings" - "time" -) - -type ( - // Type is a DNS type. - Type uint16 - // Class is a DNS class. - Class uint16 - // Name is a DNS domain name. - Name string -) - -// Packet formats - -// Wire constants and supported types. 
-const (
-	// valid RR_Header.Rrtype and Question.qtype
-
-	TypeNone       uint16 = 0
-	TypeA          uint16 = 1
-	TypeNS         uint16 = 2
-	TypeMD         uint16 = 3
-	TypeMF         uint16 = 4
-	TypeCNAME      uint16 = 5
-	TypeSOA        uint16 = 6
-	TypeMB         uint16 = 7
-	TypeMG         uint16 = 8
-	TypeMR         uint16 = 9
-	TypeNULL       uint16 = 10
-	TypeWKS        uint16 = 11
-	TypePTR        uint16 = 12
-	TypeHINFO      uint16 = 13
-	TypeMINFO      uint16 = 14
-	TypeMX         uint16 = 15
-	TypeTXT        uint16 = 16
-	TypeRP         uint16 = 17
-	TypeAFSDB      uint16 = 18
-	TypeX25        uint16 = 19
-	TypeISDN       uint16 = 20
-	TypeRT         uint16 = 21
-	TypeNSAP       uint16 = 22
-	TypeNSAPPTR    uint16 = 23
-	TypeSIG        uint16 = 24
-	TypeKEY        uint16 = 25
-	TypePX         uint16 = 26
-	TypeGPOS       uint16 = 27
-	TypeAAAA       uint16 = 28
-	TypeLOC        uint16 = 29
-	TypeNXT        uint16 = 30
-	TypeEID        uint16 = 31
-	TypeNIMLOC     uint16 = 32
-	TypeSRV        uint16 = 33
-	TypeATMA       uint16 = 34
-	TypeNAPTR      uint16 = 35
-	TypeKX         uint16 = 36
-	TypeCERT       uint16 = 37
-	TypeDNAME      uint16 = 39
-	TypeOPT        uint16 = 41 // EDNS
-	TypeDS         uint16 = 43
-	TypeSSHFP      uint16 = 44
-	TypeIPSECKEY   uint16 = 45
-	TypeRRSIG      uint16 = 46
-	TypeNSEC       uint16 = 47
-	TypeDNSKEY     uint16 = 48
-	TypeDHCID      uint16 = 49
-	TypeNSEC3      uint16 = 50
-	TypeNSEC3PARAM uint16 = 51
-	TypeTLSA       uint16 = 52
-	TypeHIP        uint16 = 55
-	TypeNINFO      uint16 = 56
-	TypeRKEY       uint16 = 57
-	TypeTALINK     uint16 = 58
-	TypeCDS        uint16 = 59
-	TypeCDNSKEY    uint16 = 60
-	TypeOPENPGPKEY uint16 = 61
-	TypeSPF        uint16 = 99
-	TypeUINFO      uint16 = 100
-	TypeUID        uint16 = 101
-	TypeGID        uint16 = 102
-	TypeUNSPEC     uint16 = 103
-	TypeNID        uint16 = 104
-	TypeL32        uint16 = 105
-	TypeL64        uint16 = 106
-	TypeLP         uint16 = 107
-	TypeEUI48      uint16 = 108
-	TypeEUI64      uint16 = 109
-
-	TypeTKEY uint16 = 249
-	TypeTSIG uint16 = 250
-
-	// valid Question.Qtype only
-
-	TypeIXFR  uint16 = 251
-	TypeAXFR  uint16 = 252
-	TypeMAILB uint16 = 253
-	TypeMAILA uint16 = 254
-	TypeANY   uint16 = 255
-
-	TypeURI      uint16 = 256
-	TypeCAA      uint16 = 257
-	TypeTA       uint16 = 32768
-	TypeDLV      uint16 = 32769
-	TypeReserved uint16 = 65535
-
-	// valid Question.Qclass
-
-	ClassINET   = 1
-	ClassCSNET  = 2
-	ClassCHAOS  = 3
-	ClassHESIOD = 4
-	ClassNONE   = 254
-	ClassANY    = 255
-
-	// Msg.rcode
-
-	RcodeSuccess        = 0
-	RcodeFormatError    = 1
-	RcodeServerFailure  = 2
-	RcodeNameError      = 3
-	RcodeNotImplemented = 4
-	RcodeRefused        = 5
-	RcodeYXDomain       = 6
-	RcodeYXRrset        = 7
-	RcodeNXRrset        = 8
-	RcodeNotAuth        = 9
-	RcodeNotZone        = 10
-	RcodeBadSig         = 16 // TSIG
-	RcodeBadVers        = 16 // EDNS0
-	RcodeBadKey         = 17
-	RcodeBadTime        = 18
-	RcodeBadMode        = 19 // TKEY
-	RcodeBadName        = 20
-	RcodeBadAlg         = 21
-	RcodeBadTrunc       = 22 // TSIG
-
-	// Opcode, there is no 3
-
-	OpcodeQuery  = 0
-	OpcodeIQuery = 1
-	OpcodeStatus = 2
-	OpcodeNotify = 4
-	OpcodeUpdate = 5
-)
-
-// The wire format for the DNS packet header.
-type Header struct {
-	Id                                 uint16
-	Bits                               uint16
-	Qdcount, Ancount, Nscount, Arcount uint16
-}
-
-const (
-	headerSize = 12
-
-	// Header.Bits
-	_QR = 1 << 15 // query/response (response=1)
-	_AA = 1 << 10 // authoritative
-	_TC = 1 << 9  // truncated
-	_RD = 1 << 8  // recursion desired
-	_RA = 1 << 7  // recursion available
-	_Z  = 1 << 6  // Z
-	_AD = 1 << 5  // authenticated data
-	_CD = 1 << 4  // checking disabled
-
-	LOC_EQUATOR       = 1 << 31 // RFC 1876, Section 2.
-	LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
- - LOC_HOURS = 60 * 1000 - LOC_DEGREES = 60 * LOC_HOURS - - LOC_ALTITUDEBASE = 100000 -) - -// RFC 4398, Section 2.1 -const ( - CertPKIX = 1 + iota - CertSPKI - CertPGP - CertIPIX - CertISPKI - CertIPGP - CertACPKIX - CertIACPKIX - CertURI = 253 - CertOID = 254 -) - -var CertTypeToString = map[uint16]string{ - CertPKIX: "PKIX", - CertSPKI: "SPKI", - CertPGP: "PGP", - CertIPIX: "IPIX", - CertISPKI: "ISPKI", - CertIPGP: "IPGP", - CertACPKIX: "ACPKIX", - CertIACPKIX: "IACPKIX", - CertURI: "URI", - CertOID: "OID", -} - -var StringToCertType = reverseInt16(CertTypeToString) - -// Question holds a DNS question. There can be multiple questions in the -// question section of a message. Usually there is just one. -type Question struct { - Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) - Qtype uint16 - Qclass uint16 -} - -func (q *Question) String() (s string) { - // prefix with ; (as in dig) - s = ";" + sprintName(q.Name) + "\t" - s += Class(q.Qclass).String() + "\t" - s += " " + Type(q.Qtype).String() - return s -} - -func (q *Question) len() int { - l := len(q.Name) + 1 - return l + 4 -} - -type ANY struct { - Hdr RR_Header - // Does not have any rdata -} - -func (rr *ANY) Header() *RR_Header { return &rr.Hdr } -func (rr *ANY) copy() RR { return &ANY{*rr.Hdr.copyHeader()} } -func (rr *ANY) String() string { return rr.Hdr.String() } -func (rr *ANY) len() int { return rr.Hdr.len() } - -type CNAME struct { - Hdr RR_Header - Target string `dns:"cdomain-name"` -} - -func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *CNAME) copy() RR { return &CNAME{*rr.Hdr.copyHeader(), sprintName(rr.Target)} } -func (rr *CNAME) String() string { return rr.Hdr.String() + rr.Target } -func (rr *CNAME) len() int { return rr.Hdr.len() + len(rr.Target) + 1 } - -type HINFO struct { - Hdr RR_Header - Cpu string - Os string -} - -func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *HINFO) copy() RR { return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} } -func (rr *HINFO) String() string { - return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) -} -func (rr *HINFO) len() int { return rr.Hdr.len() + len(rr.Cpu) + len(rr.Os) } - -type MB struct { - Hdr RR_Header - Mb string `dns:"cdomain-name"` -} - -func (rr *MB) Header() *RR_Header { return &rr.Hdr } -func (rr *MB) copy() RR { return &MB{*rr.Hdr.copyHeader(), sprintName(rr.Mb)} } - -func (rr *MB) String() string { return rr.Hdr.String() + rr.Mb } -func (rr *MB) len() int { return rr.Hdr.len() + len(rr.Mb) + 1 } - -type MG struct { - Hdr RR_Header - Mg string `dns:"cdomain-name"` -} - -func (rr *MG) Header() *RR_Header { return &rr.Hdr } -func (rr *MG) copy() RR { return &MG{*rr.Hdr.copyHeader(), rr.Mg} } -func (rr *MG) len() int { l := len(rr.Mg) + 1; return rr.Hdr.len() + l } -func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } - -type MINFO struct { - Hdr RR_Header - Rmail string `dns:"cdomain-name"` - Email string `dns:"cdomain-name"` -} - -func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *MINFO) copy() RR { return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} } - -func (rr *MINFO) String() string { - return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) -} - -func (rr *MINFO) len() int { - l := len(rr.Rmail) + 1 - n := len(rr.Email) + 1 - return rr.Hdr.len() + l + n -} - -type MR struct { - Hdr RR_Header - Mr string `dns:"cdomain-name"` -} - -func (rr *MR) Header() *RR_Header { return &rr.Hdr } -func (rr *MR) copy() RR { 
return &MR{*rr.Hdr.copyHeader(), rr.Mr} } -func (rr *MR) len() int { l := len(rr.Mr) + 1; return rr.Hdr.len() + l } - -func (rr *MR) String() string { - return rr.Hdr.String() + sprintName(rr.Mr) -} - -type MF struct { - Hdr RR_Header - Mf string `dns:"cdomain-name"` -} - -func (rr *MF) Header() *RR_Header { return &rr.Hdr } -func (rr *MF) copy() RR { return &MF{*rr.Hdr.copyHeader(), rr.Mf} } -func (rr *MF) len() int { return rr.Hdr.len() + len(rr.Mf) + 1 } - -func (rr *MF) String() string { - return rr.Hdr.String() + sprintName(rr.Mf) -} - -type MD struct { - Hdr RR_Header - Md string `dns:"cdomain-name"` -} - -func (rr *MD) Header() *RR_Header { return &rr.Hdr } -func (rr *MD) copy() RR { return &MD{*rr.Hdr.copyHeader(), rr.Md} } -func (rr *MD) len() int { return rr.Hdr.len() + len(rr.Md) + 1 } - -func (rr *MD) String() string { - return rr.Hdr.String() + sprintName(rr.Md) -} - -type MX struct { - Hdr RR_Header - Preference uint16 - Mx string `dns:"cdomain-name"` -} - -func (rr *MX) Header() *RR_Header { return &rr.Hdr } -func (rr *MX) copy() RR { return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} } -func (rr *MX) len() int { l := len(rr.Mx) + 1; return rr.Hdr.len() + l + 2 } - -func (rr *MX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) -} - -type AFSDB struct { - Hdr RR_Header - Subtype uint16 - Hostname string `dns:"cdomain-name"` -} - -func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } -func (rr *AFSDB) copy() RR { return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} } -func (rr *AFSDB) len() int { l := len(rr.Hostname) + 1; return rr.Hdr.len() + l + 2 } - -func (rr *AFSDB) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) -} - -type X25 struct { - Hdr RR_Header - PSDNAddress string -} - -func (rr *X25) Header() *RR_Header { return &rr.Hdr } -func (rr *X25) copy() RR { return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} } -func (rr *X25) len() int { return rr.Hdr.len() + len(rr.PSDNAddress) + 1 } - -func (rr *X25) String() string { - return rr.Hdr.String() + rr.PSDNAddress -} - -type RT struct { - Hdr RR_Header - Preference uint16 - Host string `dns:"cdomain-name"` -} - -func (rr *RT) Header() *RR_Header { return &rr.Hdr } -func (rr *RT) copy() RR { return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} } -func (rr *RT) len() int { l := len(rr.Host) + 1; return rr.Hdr.len() + l + 2 } - -func (rr *RT) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) -} - -type NS struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` -} - -func (rr *NS) Header() *RR_Header { return &rr.Hdr } -func (rr *NS) len() int { l := len(rr.Ns) + 1; return rr.Hdr.len() + l } -func (rr *NS) copy() RR { return &NS{*rr.Hdr.copyHeader(), rr.Ns} } - -func (rr *NS) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) -} - -type PTR struct { - Hdr RR_Header - Ptr string `dns:"cdomain-name"` -} - -func (rr *PTR) Header() *RR_Header { return &rr.Hdr } -func (rr *PTR) copy() RR { return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} } -func (rr *PTR) len() int { l := len(rr.Ptr) + 1; return rr.Hdr.len() + l } - -func (rr *PTR) String() string { - return rr.Hdr.String() + sprintName(rr.Ptr) -} - -type RP struct { - Hdr RR_Header - Mbox string `dns:"domain-name"` - Txt string `dns:"domain-name"` -} - -func (rr *RP) Header() *RR_Header { return &rr.Hdr } -func (rr *RP) copy() RR { return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} } -func 
(rr *RP) len() int { return rr.Hdr.len() + len(rr.Mbox) + 1 + len(rr.Txt) + 1 } - -func (rr *RP) String() string { - return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) -} - -type SOA struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` - Mbox string `dns:"cdomain-name"` - Serial uint32 - Refresh uint32 - Retry uint32 - Expire uint32 - Minttl uint32 -} - -func (rr *SOA) Header() *RR_Header { return &rr.Hdr } -func (rr *SOA) copy() RR { - return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} -} - -func (rr *SOA) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + - " " + strconv.FormatInt(int64(rr.Serial), 10) + - " " + strconv.FormatInt(int64(rr.Refresh), 10) + - " " + strconv.FormatInt(int64(rr.Retry), 10) + - " " + strconv.FormatInt(int64(rr.Expire), 10) + - " " + strconv.FormatInt(int64(rr.Minttl), 10) -} - -func (rr *SOA) len() int { - l := len(rr.Ns) + 1 - n := len(rr.Mbox) + 1 - return rr.Hdr.len() + l + n + 20 -} - -type TXT struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *TXT) Header() *RR_Header { return &rr.Hdr } -func (rr *TXT) copy() RR { - cp := make([]string, len(rr.Txt), cap(rr.Txt)) - copy(cp, rr.Txt) - return &TXT{*rr.Hdr.copyHeader(), cp} -} - -func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -func sprintName(s string) string { - src := []byte(s) - dst := make([]byte, 0, len(src)) - for i := 0; i < len(src); { - if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { - dst = append(dst, src[i:i+2]...) - i += 2 - } else { - b, n := nextByte(src, i) - if n == 0 { - i++ // dangling back slash - } else if b == '.' { - dst = append(dst, b) - } else { - dst = appendDomainNameByte(dst, b) - } - i += n - } - } - return string(dst) -} - -func sprintCAAValue(s string) string { - src := []byte(s) - dst := make([]byte, 0, len(src)) - dst = append(dst, '"') - for i := 0; i < len(src); { - if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { - dst = append(dst, src[i:i+2]...) - i += 2 - } else { - b, n := nextByte(src, i) - if n == 0 { - i++ // dangling back slash - } else if b == '.' { - dst = append(dst, b) - } else { - if b < ' ' || b > '~' { - dst = appendByte(dst, b) - } else { - dst = append(dst, b) - } - } - i += n - } - } - dst = append(dst, '"') - return string(dst) -} - -func sprintTxt(txt []string) string { - var out []byte - for i, s := range txt { - if i > 0 { - out = append(out, ` "`...) 
- } else { - out = append(out, '"') - } - bs := []byte(s) - for j := 0; j < len(bs); { - b, n := nextByte(bs, j) - if n == 0 { - break - } - out = appendTXTStringByte(out, b) - j += n - } - out = append(out, '"') - } - return string(out) -} - -func appendDomainNameByte(s []byte, b byte) []byte { - switch b { - case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape - return append(s, '\\', b) - } - return appendTXTStringByte(s, b) -} - -func appendTXTStringByte(s []byte, b byte) []byte { - switch b { - case '\t': - return append(s, '\\', 't') - case '\r': - return append(s, '\\', 'r') - case '\n': - return append(s, '\\', 'n') - case '"', '\\': - return append(s, '\\', b) - } - if b < ' ' || b > '~' { - return appendByte(s, b) - } - return append(s, b) -} - -func appendByte(s []byte, b byte) []byte { - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - return s -} - -func nextByte(b []byte, offset int) (byte, int) { - if offset >= len(b) { - return 0, 0 - } - if b[offset] != '\\' { - // not an escape sequence - return b[offset], 1 - } - switch len(b) - offset { - case 1: // dangling escape - return 0, 0 - case 2, 3: // too short to be \ddd - default: // maybe \ddd - if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { - return dddToByte(b[offset+1:]), 4 - } - } - // not \ddd, maybe a control char - switch b[offset+1] { - case 't': - return '\t', 2 - case 'r': - return '\r', 2 - case 'n': - return '\n', 2 - default: - return b[offset+1], 2 - } -} - -func (rr *TXT) len() int { - l := rr.Hdr.len() - for _, t := range rr.Txt { - l += len(t) + 1 - } - return l -} - -type SPF struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *SPF) Header() *RR_Header { return &rr.Hdr } -func (rr *SPF) copy() RR { - cp := make([]string, len(rr.Txt), cap(rr.Txt)) - copy(cp, rr.Txt) - return &SPF{*rr.Hdr.copyHeader(), cp} -} - -func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -func (rr *SPF) len() int { - l := rr.Hdr.len() - for _, t := range rr.Txt { - l += len(t) + 1 - } - return l -} - -type SRV struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Port uint16 - Target string `dns:"domain-name"` -} - -func (rr *SRV) Header() *RR_Header { return &rr.Hdr } -func (rr *SRV) len() int { l := len(rr.Target) + 1; return rr.Hdr.len() + l + 6 } -func (rr *SRV) copy() RR { - return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} -} - -func (rr *SRV) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Priority)) + " " + - strconv.Itoa(int(rr.Weight)) + " " + - strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) -} - -type NAPTR struct { - Hdr RR_Header - Order uint16 - Preference uint16 - Flags string - Service string - Regexp string - Replacement string `dns:"domain-name"` -} - -func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NAPTR) copy() RR { - return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} -} - -func (rr *NAPTR) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Order)) + " " + - strconv.Itoa(int(rr.Preference)) + " " + - "\"" + rr.Flags + "\" " + - "\"" + rr.Service + "\" " + - "\"" + rr.Regexp + "\" " + - rr.Replacement -} - -func (rr *NAPTR) len() int { - return rr.Hdr.len() + 4 + len(rr.Flags) + 1 + len(rr.Service) + 1 + - len(rr.Regexp) 
+ 1 + len(rr.Replacement) + 1 -} - -// See RFC 4398. -type CERT struct { - Hdr RR_Header - Type uint16 - KeyTag uint16 - Algorithm uint8 - Certificate string `dns:"base64"` -} - -func (rr *CERT) Header() *RR_Header { return &rr.Hdr } -func (rr *CERT) copy() RR { - return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} -} - -func (rr *CERT) String() string { - var ( - ok bool - certtype, algorithm string - ) - if certtype, ok = CertTypeToString[rr.Type]; !ok { - certtype = strconv.Itoa(int(rr.Type)) - } - if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { - algorithm = strconv.Itoa(int(rr.Algorithm)) - } - return rr.Hdr.String() + certtype + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + algorithm + - " " + rr.Certificate -} - -func (rr *CERT) len() int { - return rr.Hdr.len() + 5 + - base64.StdEncoding.DecodedLen(len(rr.Certificate)) -} - -// See RFC 2672. -type DNAME struct { - Hdr RR_Header - Target string `dns:"domain-name"` -} - -func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *DNAME) copy() RR { return &DNAME{*rr.Hdr.copyHeader(), rr.Target} } -func (rr *DNAME) len() int { l := len(rr.Target) + 1; return rr.Hdr.len() + l } - -func (rr *DNAME) String() string { - return rr.Hdr.String() + sprintName(rr.Target) -} - -type A struct { - Hdr RR_Header - A net.IP `dns:"a"` -} - -func (rr *A) Header() *RR_Header { return &rr.Hdr } -func (rr *A) copy() RR { return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} } -func (rr *A) len() int { return rr.Hdr.len() + net.IPv4len } - -func (rr *A) String() string { - if rr.A == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.A.String() -} - -type AAAA struct { - Hdr RR_Header - AAAA net.IP `dns:"aaaa"` -} - -func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } -func (rr *AAAA) copy() RR { return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} } -func (rr *AAAA) len() int { return rr.Hdr.len() + net.IPv6len } - -func (rr *AAAA) String() string { - if rr.AAAA == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.AAAA.String() -} - -type PX struct { - Hdr RR_Header - Preference uint16 - Map822 string `dns:"domain-name"` - Mapx400 string `dns:"domain-name"` -} - -func (rr *PX) Header() *RR_Header { return &rr.Hdr } -func (rr *PX) copy() RR { return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} } -func (rr *PX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) -} -func (rr *PX) len() int { return rr.Hdr.len() + 2 + len(rr.Map822) + 1 + len(rr.Mapx400) + 1 } - -type GPOS struct { - Hdr RR_Header - Longitude string - Latitude string - Altitude string -} - -func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } -func (rr *GPOS) copy() RR { return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} } -func (rr *GPOS) len() int { - return rr.Hdr.len() + len(rr.Longitude) + len(rr.Latitude) + len(rr.Altitude) + 3 -} -func (rr *GPOS) String() string { - return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude -} - -type LOC struct { - Hdr RR_Header - Version uint8 - Size uint8 - HorizPre uint8 - VertPre uint8 - Latitude uint32 - Longitude uint32 - Altitude uint32 -} - -func (rr *LOC) Header() *RR_Header { return &rr.Hdr } -func (rr *LOC) len() int { return rr.Hdr.len() + 4 + 12 } -func (rr *LOC) copy() RR { - return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} -} - -// cmToM takes 
a cm value expressed in RFC1876 SIZE mantissa/exponent -// format and returns a string in m (two decimals for the cm) -func cmToM(m, e uint8) string { - if e < 2 { - if e == 1 { - m *= 10 - } - - return fmt.Sprintf("0.%02d", m) - } - - s := fmt.Sprintf("%d", m) - for e > 2 { - s += "0" - e-- - } - return s -} - -// String returns a string version of a LOC -func (rr *LOC) String() string { - s := rr.Hdr.String() - - lat := rr.Latitude - ns := "N" - if lat > LOC_EQUATOR { - lat = lat - LOC_EQUATOR - } else { - ns = "S" - lat = LOC_EQUATOR - lat - } - h := lat / LOC_DEGREES - lat = lat % LOC_DEGREES - m := lat / LOC_HOURS - lat = lat % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) - - lon := rr.Longitude - ew := "E" - if lon > LOC_PRIMEMERIDIAN { - lon = lon - LOC_PRIMEMERIDIAN - } else { - ew = "W" - lon = LOC_PRIMEMERIDIAN - lon - } - h = lon / LOC_DEGREES - lon = lon % LOC_DEGREES - m = lon / LOC_HOURS - lon = lon % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) - - var alt = float64(rr.Altitude) / 100 - alt -= LOC_ALTITUDEBASE - if rr.Altitude%100 != 0 { - s += fmt.Sprintf("%.2fm ", alt) - } else { - s += fmt.Sprintf("%.0fm ", alt) - } - - s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " - s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " - s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" - - return s -} - -// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931. -type SIG struct { - RRSIG -} - -type RRSIG struct { - Hdr RR_Header - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - Signature string `dns:"base64"` -} - -func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } -func (rr *RRSIG) copy() RR { - return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} -} - -func (rr *RRSIG) String() string { - s := rr.Hdr.String() - s += Type(rr.TypeCovered).String() - s += " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Labels)) + - " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + - " " + TimeToString(rr.Expiration) + - " " + TimeToString(rr.Inception) + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + sprintName(rr.SignerName) + - " " + rr.Signature - return s -} - -func (rr *RRSIG) len() int { - return rr.Hdr.len() + len(rr.SignerName) + 1 + - base64.StdEncoding.DecodedLen(len(rr.Signature)) + 18 -} - -type NSEC struct { - Hdr RR_Header - NextDomain string `dns:"domain-name"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC) copy() RR { - cp := make([]uint16, len(rr.TypeBitMap), cap(rr.TypeBitMap)) - copy(cp, rr.TypeBitMap) - return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, cp} -} - -func (rr *NSEC) String() string { - s := rr.Hdr.String() + sprintName(rr.NextDomain) - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *NSEC) len() int { - l := rr.Hdr.len() + len(rr.NextDomain) + 1 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -type DLV struct { - DS -} - -type CDS struct { - DS -} - -type DS struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest 
string `dns:"hex"` -} - -func (rr *DS) Header() *RR_Header { return &rr.Hdr } -func (rr *DS) len() int { return rr.Hdr.len() + 4 + len(rr.Digest)/2 } -func (rr *DS) copy() RR { - return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} - -func (rr *DS) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -type KX struct { - Hdr RR_Header - Preference uint16 - Exchanger string `dns:"domain-name"` -} - -func (rr *KX) Header() *RR_Header { return &rr.Hdr } -func (rr *KX) len() int { return rr.Hdr.len() + 2 + len(rr.Exchanger) + 1 } -func (rr *KX) copy() RR { return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} } - -func (rr *KX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + sprintName(rr.Exchanger) -} - -type TA struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *TA) Header() *RR_Header { return &rr.Hdr } -func (rr *TA) len() int { return rr.Hdr.len() + 4 + len(rr.Digest)/2 } -func (rr *TA) copy() RR { - return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} - -func (rr *TA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -type TALINK struct { - Hdr RR_Header - PreviousName string `dns:"domain-name"` - NextName string `dns:"domain-name"` -} - -func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } -func (rr *TALINK) copy() RR { return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} } -func (rr *TALINK) len() int { return rr.Hdr.len() + len(rr.PreviousName) + len(rr.NextName) + 2 } - -func (rr *TALINK) String() string { - return rr.Hdr.String() + - sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) -} - -type SSHFP struct { - Hdr RR_Header - Algorithm uint8 - Type uint8 - FingerPrint string `dns:"hex"` -} - -func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } -func (rr *SSHFP) len() int { return rr.Hdr.len() + 2 + len(rr.FingerPrint)/2 } -func (rr *SSHFP) copy() RR { - return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} -} - -func (rr *SSHFP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Type)) + - " " + strings.ToUpper(rr.FingerPrint) -} - -type IPSECKEY struct { - Hdr RR_Header - Precedence uint8 - // GatewayType: 1: A record, 2: AAAA record, 3: domainname. - // 0 is use for no type and GatewayName should be "." then. - GatewayType uint8 - Algorithm uint8 - // Gateway can be an A record, AAAA record or a domain name. 
- GatewayA net.IP `dns:"a"` - GatewayAAAA net.IP `dns:"aaaa"` - GatewayName string `dns:"domain-name"` - PublicKey string `dns:"base64"` -} - -func (rr *IPSECKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *IPSECKEY) copy() RR { - return &IPSECKEY{*rr.Hdr.copyHeader(), rr.Precedence, rr.GatewayType, rr.Algorithm, rr.GatewayA, rr.GatewayAAAA, rr.GatewayName, rr.PublicKey} -} - -func (rr *IPSECKEY) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + - " " + strconv.Itoa(int(rr.GatewayType)) + - " " + strconv.Itoa(int(rr.Algorithm)) - switch rr.GatewayType { - case 0: - fallthrough - case 3: - s += " " + rr.GatewayName - case 1: - s += " " + rr.GatewayA.String() - case 2: - s += " " + rr.GatewayAAAA.String() - default: - s += " ." - } - s += " " + rr.PublicKey - return s -} - -func (rr *IPSECKEY) len() int { - l := rr.Hdr.len() + 3 + 1 - switch rr.GatewayType { - default: - fallthrough - case 0: - fallthrough - case 3: - l += len(rr.GatewayName) - case 1: - l += 4 - case 2: - l += 16 - } - return l + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) -} - -type KEY struct { - DNSKEY -} - -type CDNSKEY struct { - DNSKEY -} - -type DNSKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *DNSKEY) len() int { - return rr.Hdr.len() + 4 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) -} -func (rr *DNSKEY) copy() RR { - return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} - -func (rr *DNSKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -type RKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *RKEY) len() int { return rr.Hdr.len() + 4 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) } -func (rr *RKEY) copy() RR { - return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} - -func (rr *RKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -type NSAP struct { - Hdr RR_Header - Nsap string -} - -func (rr *NSAP) Header() *RR_Header { return &rr.Hdr } -func (rr *NSAP) copy() RR { return &NSAP{*rr.Hdr.copyHeader(), rr.Nsap} } -func (rr *NSAP) String() string { return rr.Hdr.String() + "0x" + rr.Nsap } -func (rr *NSAP) len() int { return rr.Hdr.len() + 1 + len(rr.Nsap) + 1 } - -type NSAPPTR struct { - Hdr RR_Header - Ptr string `dns:"domain-name"` -} - -func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NSAPPTR) copy() RR { return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} } -func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } -func (rr *NSAPPTR) len() int { return rr.Hdr.len() + len(rr.Ptr) } - -type NSEC3 struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"size-hex"` - HashLength uint8 - NextDomain string `dns:"size-base32"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3) copy() RR { - cp := make([]uint16, len(rr.TypeBitMap), cap(rr.TypeBitMap)) - copy(cp, rr.TypeBitMap) - return 
&NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, cp} -} - -func (rr *NSEC3) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) + - " " + rr.NextDomain - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *NSEC3) len() int { - l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -type NSEC3PARAM struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"hex"` -} - -func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3PARAM) len() int { return rr.Hdr.len() + 2 + 4 + 1 + len(rr.Salt)/2 } -func (rr *NSEC3PARAM) copy() RR { - return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} -} - -func (rr *NSEC3PARAM) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) - return s -} - -type TKEY struct { - Hdr RR_Header - Algorithm string `dns:"domain-name"` - Inception uint32 - Expiration uint32 - Mode uint16 - Error uint16 - KeySize uint16 - Key string - OtherLen uint16 - OtherData string -} - -func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *TKEY) copy() RR { - return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} -} - -func (rr *TKEY) String() string { - // It has no presentation format - return "" -} - -func (rr *TKEY) len() int { - return rr.Hdr.len() + len(rr.Algorithm) + 1 + 4 + 4 + 6 + - len(rr.Key) + 2 + len(rr.OtherData) -} - -// RFC3597 represents an unknown/generic RR. 
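Before the RFC3597 type that the comment above introduces, two details in this stretch of types.go are worth flagging. First, `NSEC.len()` and `NSEC3.len()` both seed their window tracker with `lastwindow := uint32(2 ^ 32 + 1)`; in Go `^` is bitwise XOR, not exponentiation, so the sentinel is actually 35, a value a real window number (`t / 256`, at most 255) can legitimately take, which can undercount the first window block. Second, `URI.copy()` further down passes `rr.Weight` and `rr.Priority` in the opposite order from the struct's field order, silently swapping them in the copy. A corrected sketch of the window-counting loop, written as if inside package dns and assuming the original's 1+32 worst-case estimate per window block is intentional:

```go
// typeBitMapLen counts the worst-case wire size of an NSEC/NSEC3 type
// bitmap. Window numbers are t/256 and fit in 0..255, so 1<<16 is a safe
// "no previous window" sentinel.
func typeBitMapLen(bitmap []uint16) int {
	l := 0
	lastwindow := uint32(1 << 16)
	for _, t := range bitmap {
		window := uint32(t / 256)
		if window != lastwindow {
			l += 1 + 32 // window number plus worst-case 32-byte bitmap
		}
		lastwindow = window
	}
	return l
}
```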
-type RFC3597 struct { - Hdr RR_Header - Rdata string `dns:"hex"` -} - -func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } -func (rr *RFC3597) copy() RR { return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} } -func (rr *RFC3597) len() int { return rr.Hdr.len() + len(rr.Rdata)/2 + 2 } - -func (rr *RFC3597) String() string { - // Let's call it a hack - s := rfc3597Header(rr.Hdr) - - s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata - return s -} - -func rfc3597Header(h RR_Header) string { - var s string - - s += sprintName(h.Name) + "\t" - s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" - s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" - s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" - return s -} - -type URI struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Target []string `dns:"txt"` -} - -func (rr *URI) Header() *RR_Header { return &rr.Hdr } -func (rr *URI) copy() RR { - cp := make([]string, len(rr.Target), cap(rr.Target)) - copy(cp, rr.Target) - return &URI{*rr.Hdr.copyHeader(), rr.Weight, rr.Priority, cp} -} - -func (rr *URI) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + - " " + strconv.Itoa(int(rr.Weight)) + sprintTxt(rr.Target) -} - -func (rr *URI) len() int { - l := rr.Hdr.len() + 4 - for _, t := range rr.Target { - l += len(t) + 1 - } - return l -} - -type DHCID struct { - Hdr RR_Header - Digest string `dns:"base64"` -} - -func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } -func (rr *DHCID) copy() RR { return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} } -func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } -func (rr *DHCID) len() int { return rr.Hdr.len() + base64.StdEncoding.DecodedLen(len(rr.Digest)) } - -type TLSA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } -func (rr *TLSA) len() int { return rr.Hdr.len() + 3 + len(rr.Certificate)/2 } - -func (rr *TLSA) copy() RR { - return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} -} - -func (rr *TLSA) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) + - " " + rr.Certificate -} - -type HIP struct { - Hdr RR_Header - HitLength uint8 - PublicKeyAlgorithm uint8 - PublicKeyLength uint16 - Hit string `dns:"hex"` - PublicKey string `dns:"base64"` - RendezvousServers []string `dns:"domain-name"` -} - -func (rr *HIP) Header() *RR_Header { return &rr.Hdr } -func (rr *HIP) copy() RR { - cp := make([]string, len(rr.RendezvousServers), cap(rr.RendezvousServers)) - copy(cp, rr.RendezvousServers) - return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, cp} -} - -func (rr *HIP) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.PublicKeyAlgorithm)) + - " " + rr.Hit + - " " + rr.PublicKey - for _, d := range rr.RendezvousServers { - s += " " + sprintName(d) - } - return s -} - -func (rr *HIP) len() int { - l := rr.Hdr.len() + 4 + - len(rr.Hit)/2 + - base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - for _, d := range rr.RendezvousServers { - l += len(d) + 1 - } - return l -} - -type NINFO struct { - Hdr RR_Header - ZSData []string `dns:"txt"` -} - -func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *NINFO) copy() RR { - cp := make([]string, len(rr.ZSData), cap(rr.ZSData)) - copy(cp, rr.ZSData) - return 
&NINFO{*rr.Hdr.copyHeader(), cp} -} - -func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } - -func (rr *NINFO) len() int { - l := rr.Hdr.len() - for _, t := range rr.ZSData { - l += len(t) + 1 - } - return l -} - -type WKS struct { - Hdr RR_Header - Address net.IP `dns:"a"` - Protocol uint8 - BitMap []uint16 `dns:"wks"` -} - -func (rr *WKS) Header() *RR_Header { return &rr.Hdr } -func (rr *WKS) len() int { return rr.Hdr.len() + net.IPv4len + 1 } - -func (rr *WKS) copy() RR { - cp := make([]uint16, len(rr.BitMap), cap(rr.BitMap)) - copy(cp, rr.BitMap) - return &WKS{*rr.Hdr.copyHeader(), copyIP(rr.Address), rr.Protocol, cp} -} - -func (rr *WKS) String() (s string) { - s = rr.Hdr.String() - if rr.Address != nil { - s += rr.Address.String() - } - // TODO(miek): missing protocol here, see /etc/protocols - for i := 0; i < len(rr.BitMap); i++ { - // should lookup the port - s += " " + strconv.Itoa(int(rr.BitMap[i])) - } - return s -} - -type NID struct { - Hdr RR_Header - Preference uint16 - NodeID uint64 -} - -func (rr *NID) Header() *RR_Header { return &rr.Hdr } -func (rr *NID) copy() RR { return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} } -func (rr *NID) len() int { return rr.Hdr.len() + 2 + 8 } - -func (rr *NID) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16x", rr.NodeID) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -type L32 struct { - Hdr RR_Header - Preference uint16 - Locator32 net.IP `dns:"a"` -} - -func (rr *L32) Header() *RR_Header { return &rr.Hdr } -func (rr *L32) copy() RR { return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} } -func (rr *L32) len() int { return rr.Hdr.len() + net.IPv4len } - -func (rr *L32) String() string { - if rr.Locator32 == nil { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - } - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + rr.Locator32.String() -} - -type L64 struct { - Hdr RR_Header - Preference uint16 - Locator64 uint64 -} - -func (rr *L64) Header() *RR_Header { return &rr.Hdr } -func (rr *L64) copy() RR { return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} } -func (rr *L64) len() int { return rr.Hdr.len() + 2 + 8 } - -func (rr *L64) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16X", rr.Locator64) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -type LP struct { - Hdr RR_Header - Preference uint16 - Fqdn string `dns:"domain-name"` -} - -func (rr *LP) Header() *RR_Header { return &rr.Hdr } -func (rr *LP) copy() RR { return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} } -func (rr *LP) len() int { return rr.Hdr.len() + 2 + len(rr.Fqdn) + 1 } - -func (rr *LP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) -} - -type EUI48 struct { - Hdr RR_Header - Address uint64 `dns:"uint48"` -} - -func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI48) copy() RR { return &EUI48{*rr.Hdr.copyHeader(), rr.Address} } -func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } -func (rr *EUI48) len() int { return rr.Hdr.len() + 6 } - -type EUI64 struct { - Hdr RR_Header - Address uint64 -} - -func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI64) copy() RR { return &EUI64{*rr.Hdr.copyHeader(), rr.Address} } -func (rr *EUI64) 
String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } -func (rr *EUI64) len() int { return rr.Hdr.len() + 8 } - -type CAA struct { - Hdr RR_Header - Flag uint8 - Tag string - Value string `dns:"octet"` -} - -func (rr *CAA) Header() *RR_Header { return &rr.Hdr } -func (rr *CAA) copy() RR { return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} } -func (rr *CAA) len() int { return rr.Hdr.len() + 2 + len(rr.Tag) + len(rr.Value) } -func (rr *CAA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintCAAValue(rr.Value) -} - -type UID struct { - Hdr RR_Header - Uid uint32 -} - -func (rr *UID) Header() *RR_Header { return &rr.Hdr } -func (rr *UID) copy() RR { return &UID{*rr.Hdr.copyHeader(), rr.Uid} } -func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } -func (rr *UID) len() int { return rr.Hdr.len() + 4 } - -type GID struct { - Hdr RR_Header - Gid uint32 -} - -func (rr *GID) Header() *RR_Header { return &rr.Hdr } -func (rr *GID) copy() RR { return &GID{*rr.Hdr.copyHeader(), rr.Gid} } -func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } -func (rr *GID) len() int { return rr.Hdr.len() + 4 } - -type UINFO struct { - Hdr RR_Header - Uinfo string -} - -func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *UINFO) copy() RR { return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} } -func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } -func (rr *UINFO) len() int { return rr.Hdr.len() + len(rr.Uinfo) + 1 } - -type EID struct { - Hdr RR_Header - Endpoint string `dns:"hex"` -} - -func (rr *EID) Header() *RR_Header { return &rr.Hdr } -func (rr *EID) copy() RR { return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} } -func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } -func (rr *EID) len() int { return rr.Hdr.len() + len(rr.Endpoint)/2 } - -type NIMLOC struct { - Hdr RR_Header - Locator string `dns:"hex"` -} - -func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } -func (rr *NIMLOC) copy() RR { return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} } -func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } -func (rr *NIMLOC) len() int { return rr.Hdr.len() + len(rr.Locator)/2 } - -type OPENPGPKEY struct { - Hdr RR_Header - PublicKey string `dns:"base64"` -} - -func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *OPENPGPKEY) copy() RR { return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} } -func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } -func (rr *OPENPGPKEY) len() int { - return rr.Hdr.len() + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) -} - -// TimeToString translates the RRSIG's incep. and expir. times to the -// string representation used when printing the record. -// It takes serial arithmetic (RFC 1982) into account. -func TimeToString(t uint32) string { - mod := ((int64(t) - time.Now().Unix()) / year68) - 1 - if mod < 0 { - mod = 0 - } - ti := time.Unix(int64(t)-(mod*year68), 0).UTC() - return ti.Format("20060102150405") -} - -// StringToTime translates the RRSIG's incep. and expir. times from -// string values like "20110403154150" to an 32 bit integer. -// It takes serial arithmetic (RFC 1982) into account. 
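TimeToString above and StringToTime (whose body follows) convert between the 32-bit inception/expiration wire fields and the YYYYMMDDHHmmSS presentation form, using RFC 1982 serial arithmetic so that times more than roughly 68 years away from "now" still map sensibly; `year68` is defined outside this hunk (2^31 seconds, about 68 years). A round-trip sketch using the two exported helpers:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Parse a presentation-format timestamp into its 32-bit wire value,
	// then map it back; for times within ~68 years of the current time
	// this round-trips exactly.
	wire, err := dns.StringToTime("20110403154150")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(wire, dns.TimeToString(wire))
}
```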
-func StringToTime(s string) (uint32, error) { - t, e := time.Parse("20060102150405", s) - if e != nil { - return 0, e - } - mod := (t.Unix() / year68) - 1 - if mod < 0 { - mod = 0 - } - return uint32(t.Unix() - (mod * year68)), nil -} - -// saltToString converts a NSECX salt to uppercase and -// returns "-" when it is empty -func saltToString(s string) string { - if len(s) == 0 { - return "-" - } - return strings.ToUpper(s) -} - -func euiToString(eui uint64, bits int) (hex string) { - switch bits { - case 64: - hex = fmt.Sprintf("%16.16x", eui) - hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + - "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] - case 48: - hex = fmt.Sprintf("%12.12x", eui) - hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + - "-" + hex[8:10] + "-" + hex[10:12] - } - return -} - -// copyIP returns a copy of ip. -func copyIP(ip net.IP) net.IP { - p := make(net.IP, len(ip)) - copy(p, ip) - return p -} - -// Map of constructors for each RR type. -var typeToRR = map[uint16]func() RR{ - TypeA: func() RR { return new(A) }, - TypeAAAA: func() RR { return new(AAAA) }, - TypeAFSDB: func() RR { return new(AFSDB) }, - TypeCAA: func() RR { return new(CAA) }, - TypeCDS: func() RR { return new(CDS) }, - TypeCERT: func() RR { return new(CERT) }, - TypeCNAME: func() RR { return new(CNAME) }, - TypeDHCID: func() RR { return new(DHCID) }, - TypeDLV: func() RR { return new(DLV) }, - TypeDNAME: func() RR { return new(DNAME) }, - TypeKEY: func() RR { return new(KEY) }, - TypeDNSKEY: func() RR { return new(DNSKEY) }, - TypeDS: func() RR { return new(DS) }, - TypeEUI48: func() RR { return new(EUI48) }, - TypeEUI64: func() RR { return new(EUI64) }, - TypeGID: func() RR { return new(GID) }, - TypeGPOS: func() RR { return new(GPOS) }, - TypeEID: func() RR { return new(EID) }, - TypeHINFO: func() RR { return new(HINFO) }, - TypeHIP: func() RR { return new(HIP) }, - TypeIPSECKEY: func() RR { return new(IPSECKEY) }, - TypeKX: func() RR { return new(KX) }, - TypeL32: func() RR { return new(L32) }, - TypeL64: func() RR { return new(L64) }, - TypeLOC: func() RR { return new(LOC) }, - TypeLP: func() RR { return new(LP) }, - TypeMB: func() RR { return new(MB) }, - TypeMD: func() RR { return new(MD) }, - TypeMF: func() RR { return new(MF) }, - TypeMG: func() RR { return new(MG) }, - TypeMINFO: func() RR { return new(MINFO) }, - TypeMR: func() RR { return new(MR) }, - TypeMX: func() RR { return new(MX) }, - TypeNAPTR: func() RR { return new(NAPTR) }, - TypeNID: func() RR { return new(NID) }, - TypeNINFO: func() RR { return new(NINFO) }, - TypeNIMLOC: func() RR { return new(NIMLOC) }, - TypeNS: func() RR { return new(NS) }, - TypeNSAP: func() RR { return new(NSAP) }, - TypeNSAPPTR: func() RR { return new(NSAPPTR) }, - TypeNSEC3: func() RR { return new(NSEC3) }, - TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, - TypeNSEC: func() RR { return new(NSEC) }, - TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, - TypeOPT: func() RR { return new(OPT) }, - TypePTR: func() RR { return new(PTR) }, - TypeRKEY: func() RR { return new(RKEY) }, - TypeRP: func() RR { return new(RP) }, - TypePX: func() RR { return new(PX) }, - TypeSIG: func() RR { return new(SIG) }, - TypeRRSIG: func() RR { return new(RRSIG) }, - TypeRT: func() RR { return new(RT) }, - TypeSOA: func() RR { return new(SOA) }, - TypeSPF: func() RR { return new(SPF) }, - TypeSRV: func() RR { return new(SRV) }, - TypeSSHFP: func() RR { return new(SSHFP) }, - TypeTA: func() RR { return new(TA) 
}, - TypeTALINK: func() RR { return new(TALINK) }, - TypeTKEY: func() RR { return new(TKEY) }, - TypeTLSA: func() RR { return new(TLSA) }, - TypeTSIG: func() RR { return new(TSIG) }, - TypeTXT: func() RR { return new(TXT) }, - TypeUID: func() RR { return new(UID) }, - TypeUINFO: func() RR { return new(UINFO) }, - TypeURI: func() RR { return new(URI) }, - TypeWKS: func() RR { return new(WKS) }, - TypeX25: func() RR { return new(X25) }, -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/types_test.go b/Godeps/_workspace/src/github.com/miekg/dns/types_test.go deleted file mode 100644 index 118612946b..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/types_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package dns - -import ( - "testing" -) - -func TestCmToM(t *testing.T) { - s := cmToM(0, 0) - if s != "0.00" { - t.Error("0, 0") - } - - s = cmToM(1, 0) - if s != "0.01" { - t.Error("1, 0") - } - - s = cmToM(3, 1) - if s != "0.30" { - t.Error("3, 1") - } - - s = cmToM(4, 2) - if s != "4" { - t.Error("4, 2") - } - - s = cmToM(5, 3) - if s != "50" { - t.Error("5, 3") - } - - s = cmToM(7, 5) - if s != "7000" { - t.Error("7, 5") - } - - s = cmToM(9, 9) - if s != "90000000" { - t.Error("9, 9") - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/udp.go b/Godeps/_workspace/src/github.com/miekg/dns/udp.go deleted file mode 100644 index 5430e84ecb..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/udp.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build !windows - -package dns - -import ( - "net" - "syscall" -) - -type SessionUDP struct { - raddr *net.UDPAddr - context []byte -} - -func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } - -// setUDPSocketOptions sets the UDP socket options. -// This function is implemented on a per platform basis. See udp_*.go for more details -func setUDPSocketOptions(conn *net.UDPConn) error { - sa, err := getUDPSocketName(conn) - if err != nil { - return err - } - switch sa.(type) { - case *syscall.SockaddrInet6: - v6only, err := getUDPSocketOptions6Only(conn) - if err != nil { - return err - } - setUDPSocketOptions6(conn) - if !v6only { - setUDPSocketOptions4(conn) - } - case *syscall.SockaddrInet4: - setUDPSocketOptions4(conn) - } - return nil -} - -// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a -// net.UDPAddr. -func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { - oob := make([]byte, 40) - n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) - if err != nil { - return n, nil, err - } - return n, &SessionUDP{raddr, oob[:oobn]}, err -} - -// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. 
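ReadFromSessionUDP above and WriteToSessionUDP (defined just below) carry the remote address together with the out-of-band control data, so a server bound to a wildcard address can answer from the interface the query arrived on; the Linux-specific socket options that make this work follow in udp_linux.go. A minimal request/reply loop, written as if inside package dns since setUDPSocketOptions is unexported; the buffer size and error handling are illustrative only:

```go
// serveUDPEcho echoes every datagram back through the session it arrived on.
// Assumes the surrounding file's "net" import.
func serveUDPEcho(conn *net.UDPConn) error {
	if err := setUDPSocketOptions(conn); err != nil {
		return err
	}
	buf := make([]byte, 65535)
	for {
		n, session, err := ReadFromSessionUDP(conn, buf)
		if err != nil {
			return err
		}
		// Replying via the session (rather than conn.WriteTo) lets the
		// kernel source the reply from the ingress address.
		if _, err := WriteToSessionUDP(conn, buf[:n], session); err != nil {
			return err
		}
	}
}
```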
-func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
-	n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
-	return n, err
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/udp_linux.go b/Godeps/_workspace/src/github.com/miekg/dns/udp_linux.go
deleted file mode 100644
index 7a107857e1..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/udp_linux.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// +build linux
-
-package dns
-
-// See:
-// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and
-// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/
-//
-// Why do we need this: when listening on 0.0.0.0 with UDP, the kernel decides what the
-// outgoing interface is, and this might not always be the correct one. This code makes sure
-// the egress packet's interface matches the ingress one.
-
-import (
-	"net"
-	"syscall"
-)
-
-// setUDPSocketOptions4 prepares the v4 socket for sessions.
-func setUDPSocketOptions4(conn *net.UDPConn) error {
-	file, err := conn.File()
-	if err != nil {
-		return err
-	}
-	if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
-		return err
-	}
-	return nil
-}
-
-// setUDPSocketOptions6 prepares the v6 socket for sessions.
-func setUDPSocketOptions6(conn *net.UDPConn) error {
-	file, err := conn.File()
-	if err != nil {
-		return err
-	}
-	if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
-		return err
-	}
-	return nil
-}
-
-// getUDPSocketOptions6Only returns true if the socket is v6 only and false when it is v4/v6 combined
-// (dualstack).
-func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) {
-	file, err := conn.File()
-	if err != nil {
-		return false, err
-	}
-	// dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections
-	v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY)
-	if err != nil {
-		return false, err
-	}
-	return v6only == 1, nil
-}
-
-func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) {
-	file, err := conn.File()
-	if err != nil {
-		return nil, err
-	}
-	return syscall.Getsockname(int(file.Fd()))
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/udp_other.go b/Godeps/_workspace/src/github.com/miekg/dns/udp_other.go
deleted file mode 100644
index c38dd3e7f0..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/udp_other.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !linux
-
-package dns
-
-import (
-	"net"
-	"syscall"
-)
-
-// These do nothing. See udp_linux.go for an example of how to implement this.
-
-// We tried to adhere to some kind of naming scheme.
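udp.go, udp_linux.go, udp_other.go, and udp_windows.go (next) select an implementation per platform through the old `// +build` constraint syntax: for a given GOOS, exactly one of the real and stub variants is compiled in. A minimal illustration of the same pattern, using hypothetical file and function names:

```go
// file: feature_linux.go (compiled only when GOOS=linux)
// +build linux

package demo

func platformName() string { return "linux" } // real implementation goes here
```

```go
// file: feature_other.go (compiled everywhere else)
// +build !linux

package demo

func platformName() string { return "other" } // no-op fallback, like udp_other.go
```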
-
-func setUDPSocketOptions4(conn *net.UDPConn) error { return nil }
-func setUDPSocketOptions6(conn *net.UDPConn) error { return nil }
-func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil }
-func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil }
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/udp_windows.go b/Godeps/_workspace/src/github.com/miekg/dns/udp_windows.go
deleted file mode 100644
index 2ce4b33002..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/udp_windows.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build windows
-
-package dns
-
-import "net"
-
-type SessionUDP struct {
-	raddr *net.UDPAddr
-}
-
-// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
-// net.UDPAddr.
-func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
-	n, raddr, err := conn.ReadFrom(b)
-	if err != nil {
-		return n, nil, err
-	}
-	session := &SessionUDP{raddr.(*net.UDPAddr)}
-	return n, session, err
-}
-
-// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
-func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
-	n, err := conn.WriteTo(b, session.raddr)
-	return n, err
-}
-
-func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
-
-// setUDPSocketOptions sets the UDP socket options.
-// This function is implemented on a per platform basis. See udp_*.go for more details.
-func setUDPSocketOptions(conn *net.UDPConn) error {
-	return nil
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/update.go b/Godeps/_workspace/src/github.com/miekg/dns/update.go
deleted file mode 100644
index 3539987ccb..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/update.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package dns
-
-// NameUsed sets the RRs in the prereq section to
-// "Name is in use" RRs. RFC 2136 section 2.4.4.
-func (u *Msg) NameUsed(rr []RR) {
-	u.Answer = make([]RR, len(rr))
-	for i, r := range rr {
-		u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}
-	}
-}
-
-// NameNotUsed sets the RRs in the prereq section to
-// "Name is not in use" RRs. RFC 2136 section 2.4.5.
-func (u *Msg) NameNotUsed(rr []RR) {
-	u.Answer = make([]RR, len(rr))
-	for i, r := range rr {
-		u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}
-	}
-}
-
-// Used sets the RRs in the prereq section to
-// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
-func (u *Msg) Used(rr []RR) {
-	if len(u.Question) == 0 {
-		panic("dns: empty question section")
-	}
-	u.Answer = make([]RR, len(rr))
-	for i, r := range rr {
-		u.Answer[i] = r
-		u.Answer[i].Header().Class = u.Question[0].Qclass
-	}
-}
-
-// RRsetUsed sets the RRs in the prereq section to
-// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
-func (u *Msg) RRsetUsed(rr []RR) {
-	u.Answer = make([]RR, len(rr))
-	for i, r := range rr {
-		u.Answer[i] = r
-		u.Answer[i].Header().Class = ClassANY
-		u.Answer[i].Header().Ttl = 0
-		u.Answer[i].Header().Rdlength = 0
-	}
-}
-
-// RRsetNotUsed sets the RRs in the prereq section to
-// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
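NameUsed, NameNotUsed, Used, and RRsetUsed above (and RRsetNotUsed, whose body follows) fill the prerequisite section of an RFC 2136 dynamic update, while the Insert/Remove* helpers further down fill the update section. A sketch of composing one, assuming the package's SetUpdate helper from defaults.go, which is not part of this hunk:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetUpdate("example.org.") // sets the zone/SOA question and the UPDATE opcode

	rr, err := dns.NewRR("www.example.org. 300 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}
	m.NameNotUsed([]dns.RR{rr}) // prerequisite: "name is not in use" (section 2.4.5)
	m.Insert([]dns.RR{rr})      // update: add the RRset (section 2.5.1)

	fmt.Println(m) // send to the zone's primary with a dns.Client
}
```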
-func (u *Msg) RRsetNotUsed(rr []RR) { - u.Answer = make([]RR, len(rr)) - for i, r := range rr { - u.Answer[i] = r - u.Answer[i].Header().Class = ClassNONE - u.Answer[i].Header().Rdlength = 0 - u.Answer[i].Header().Ttl = 0 - } -} - -// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. -func (u *Msg) Insert(rr []RR) { - if len(u.Question) == 0 { - panic("dns: empty question section") - } - u.Ns = make([]RR, len(rr)) - for i, r := range rr { - u.Ns[i] = r - u.Ns[i].Header().Class = u.Question[0].Qclass - } -} - -// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. -func (u *Msg) RemoveRRset(rr []RR) { - u.Ns = make([]RR, len(rr)) - for i, r := range rr { - u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}} - } -} - -// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 -func (u *Msg) RemoveName(rr []RR) { - u.Ns = make([]RR, len(rr)) - for i, r := range rr { - u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}} - } -} - -// Remove creates a dynamic update packet deletes RR from the RRSset, see RFC 2136 section 2.5.4 -func (u *Msg) Remove(rr []RR) { - u.Ns = make([]RR, len(rr)) - for i, r := range rr { - u.Ns[i] = r - u.Ns[i].Header().Class = ClassNONE - u.Ns[i].Header().Ttl = 0 - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/update_test.go b/Godeps/_workspace/src/github.com/miekg/dns/update_test.go deleted file mode 100644 index c5767b725c..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/update_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package dns - -import ( - "bytes" - "testing" -) - -func TestDynamicUpdateParsing(t *testing.T) { - prefix := "example.com. IN " - for _, typ := range TypeToString { - if typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" || - typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" { - continue - } - r, err := NewRR(prefix + typ) - if err != nil { - t.Errorf("failure to parse: %s %s: %v", prefix, typ, err) - } else { - t.Logf("parsed: %s", r.String()) - } - } -} - -func TestDynamicUpdateUnpack(t *testing.T) { - // From https://github.com/miekg/dns/issues/150#issuecomment-62296803 - // It should be an update message for the zone "example.", - // deleting the A RRset "example." and then adding an A record at "example.". - // class ANY, TYPE A - buf := []byte{171, 68, 40, 0, 0, 1, 0, 0, 0, 2, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 0, 0, 6, 0, 1, 192, 12, 0, 1, 0, 255, 0, 0, 0, 0, 0, 0, 192, 12, 0, 1, 0, 1, 0, 0, 0, 0, 0, 4, 127, 0, 0, 1} - msg := new(Msg) - err := msg.Unpack(buf) - if err != nil { - t.Errorf("failed to unpack: %v\n%s", err, msg.String()) - } -} - -func TestDynamicUpdateZeroRdataUnpack(t *testing.T) { - m := new(Msg) - rr := &RR_Header{Name: ".", Rrtype: 0, Class: 1, Ttl: ^uint32(0), Rdlength: 0} - m.Answer = []RR{rr, rr, rr, rr, rr} - m.Ns = m.Answer - for n, s := range TypeToString { - rr.Rrtype = n - bytes, err := m.Pack() - if err != nil { - t.Errorf("failed to pack %s: %v", s, err) - continue - } - if err := new(Msg).Unpack(bytes); err != nil { - t.Errorf("failed to unpack %s: %v", s, err) - } - } -} - -func TestRemoveRRset(t *testing.T) { - // Should add a zero data RR in Class ANY with a TTL of 0 - // for each set mentioned in the RRs provided to it. - rr, err := NewRR(". 
100 IN A 127.0.0.1") - if err != nil { - t.Fatalf("Error constructing RR: %v", err) - } - m := new(Msg) - m.Ns = []RR{&RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY, Ttl: 0, Rdlength: 0}} - expectstr := m.String() - expect, err := m.Pack() - if err != nil { - t.Fatalf("Error packing expected msg: %v", err) - } - - m.Ns = nil - m.RemoveRRset([]RR{rr}) - actual, err := m.Pack() - if err != nil { - t.Fatalf("Error packing actual msg: %v", err) - } - if !bytes.Equal(actual, expect) { - tmp := new(Msg) - if err := tmp.Unpack(actual); err != nil { - t.Fatalf("Error unpacking actual msg: %v", err) - } - t.Errorf("Expected msg:\n%s", expectstr) - t.Errorf("Actual msg:\n%v", tmp) - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/xfr.go b/Godeps/_workspace/src/github.com/miekg/dns/xfr.go deleted file mode 100644 index c098925d25..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/xfr.go +++ /dev/null @@ -1,233 +0,0 @@ -package dns - -import ( - "time" -) - -// Envelope is used when doing a zone transfer with a remote server. -type Envelope struct { - RR []RR // The set of RRs in the answer section of the xfr reply message. - Error error // If something went wrong, this contains the error. -} - -// A Transfer defines parameters that are used during a zone transfer. -type Transfer struct { - *Conn - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be fully qualified - tsigTimersOnly bool -} - -// Think we need to away to stop the transfer - -// In performs an incoming transfer with the server in a. -func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { - timeout := dnsTimeout - if t.DialTimeout != 0 { - timeout = t.DialTimeout - } - t.Conn, err = DialTimeout("tcp", a, timeout) - if err != nil { - return nil, err - } - if err := t.WriteMsg(q); err != nil { - return nil, err - } - env = make(chan *Envelope) - go func() { - if q.Question[0].Qtype == TypeAXFR { - go t.inAxfr(q.Id, env) - return - } - if q.Question[0].Qtype == TypeIXFR { - go t.inIxfr(q.Id, env) - return - } - }() - return env, nil -} - -func (t *Transfer) inAxfr(id uint16, c chan *Envelope) { - first := true - defer t.Close() - defer close(c) - timeout := dnsTimeout - if t.ReadTimeout != 0 { - timeout = t.ReadTimeout - } - for { - t.Conn.SetReadDeadline(time.Now().Add(timeout)) - in, err := t.ReadMsg() - if err != nil { - c <- &Envelope{nil, err} - return - } - if id != in.Id { - c <- &Envelope{in.Answer, ErrId} - return - } - if first { - if !isSOAFirst(in) { - c <- &Envelope{in.Answer, ErrSoa} - return - } - first = !first - // only one answer that is SOA, receive more - if len(in.Answer) == 1 { - t.tsigTimersOnly = true - c <- &Envelope{in.Answer, nil} - continue - } - } - - if !first { - t.tsigTimersOnly = true // Subsequent envelopes use this. 
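An aside while inAxfr continues below: Transfer.In above hands back a channel of Envelopes, one per reply message, so callers can stream large zones without buffering them whole. A compact client sketch against a placeholder server and zone (the test file later in this diff shows fuller, network-dependent variants):

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.") // placeholder zone

	tr := new(dns.Transfer)
	env, err := tr.In(m, "192.0.2.53:53") // placeholder server address
	if err != nil {
		log.Fatal(err)
	}
	n := 0
	for e := range env {
		if e.Error != nil {
			log.Fatal(e.Error) // e.g. ErrId on a mismatched message ID
		}
		n += len(e.RR)
	}
	fmt.Println("transferred", n, "RRs")
}
```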
- if isSOALast(in) { - c <- &Envelope{in.Answer, nil} - return - } - c <- &Envelope{in.Answer, nil} - } - } - panic("dns: not reached") -} - -func (t *Transfer) inIxfr(id uint16, c chan *Envelope) { - serial := uint32(0) // The first serial seen is the current server serial - first := true - defer t.Close() - defer close(c) - timeout := dnsTimeout - if t.ReadTimeout != 0 { - timeout = t.ReadTimeout - } - for { - t.SetReadDeadline(time.Now().Add(timeout)) - in, err := t.ReadMsg() - if err != nil { - c <- &Envelope{nil, err} - return - } - if id != in.Id { - c <- &Envelope{in.Answer, ErrId} - return - } - if first { - // A single SOA RR signals "no changes" - if len(in.Answer) == 1 && isSOAFirst(in) { - c <- &Envelope{in.Answer, nil} - return - } - - // Check if the returned answer is ok - if !isSOAFirst(in) { - c <- &Envelope{in.Answer, ErrSoa} - return - } - // This serial is important - serial = in.Answer[0].(*SOA).Serial - first = !first - } - - // Now we need to check each message for SOA records, to see what we need to do - if !first { - t.tsigTimersOnly = true - // If the last record in the IXFR contains the servers' SOA, we should quit - if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok { - if v.Serial == serial { - c <- &Envelope{in.Answer, nil} - return - } - } - c <- &Envelope{in.Answer, nil} - } - } -} - -// Out performs an outgoing transfer with the client connecting in w. -// Basic use pattern: -// -// ch := make(chan *dns.Envelope) -// tr := new(dns.Transfer) -// tr.Out(w, r, ch) -// c <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} -// close(ch) -// w.Hijack() -// // w.Close() // Client closes connection -// -// The server is responsible for sending the correct sequence of RRs through the -// channel ch. -func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { - for x := range ch { - r := new(Msg) - // Compress? - r.SetReply(q) - r.Authoritative = true - // assume it fits TODO(miek): fix - r.Answer = append(r.Answer, x.RR...) - if err := w.WriteMsg(r); err != nil { - return err - } - } - w.TsigTimersOnly(true) - return nil -} - -// ReadMsg reads a message from the transfer connection t. -func (t *Transfer) ReadMsg() (*Msg, error) { - m := new(Msg) - p := make([]byte, MaxMsgSize) - n, err := t.Read(p) - if err != nil && n == 0 { - return nil, err - } - p = p[:n] - if err := m.Unpack(p); err != nil { - return nil, err - } - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - t.tsigRequestMAC = ts.MAC - } - return m, err -} - -// WriteMsg writes a message through the transfer connection t. 
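Before WriteMsg's body below, here is how Out's channel protocol (sketched in its doc comment above) typically sits inside a server handler. HandleFunc, ResponseWriter, and Hijack come from the package's server code, which is not part of this hunk, and `records` is a placeholder for the zone contents:

```go
// Hypothetical outbound-AXFR handler wiring.
var records []dns.RR // the zone, bracketed by its SOA record (placeholder)

dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
	ch := make(chan *dns.Envelope)
	go func() {
		// The server is responsible for the correct RR sequence:
		// soa, rr1, ..., rrN, soa.
		ch <- &dns.Envelope{RR: records}
		close(ch)
	}()
	tr := new(dns.Transfer)
	if err := tr.Out(w, r, ch); err != nil {
		return
	}
	w.Hijack() // the handler owns the connection from here on
})
```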
-func (t *Transfer) WriteMsg(m *Msg) (err error) { - var out []byte - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return ErrSecret - } - out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - if _, err = t.Write(out); err != nil { - return err - } - return nil -} - -func isSOAFirst(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[0].Header().Rrtype == TypeSOA - } - return false -} - -func isSOALast(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA - } - return false -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/xfr_test.go b/Godeps/_workspace/src/github.com/miekg/dns/xfr_test.go deleted file mode 100644 index d52d84c6a3..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/xfr_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package dns - -import ( - "net" - "testing" - "time" -) - -func getIP(s string) string { - a, err := net.LookupAddr(s) - if err != nil { - return "" - } - return a[0] -} - -// flaky, need to setup local server and test from -// that. -func testClientAXFR(t *testing.T) { - if testing.Short() { - return - } - m := new(Msg) - m.SetAxfr("miek.nl.") - - server := getIP("linode.atoom.net") - - tr := new(Transfer) - - if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { - t.Fatal("failed to setup axfr: ", err) - } else { - for ex := range a { - if ex.Error != nil { - t.Errorf("error %v", ex.Error) - break - } - for _, rr := range ex.RR { - t.Log(rr.String()) - } - } - } -} - -// fails. -func testClientAXFRMultipleEnvelopes(t *testing.T) { - if testing.Short() { - return - } - m := new(Msg) - m.SetAxfr("nlnetlabs.nl.") - - server := getIP("open.nlnetlabs.nl.") - - tr := new(Transfer) - if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { - t.Fatalf("Failed to setup axfr %v for server: %v", err, server) - } else { - for ex := range a { - if ex.Error != nil { - t.Errorf("Error %v", ex.Error) - break - } - } - } -} - -func testClientTsigAXFR(t *testing.T) { - if testing.Short() { - return - } - m := new(Msg) - m.SetAxfr("example.nl.") - m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix()) - - tr := new(Transfer) - tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - - if a, err := tr.In(m, "176.58.119.54:53"); err != nil { - t.Fatal("failed to setup axfr: ", err) - } else { - for ex := range a { - if ex.Error != nil { - t.Errorf("error %v", ex.Error) - break - } - for _, rr := range ex.RR { - t.Log(rr.String()) - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/zgenerate.go b/Godeps/_workspace/src/github.com/miekg/dns/zgenerate.go deleted file mode 100644 index ae92531572..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/zgenerate.go +++ /dev/null @@ -1,158 +0,0 @@ -package dns - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// Parse the $GENERATE statement as used in BIND9 zones. -// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. -// We are called after '$GENERATE '. After which we expect: -// * the range (12-24/2) -// * lhs (ownername) -// * [[ttl][class]] -// * type -// * rhs (rdata) -// But we are lazy here, only the range is parsed *all* occurences -// of $ after that are interpreted. -// Any error are returned as a string value, the empty string signals -// "no error". 
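Concretely, the directive form documented above expands a counter range into RRs. A minimal sketch, assuming the placeholder origin `example.org.`, that runs one $GENERATE line through ParseZone (defined later in zscan.go):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	// Expands to dhcp-1 ... dhcp-4, stepping the counter by 1.
	z := "$GENERATE 1-4 dhcp-$ A 10.0.0.$\n"
	for tok := range dns.ParseZone(strings.NewReader(z), "example.org.", "") {
		if tok.Error != nil {
			fmt.Println(tok.Error)
			continue
		}
		fmt.Println(tok.RR) // e.g. dhcp-1.example.org. 3600 IN A 10.0.0.1
	}
}
```

A step suffix on the range (e.g. `1-4/2`) is also accepted; it is parsed at the top of generate below.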
-func generate(l lex, c chan lex, t chan *Token, o string) string { - step := 1 - if i := strings.IndexAny(l.token, "/"); i != -1 { - if i+1 == len(l.token) { - return "bad step in $GENERATE range" - } - if s, e := strconv.Atoi(l.token[i+1:]); e == nil { - if s < 0 { - return "bad step in $GENERATE range" - } - step = s - } else { - return "bad step in $GENERATE range" - } - l.token = l.token[:i] - } - sx := strings.SplitN(l.token, "-", 2) - if len(sx) != 2 { - return "bad start-stop in $GENERATE range" - } - start, err := strconv.Atoi(sx[0]) - if err != nil { - return "bad start in $GENERATE range" - } - end, err := strconv.Atoi(sx[1]) - if err != nil { - return "bad stop in $GENERATE range" - } - if end < 0 || start < 0 || end < start { - return "bad range in $GENERATE range" - } - - <-c // _BLANK - // Create a complete new string, which we then parse again. - s := "" -BuildRR: - l = <-c - if l.value != zNewline && l.value != zEOF { - s += l.token - goto BuildRR - } - for i := start; i <= end; i += step { - var ( - escape bool - dom bytes.Buffer - mod string - err string - offset int - ) - - for j := 0; j < len(s); j++ { // No 'range' because we need to jump around - switch s[j] { - case '\\': - if escape { - dom.WriteByte('\\') - escape = false - continue - } - escape = true - case '$': - mod = "%d" - offset = 0 - if escape { - dom.WriteByte('$') - escape = false - continue - } - escape = false - if j+1 >= len(s) { // End of the string - dom.WriteString(fmt.Sprintf(mod, i+offset)) - continue - } else { - if s[j+1] == '$' { - dom.WriteByte('$') - j++ - continue - } - } - // Search for { and } - if s[j+1] == '{' { // Modifier block - sep := strings.Index(s[j+2:], "}") - if sep == -1 { - return "bad modifier in $GENERATE" - } - mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) - if err != "" { - return err - } - j += 2 + sep // Jump to it - } - dom.WriteString(fmt.Sprintf(mod, i+offset)) - default: - if escape { // Pretty useless here - escape = false - continue - } - dom.WriteByte(s[j]) - } - } - // Re-parse the RR and send it on the current channel t - rx, e := NewRR("$ORIGIN " + o + "\n" + dom.String()) - if e != nil { - return e.(*ParseError).err - } - t <- &Token{RR: rx} - // Its more efficient to first built the rrlist and then parse it in - // one go! But is this a problem? - } - return "" -} - -// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. 
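The modifier grammar handled here is ${offset,width,base}; modToPrintf below turns it into an fmt verb plus an offset. A short illustration of the arithmetic, with hypothetical values:

```go
package main

import "fmt"

func main() {
	// The modifier "${-20,3,d}" means: add -20 to the counter, print it
	// with width 3, zero padded, in decimal — i.e. the verb "%03d"
	// combined with offset -20.
	counter := 21
	fmt.Printf("%03d\n", counter+(-20)) // prints 001
}
```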
-func modToPrintf(s string) (string, int, string) {
- xs := strings.SplitN(s, ",", 3)
- if len(xs) != 3 {
- return "", 0, "bad modifier in $GENERATE"
- }
- // xs[0] is the offset, xs[1] is the width, xs[2] is the base
- if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
- return "", 0, "bad base in $GENERATE"
- }
- offset, err := strconv.Atoi(xs[0])
- if err != nil {
- return "", 0, "bad offset in $GENERATE"
- }
- width, err := strconv.Atoi(xs[1])
- if err != nil {
- return "", offset, "bad width in $GENERATE"
- }
- switch {
- case width < 0:
- return "", offset, "bad width in $GENERATE"
- case width == 0:
- return "%" + xs[1] + xs[2], offset, ""
- }
- return "%0" + xs[1] + xs[2], offset, ""
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/zscan.go b/Godeps/_workspace/src/github.com/miekg/dns/zscan.go
deleted file mode 100644
index 06be9cc6d1..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/zscan.go
+++ /dev/null
@@ -1,964 +0,0 @@
-package dns
-
-import (
- "io"
- "log"
- "os"
- "strconv"
- "strings"
-)
-
-type debugging bool
-
-const debug debugging = false
-
-func (d debugging) Printf(format string, args ...interface{}) {
- if d {
- log.Printf(format, args...)
- }
-}
-
-const maxTok = 2048 // Largest token we can return.
-const maxUint16 = 1<<16 - 1
-
-// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
-// * Add ownernames if they are left blank;
-// * Suppress sequences of spaces;
-// * Make each RR fit on one line (zNewline is sent as the last token);
-// * Handle comments: ;
-// * Handle braces - anywhere.
-const (
- // Zonefile
- zEOF = iota
- zString
- zBlank
- zQuote
- zNewline
- zRrtpe
- zOwner
- zClass
- zDirOrigin // $ORIGIN
- zDirTtl // $TTL
- zDirInclude // $INCLUDE
- zDirGenerate // $GENERATE
-
- // Private key file
- zValue
- zKey
-
- zExpectOwnerDir // Ownername
- zExpectOwnerBl // Whitespace after the ownername
- zExpectAny // Expect rrtype, ttl or class
- zExpectAnyNoClass // Expect rrtype or ttl
- zExpectAnyNoClassBl // The whitespace after zExpectAnyNoClass
- zExpectAnyNoTtl // Expect rrtype or class
- zExpectAnyNoTtlBl // Whitespace after zExpectAnyNoTtl
- zExpectRrtype // Expect rrtype
- zExpectRrtypeBl // Whitespace BEFORE rrtype
- zExpectRdata // The first element of the rdata
- zExpectDirTtlBl // Space after directive $TTL
- zExpectDirTtl // Directive $TTL
- zExpectDirOriginBl // Space after directive $ORIGIN
- zExpectDirOrigin // Directive $ORIGIN
- zExpectDirIncludeBl // Space after directive $INCLUDE
- zExpectDirInclude // Directive $INCLUDE
- zExpectDirGenerate // Directive $GENERATE
- zExpectDirGenerateBl // Space after directive $GENERATE
-)
-
-// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
-// where the error occurred.
-type ParseError struct {
- file string
- err string
- lex lex
-}
-
-func (e *ParseError) Error() (s string) {
- if e.file != "" {
- s = e.file + ": "
- }
- s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
- strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
- return
-}
-
-type lex struct {
- token string // text of the token
- tokenUpper string // uppercase text of the token
- length int // length of the token
- err bool // when true, the token text contains a lexer error
- value uint8 // value: zString, zBlank, etc.
- line int // line in the file
- column int // column in the file
- torc uint16 // type or class as parsed in the lexer; we only need to look this up in the grammar
- comment string // any comment text seen
-}
-
-// Token holds the tokens that are returned when a zone file is parsed.
-type Token struct {
- // The scanned resource record when Error is nil.
- RR
- // When an error occurred, this has the error specifics.
- Error *ParseError
- // A potential comment positioned after the RR and on the same line.
- Comment string
-}
-
-// NewRR reads the RR contained in the string s. Only the first RR is
-// returned. If s contains no RR, return nil with no error. The class
-// defaults to IN and the TTL defaults to 3600. The full zone file syntax
-// like $TTL, $ORIGIN, etc. is supported. All fields of the returned
-// RR are set, except RR.Header().Rdlength which is set to 0.
-func NewRR(s string) (RR, error) {
- if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
- return ReadRR(strings.NewReader(s+"\n"), "")
- }
- return ReadRR(strings.NewReader(s), "")
-}
-
-// ReadRR reads the RR contained in q.
-// See NewRR for more documentation.
-func ReadRR(q io.Reader, filename string) (RR, error) {
- r := <-parseZoneHelper(q, ".", filename, 1)
- if r == nil {
- return nil, nil
- }
-
- if r.Error != nil {
- return nil, r.Error
- }
- return r.RR, nil
-}
-
-// ParseZone reads an RFC 1035 style zone file from r. It returns *Tokens on the
-// returned channel, which consist of the parsed RR, a potential comment, or an error.
-// If there is an error, the RR is nil. The string file is only used
-// in error reporting. The string origin is used as the initial origin, as
-// if the file would start with: $ORIGIN origin .
-// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
-// The channel t is closed by ParseZone when the end of r is reached.
-//
-// Basic usage pattern when reading from a string (z) containing the
-// zone data:
-//
-// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
-// if x.Error == nil {
-// // Do something with x.RR
-// }
-// }
-//
-// Comments specified after an RR (and on the same line!) are returned too:
-//
-// foo. IN A 10.0.0.1 ; this is a comment
-//
-// The text "; this is a comment" is returned in Token.Comment. Comments inside the
-// RR are discarded. Comments on a line by themselves are discarded too.
-func ParseZone(r io.Reader, origin, file string) chan *Token {
- return parseZoneHelper(r, origin, file, 10000)
-}
-
-func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token {
- t := make(chan *Token, chansize)
- go parseZone(r, origin, file, t, 0)
- return t
-}
-
-func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
- defer func() {
- if include == 0 {
- close(t)
- }
- }()
- s := scanInit(r)
- c := make(chan lex)
- // Start the lexer
- go zlexer(s, c)
- // 6 possible beginnings of a line, _ is a space
- // 0. zRRTYPE -> all omitted until the rrtype
- // 1. zOwner _ zRrtype -> class/ttl omitted
- // 2. zOwner _ zString _ zRrtype -> class omitted
- // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
- // 4. zOwner _ zClass _ zRrtype -> ttl omitted
- // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
- // After detecting these, we know the zRrtype so we can jump to functions
- // handling the rdata for each of these types.
-
- if origin == "" {
- origin = "."
- } - origin = Fqdn(origin) - if _, ok := IsDomainName(origin); !ok { - t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} - return - } - - st := zExpectOwnerDir // initial state - var h RR_Header - var defttl uint32 = defaultTtl - var prevName string - for l := range c { - // Lexer spotted an error already - if l.err == true { - t <- &Token{Error: &ParseError{f, l.token, l}} - return - - } - switch st { - case zExpectOwnerDir: - // We can also expect a directive, like $TTL or $ORIGIN - h.Ttl = defttl - h.Class = ClassINET - switch l.value { - case zNewline: - st = zExpectOwnerDir - case zOwner: - h.Name = l.token - if l.token[0] == '@' { - h.Name = origin - prevName = h.Name - st = zExpectOwnerBl - break - } - if h.Name[l.length-1] != '.' { - h.Name = appendOrigin(h.Name, origin) - } - _, ok := IsDomainName(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "bad owner name", l}} - return - } - prevName = h.Name - st = zExpectOwnerBl - case zDirTtl: - st = zExpectDirTtlBl - case zDirOrigin: - st = zExpectDirOriginBl - case zDirInclude: - st = zExpectDirIncludeBl - case zDirGenerate: - st = zExpectDirGenerateBl - case zRrtpe: - h.Name = prevName - h.Rrtype = l.torc - st = zExpectRdata - case zClass: - h.Name = prevName - h.Class = l.torc - st = zExpectAnyNoClassBl - case zBlank: - // Discard, can happen when there is nothing on the - // line except the RR type - case zString: - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "not a TTL", l}} - return - } - h.Ttl = ttl - // Don't about the defttl, we should take the $TTL value - // defttl = ttl - st = zExpectAnyNoTtlBl - - default: - t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} - return - } - case zExpectDirIncludeBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}} - return - } - st = zExpectDirInclude - case zExpectDirInclude: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}} - return - } - neworigin := origin // There may be optionally a new origin set after the filename, if not use current one - l := <-c - switch l.value { - case zBlank: - l := <-c - if l.value == zString { - if _, ok := IsDomainName(l.token); !ok { - t <- &Token{Error: &ParseError{f, "bad origin name", l}} - return - } - // a new origin is specified. - if l.token[l.length-1] != '.' { - if origin != "." { // Prevent .. endings - neworigin = l.token + "." 
+ origin - } else { - neworigin = l.token + origin - } - } else { - neworigin = l.token - } - } - case zNewline, zEOF: - // Ok - default: - t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}} - return - } - // Start with the new file - r1, e1 := os.Open(l.token) - if e1 != nil { - t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}} - return - } - if include+1 > 7 { - t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} - return - } - parseZone(r1, l.token, neworigin, t, include+1) - st = zExpectOwnerDir - case zExpectDirTtlBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} - return - } - st = zExpectDirTtl - case zExpectDirTtl: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} - return - } - if e, _ := slurpRemainder(c, f); e != nil { - t <- &Token{Error: e} - return - } - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} - return - } - defttl = ttl - st = zExpectOwnerDir - case zExpectDirOriginBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}} - return - } - st = zExpectDirOrigin - case zExpectDirOrigin: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}} - return - } - if e, _ := slurpRemainder(c, f); e != nil { - t <- &Token{Error: e} - } - if _, ok := IsDomainName(l.token); !ok { - t <- &Token{Error: &ParseError{f, "bad origin name", l}} - return - } - if l.token[l.length-1] != '.' { - if origin != "." { // Prevent .. endings - origin = l.token + "." + origin - } else { - origin = l.token + origin - } - } else { - origin = l.token - } - st = zExpectOwnerDir - case zExpectDirGenerateBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}} - return - } - st = zExpectDirGenerate - case zExpectDirGenerate: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}} - return - } - if e := generate(l, c, t, origin); e != "" { - t <- &Token{Error: &ParseError{f, e, l}} - return - } - st = zExpectOwnerDir - case zExpectOwnerBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after owner", l}} - return - } - st = zExpectAny - case zExpectAny: - switch l.value { - case zRrtpe: - h.Rrtype = l.torc - st = zExpectRdata - case zClass: - h.Class = l.torc - st = zExpectAnyNoClassBl - case zString: - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "not a TTL", l}} - return - } - h.Ttl = ttl - // defttl = ttl // don't set the defttl here - st = zExpectAnyNoTtlBl - default: - t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} - return - } - case zExpectAnyNoClassBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank before class", l}} - return - } - st = zExpectAnyNoClass - case zExpectAnyNoTtlBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} - return - } - st = zExpectAnyNoTtl - case zExpectAnyNoTtl: - switch l.value { - case zClass: - h.Class = l.torc - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - st = zExpectRdata - default: - t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}} - return - } - case zExpectAnyNoClass: - switch l.value { - case zString: - ttl, ok := stringToTtl(l.token) - if 
!ok { - t <- &Token{Error: &ParseError{f, "not a TTL", l}} - return - } - h.Ttl = ttl - // defttl = ttl // don't set the def ttl anymore - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - st = zExpectRdata - default: - t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}} - return - } - case zExpectRrtypeBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank before RR type", l}} - return - } - st = zExpectRrtype - case zExpectRrtype: - if l.value != zRrtpe { - t <- &Token{Error: &ParseError{f, "unknown RR type", l}} - return - } - h.Rrtype = l.torc - st = zExpectRdata - case zExpectRdata: - r, e, c1 := setRR(h, c, origin, f) - if e != nil { - // If e.lex is nil than we have encounter a unknown RR type - // in that case we substitute our current lex token - if e.lex.token == "" && e.lex.value == 0 { - e.lex = l // Uh, dirty - } - t <- &Token{Error: e} - return - } - t <- &Token{RR: r, Comment: c1} - st = zExpectOwnerDir - } - } - // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this - // is not an error, because an empty zone file is still a zone file. -} - -// zlexer scans the sourcefile and returns tokens on the channel c. -func zlexer(s *scan, c chan lex) { - var l lex - str := make([]byte, maxTok) // Should be enough for any token - stri := 0 // Offset in str (0 means empty) - com := make([]byte, maxTok) // Hold comment text - comi := 0 - quote := false - escape := false - space := false - commt := false - rrtype := false - owner := true - brace := 0 - x, err := s.tokenText() - defer close(c) - for err == nil { - l.column = s.position.Column - l.line = s.position.Line - if stri >= maxTok { - l.token = "token length insufficient for parsing" - l.err = true - debug.Printf("[%+v]", l.token) - c <- l - return - } - if comi >= maxTok { - l.token = "comment length insufficient for parsing" - l.err = true - debug.Printf("[%+v]", l.token) - c <- l - return - } - - switch x { - case ' ', '\t': - if escape { - escape = false - str[stri] = x - stri++ - break - } - if quote { - // Inside quotes this is legal - str[stri] = x - stri++ - break - } - if commt { - com[comi] = x - comi++ - break - } - if stri == 0 { - // Space directly in the beginning, handled in the grammar - } else if owner { - // If we have a string and its the first, make it an owner - l.value = zOwner - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - // escape $... 
start with a \ not a $, so this will work - switch l.tokenUpper { - case "$TTL": - l.value = zDirTtl - case "$ORIGIN": - l.value = zDirOrigin - case "$INCLUDE": - l.value = zDirInclude - case "$GENERATE": - l.value = zDirGenerate - } - debug.Printf("[7 %+v]", l.token) - c <- l - } else { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - if !rrtype { - if t, ok := StringToType[l.tokenUpper]; ok { - l.value = zRrtpe - l.torc = t - rrtype = true - } else { - if strings.HasPrefix(l.tokenUpper, "TYPE") { - t, ok := typeToInt(l.token) - if !ok { - l.token = "unknown RR type" - l.err = true - c <- l - return - } - l.value = zRrtpe - l.torc = t - } - } - if t, ok := StringToClass[l.tokenUpper]; ok { - l.value = zClass - l.torc = t - } else { - if strings.HasPrefix(l.tokenUpper, "CLASS") { - t, ok := classToInt(l.token) - if !ok { - l.token = "unknown class" - l.err = true - c <- l - return - } - l.value = zClass - l.torc = t - } - } - } - debug.Printf("[6 %+v]", l.token) - c <- l - } - stri = 0 - // I reverse space stuff here - if !space && !commt { - l.value = zBlank - l.token = " " - l.length = 1 - debug.Printf("[5 %+v]", l.token) - c <- l - } - owner = false - space = true - case ';': - if escape { - escape = false - str[stri] = x - stri++ - break - } - if quote { - // Inside quotes this is legal - str[stri] = x - stri++ - break - } - if stri > 0 { - l.value = zString - l.token = string(str[:stri]) - l.length = stri - debug.Printf("[4 %+v]", l.token) - c <- l - stri = 0 - } - commt = true - com[comi] = ';' - comi++ - case '\r': - escape = false - if quote { - str[stri] = x - stri++ - break - } - // discard if outside of quotes - case '\n': - escape = false - // Escaped newline - if quote { - str[stri] = x - stri++ - break - } - // inside quotes this is legal - if commt { - // Reset a comment - commt = false - rrtype = false - stri = 0 - // If not in a brace this ends the comment AND the RR - if brace == 0 { - owner = true - owner = true - l.value = zNewline - l.token = "\n" - l.length = 1 - l.comment = string(com[:comi]) - debug.Printf("[3 %+v %+v]", l.token, l.comment) - c <- l - l.comment = "" - comi = 0 - break - } - com[comi] = ' ' // convert newline to space - comi++ - break - } - - if brace == 0 { - // If there is previous text, we should output it here - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - - l.length = stri - if !rrtype { - if t, ok := StringToType[l.tokenUpper]; ok { - l.value = zRrtpe - l.torc = t - rrtype = true - } - } - debug.Printf("[2 %+v]", l.token) - c <- l - } - l.value = zNewline - l.token = "\n" - l.length = 1 - debug.Printf("[1 %+v]", l.token) - c <- l - stri = 0 - commt = false - rrtype = false - owner = true - comi = 0 - } - case '\\': - // comments do not get escaped chars, everything is copied - if commt { - com[comi] = x - comi++ - break - } - // something already escaped must be in string - if escape { - str[stri] = x - stri++ - escape = false - break - } - // something escaped outside of string gets added to string - str[stri] = x - stri++ - escape = true - case '"': - if commt { - com[comi] = x - comi++ - break - } - if escape { - str[stri] = x - stri++ - escape = false - break - } - space = false - // send previous gathered text and the quote - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - l.length = stri - - debug.Printf("[%+v]", l.token) - c <- l - stri = 0 - } - - // send quote itself as separate token - 
l.value = zQuote - l.token = "\"" - l.length = 1 - c <- l - quote = !quote - case '(', ')': - if commt { - com[comi] = x - comi++ - break - } - if escape { - str[stri] = x - stri++ - escape = false - break - } - if quote { - str[stri] = x - stri++ - break - } - switch x { - case ')': - brace-- - if brace < 0 { - l.token = "extra closing brace" - l.err = true - debug.Printf("[%+v]", l.token) - c <- l - return - } - case '(': - brace++ - } - default: - escape = false - if commt { - com[comi] = x - comi++ - break - } - str[stri] = x - stri++ - space = false - } - x, err = s.tokenText() - } - if stri > 0 { - // Send remainder - l.token = string(str[:stri]) - l.length = stri - l.value = zString - debug.Printf("[%+v]", l.token) - c <- l - } -} - -// Extract the class number from CLASSxx -func classToInt(token string) (uint16, bool) { - class, ok := strconv.Atoi(token[5:]) - if ok != nil || class > maxUint16 { - return 0, false - } - return uint16(class), true -} - -// Extract the rr number from TYPExxx -func typeToInt(token string) (uint16, bool) { - typ, ok := strconv.Atoi(token[4:]) - if ok != nil || typ > maxUint16 { - return 0, false - } - return uint16(typ), true -} - -// Parse things like 2w, 2m, etc, Return the time in seconds. -func stringToTtl(token string) (uint32, bool) { - s := uint32(0) - i := uint32(0) - for _, c := range token { - switch c { - case 's', 'S': - s += i - i = 0 - case 'm', 'M': - s += i * 60 - i = 0 - case 'h', 'H': - s += i * 60 * 60 - i = 0 - case 'd', 'D': - s += i * 60 * 60 * 24 - i = 0 - case 'w', 'W': - s += i * 60 * 60 * 24 * 7 - i = 0 - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - i *= 10 - i += uint32(c) - '0' - default: - return 0, false - } - } - return s + i, true -} - -// Parse LOC records' [.][mM] into a -// mantissa exponent format. Token should contain the entire -// string (i.e. no spaces allowed) -func stringToCm(token string) (e, m uint8, ok bool) { - if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { - token = token[0 : len(token)-1] - } - s := strings.SplitN(token, ".", 2) - var meters, cmeters, val int - var err error - switch len(s) { - case 2: - if cmeters, err = strconv.Atoi(s[1]); err != nil { - return - } - fallthrough - case 1: - if meters, err = strconv.Atoi(s[0]); err != nil { - return - } - case 0: - // huh? - return 0, 0, false - } - ok = true - if meters > 0 { - e = 2 - val = meters - } else { - e = 0 - val = cmeters - } - for val > 10 { - e++ - val /= 10 - } - if e > 9 { - ok = false - } - m = uint8(val) - return -} - -func appendOrigin(name, origin string) string { - if origin == "." { - return name + origin - } - return name + "." + origin -} - -// LOC record helper function -func locCheckNorth(token string, latitude uint32) (uint32, bool) { - switch token { - case "n", "N": - return LOC_EQUATOR + latitude, true - case "s", "S": - return LOC_EQUATOR - latitude, true - } - return latitude, false -} - -// LOC record helper function -func locCheckEast(token string, longitude uint32) (uint32, bool) { - switch token { - case "e", "E": - return LOC_EQUATOR + longitude, true - case "w", "W": - return LOC_EQUATOR - longitude, true - } - return longitude, false -} - -// "Eat" the rest of the "line". 
Return potential comments.
-func slurpRemainder(c chan lex, f string) (*ParseError, string) {
- l := <-c
- com := ""
- switch l.value {
- case zBlank:
- l = <-c
- com = l.comment
- if l.value != zNewline && l.value != zEOF {
- return &ParseError{f, "garbage after rdata", l}, ""
- }
- case zNewline:
- com = l.comment
- case zEOF:
- default:
- return &ParseError{f, "garbage after rdata", l}, ""
- }
- return nil, com
-}
-
-// Parse a 64-bit-like IPv6 address: "0014:4fff:ff20:ee64".
-// Used for NID and L64 records.
-func stringToNodeID(l lex) (uint64, *ParseError) {
- if len(l.token) < 19 {
- return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
- }
- // There must be three colons at fixed positions; if not, it's a parse error.
- if l.token[4] != ':' || l.token[9] != ':' || l.token[14] != ':' {
- return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
- }
- s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
- u, e := strconv.ParseUint(s, 16, 64)
- if e != nil {
- return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
- }
- return u, nil
-}
diff --git a/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go b/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go
deleted file mode 100644
index 3763d3f4cf..0000000000
--- a/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go
+++ /dev/null
@@ -1,2272 +0,0 @@
-package dns
-
-import (
- "encoding/base64"
- "net"
- "strconv"
- "strings"
-)
-
-type parserFunc struct {
- // Func defines the function that parses the tokens and returns the RR
- // or an error. The last string contains any comments in the line as
- // returned by the lexer.
- Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string)
- // Variable signals whether the RR ending is of variable length, like TXT
- // or records that have hexadecimal or base64 as their last element in the
- // rdata; records such as A, AAAA and SOA have a fixed ending.
- Variable bool
-}
-
-// Parse the rdata of each rrtype.
-// All data from the channel c is either zString or zBlank.
-// After the rdata there may come a zBlank and then a zNewline
-// or immediately a zNewline. If this is not the case we flag
-// a *ParseError: garbage after rdata.
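Before the dispatcher itself, a minimal sketch of the contract just described: a well-formed rdata parses, while a trailing token trips the garbage-after-rdata check in slurpRemainder. The names and addresses are placeholders.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// A fixed-ending record with exactly the expected rdata parses fine.
	if _, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1"); err != nil {
		fmt.Println("unexpected:", err)
	}
	// A stray token after the address does not.
	if _, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1 bogus"); err != nil {
		fmt.Println(err) // e.g. dns: garbage after rdata: "bogus" at line: 1:...
	}
}
```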
-func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - parserfunc, ok := typeToparserFunc[h.Rrtype] - if ok { - r, e, cm := parserfunc.Func(h, c, o, f) - if parserfunc.Variable { - return r, e, cm - } - if e != nil { - return nil, e, "" - } - e, cm = slurpRemainder(c, f) - if e != nil { - return nil, e, "" - } - return r, nil, cm - } - // RFC3957 RR (Unknown RR handling) - return setRFC3597(h, c, o, f) -} - -// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) -// or an error -func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { - s := "" - l := <-c // zString - for l.value != zNewline && l.value != zEOF { - if l.err { - return s, &ParseError{f, errstr, l}, "" - } - switch l.value { - case zString: - s += l.token - case zBlank: // Ok - default: - return "", &ParseError{f, errstr, l}, "" - } - l = <-c - } - return s, nil, l.comment -} - -// A remainder of the rdata with embedded spaces, return the parsed string slice (sans the spaces) -// or an error -func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { - // Get the remaining data until we see a zNewline - quote := false - l := <-c - var s []string - if l.err { - return s, &ParseError{f, errstr, l}, "" - } - switch l.value == zQuote { - case true: // A number of quoted string - s = make([]string, 0) - empty := true - for l.value != zNewline && l.value != zEOF { - if l.err { - return nil, &ParseError{f, errstr, l}, "" - } - switch l.value { - case zString: - empty = false - if len(l.token) > 255 { - // split up tokens that are larger than 255 into 255-chunks - sx := []string{} - p, i := 0, 255 - for { - if i <= len(l.token) { - sx = append(sx, l.token[p:i]) - } else { - sx = append(sx, l.token[p:]) - break - - } - p, i = p+255, i+255 - } - s = append(s, sx...) - break - } - - s = append(s, l.token) - case zBlank: - if quote { - // zBlank can only be seen in between txt parts. - return nil, &ParseError{f, errstr, l}, "" - } - case zQuote: - if empty && quote { - s = append(s, "") - } - quote = !quote - empty = true - default: - return nil, &ParseError{f, errstr, l}, "" - } - l = <-c - } - if quote { - return nil, &ParseError{f, errstr, l}, "" - } - case false: // Unquoted text record - s = make([]string, 1) - for l.value != zNewline && l.value != zEOF { - if l.err { - return s, &ParseError{f, errstr, l}, "" - } - s[0] += l.token - l = <-c - } - } - return s, nil, l.comment -} - -func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(A) - rr.Hdr = h - - l := <-c - if l.length == 0 { // Dynamic updates. - return rr, nil, "" - } - rr.A = net.ParseIP(l.token) - if rr.A == nil { - return nil, &ParseError{f, "bad A A", l}, "" - } - return rr, nil, "" -} - -func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(AAAA) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - rr.AAAA = net.ParseIP(l.token) - if rr.AAAA == nil { - return nil, &ParseError{f, "bad AAAA AAAA", l}, "" - } - return rr, nil, "" -} - -func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NS) - rr.Hdr = h - - l := <-c - rr.Ns = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Ns = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad NS Ns", l}, "" - } - if rr.Ns[l.length-1] != '.' 
{ - rr.Ns = appendOrigin(rr.Ns, o) - } - return rr, nil, "" -} - -func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(PTR) - rr.Hdr = h - - l := <-c - rr.Ptr = l.token - if l.length == 0 { // dynamic update rr. - return rr, nil, "" - } - if l.token == "@" { - rr.Ptr = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad PTR Ptr", l}, "" - } - if rr.Ptr[l.length-1] != '.' { - rr.Ptr = appendOrigin(rr.Ptr, o) - } - return rr, nil, "" -} - -func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSAPPTR) - rr.Hdr = h - - l := <-c - rr.Ptr = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Ptr = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" - } - if rr.Ptr[l.length-1] != '.' { - rr.Ptr = appendOrigin(rr.Ptr, o) - } - return rr, nil, "" -} - -func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RP) - rr.Hdr = h - - l := <-c - rr.Mbox = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mbox = o - } else { - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad RP Mbox", l}, "" - } - if rr.Mbox[l.length-1] != '.' { - rr.Mbox = appendOrigin(rr.Mbox, o) - } - } - <-c // zBlank - l = <-c - rr.Txt = l.token - if l.token == "@" { - rr.Txt = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad RP Txt", l}, "" - } - if rr.Txt[l.length-1] != '.' { - rr.Txt = appendOrigin(rr.Txt, o) - } - return rr, nil, "" -} - -func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MR) - rr.Hdr = h - - l := <-c - rr.Mr = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mr = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad MR Mr", l}, "" - } - if rr.Mr[l.length-1] != '.' { - rr.Mr = appendOrigin(rr.Mr, o) - } - return rr, nil, "" -} - -func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MB) - rr.Hdr = h - - l := <-c - rr.Mb = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mb = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad MB Mb", l}, "" - } - if rr.Mb[l.length-1] != '.' { - rr.Mb = appendOrigin(rr.Mb, o) - } - return rr, nil, "" -} - -func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MG) - rr.Hdr = h - - l := <-c - rr.Mg = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mg = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad MG Mg", l}, "" - } - if rr.Mg[l.length-1] != '.' { - rr.Mg = appendOrigin(rr.Mg, o) - } - return rr, nil, "" -} - -func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(HINFO) - rr.Hdr = h - - chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) - if e != nil { - return nil, e, c1 - } - - if ln := len(chunks); ln == 0 { - return rr, nil, "" - } else if ln == 1 { - // Can we split it? 
- if out := strings.Fields(chunks[0]); len(out) > 1 { - chunks = out - } else { - chunks = append(chunks, "") - } - } - - rr.Cpu = chunks[0] - rr.Os = strings.Join(chunks[1:], " ") - - return rr, nil, "" -} - -func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MINFO) - rr.Hdr = h - - l := <-c - rr.Rmail = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Rmail = o - } else { - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad MINFO Rmail", l}, "" - } - if rr.Rmail[l.length-1] != '.' { - rr.Rmail = appendOrigin(rr.Rmail, o) - } - } - <-c // zBlank - l = <-c - rr.Email = l.token - if l.token == "@" { - rr.Email = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad MINFO Email", l}, "" - } - if rr.Email[l.length-1] != '.' { - rr.Email = appendOrigin(rr.Email, o) - } - return rr, nil, "" -} - -func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MF) - rr.Hdr = h - - l := <-c - rr.Mf = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mf = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad MF Mf", l}, "" - } - if rr.Mf[l.length-1] != '.' { - rr.Mf = appendOrigin(rr.Mf, o) - } - return rr, nil, "" -} - -func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MD) - rr.Hdr = h - - l := <-c - rr.Md = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Md = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad MD Md", l}, "" - } - if rr.Md[l.length-1] != '.' { - rr.Md = appendOrigin(rr.Md, o) - } - return rr, nil, "" -} - -func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MX) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad MX Pref", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Mx = l.token - if l.token == "@" { - rr.Mx = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad MX Mx", l}, "" - } - if rr.Mx[l.length-1] != '.' { - rr.Mx = appendOrigin(rr.Mx, o) - } - return rr, nil, "" -} - -func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RT) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad RT Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Host = l.token - if l.token == "@" { - rr.Host = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad RT Host", l}, "" - } - if rr.Host[l.length-1] != '.' 
{ - rr.Host = appendOrigin(rr.Host, o) - } - return rr, nil, "" -} - -func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(AFSDB) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" - } - rr.Subtype = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Hostname = l.token - if l.token == "@" { - rr.Hostname = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" - } - if rr.Hostname[l.length-1] != '.' { - rr.Hostname = appendOrigin(rr.Hostname, o) - } - return rr, nil, "" -} - -func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(X25) - rr.Hdr = h - - l := <-c - rr.PSDNAddress = l.token - return rr, nil, "" -} - -func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(KX) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad KX Pref", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Exchanger = l.token - if l.token == "@" { - rr.Exchanger = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad KX Exchanger", l}, "" - } - if rr.Exchanger[l.length-1] != '.' { - rr.Exchanger = appendOrigin(rr.Exchanger, o) - } - return rr, nil, "" -} - -func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CNAME) - rr.Hdr = h - - l := <-c - rr.Target = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Target = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad CNAME Target", l}, "" - } - if rr.Target[l.length-1] != '.' { - rr.Target = appendOrigin(rr.Target, o) - } - return rr, nil, "" -} - -func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(DNAME) - rr.Hdr = h - - l := <-c - rr.Target = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Target = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad CNAME Target", l}, "" - } - if rr.Target[l.length-1] != '.' { - rr.Target = appendOrigin(rr.Target, o) - } - return rr, nil, "" -} - -func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SOA) - rr.Hdr = h - - l := <-c - rr.Ns = l.token - if l.length == 0 { - return rr, nil, "" - } - <-c // zBlank - if l.token == "@" { - rr.Ns = o - } else { - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad SOA Ns", l}, "" - } - if rr.Ns[l.length-1] != '.' { - rr.Ns = appendOrigin(rr.Ns, o) - } - } - - l = <-c - rr.Mbox = l.token - if l.token == "@" { - rr.Mbox = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad SOA Mbox", l}, "" - } - if rr.Mbox[l.length-1] != '.' 
{ - rr.Mbox = appendOrigin(rr.Mbox, o) - } - } - <-c // zBlank - - var ( - v uint32 - ok bool - ) - for i := 0; i < 5; i++ { - l = <-c - if j, e := strconv.Atoi(l.token); e != nil { - if i == 0 { - // Serial should be a number - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - } - if v, ok = stringToTtl(l.token); !ok { - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - - } - } else { - v = uint32(j) - } - switch i { - case 0: - rr.Serial = v - <-c // zBlank - case 1: - rr.Refresh = v - <-c // zBlank - case 2: - rr.Retry = v - <-c // zBlank - case 3: - rr.Expire = v - <-c // zBlank - case 4: - rr.Minttl = v - } - } - return rr, nil, "" -} - -func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SRV) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad SRV Priority", l}, "" - } - rr.Priority = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad SRV Weight", l}, "" - } - rr.Weight = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad SRV Port", l}, "" - } - rr.Port = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Target = l.token - if l.token == "@" { - rr.Target = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad SRV Target", l}, "" - } - if rr.Target[l.length-1] != '.' { - rr.Target = appendOrigin(rr.Target, o) - } - return rr, nil, "" -} - -func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NAPTR) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad NAPTR Order", l}, "" - } - rr.Order = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad NAPTR Preference", l}, "" - } - rr.Preference = uint16(i) - // Flags - <-c // zBlank - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - l = <-c // Either String or Quote - if l.value == zString { - rr.Flags = l.token - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - } else if l.value == zQuote { - rr.Flags = "" - } else { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - - // Service - <-c // zBlank - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - l = <-c // Either String or Quote - if l.value == zString { - rr.Service = l.token - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - } else if l.value == zQuote { - rr.Service = "" - } else { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - - // Regexp - <-c // zBlank - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - l = <-c // Either String or Quote - if l.value == zString { - rr.Regexp = l.token - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - } else if l.value == zQuote { - rr.Regexp = "" - } else { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - // After quote no space?? 
- <-c // zBlank - l = <-c // zString - rr.Replacement = l.token - if l.token == "@" { - rr.Replacement = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" - } - if rr.Replacement[l.length-1] != '.' { - rr.Replacement = appendOrigin(rr.Replacement, o) - } - return rr, nil, "" -} - -func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TALINK) - rr.Hdr = h - - l := <-c - rr.PreviousName = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.PreviousName = o - } else { - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" - } - if rr.PreviousName[l.length-1] != '.' { - rr.PreviousName = appendOrigin(rr.PreviousName, o) - } - } - <-c // zBlank - l = <-c - rr.NextName = l.token - if l.token == "@" { - rr.NextName = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad TALINK NextName", l}, "" - } - if rr.NextName[l.length-1] != '.' { - rr.NextName = appendOrigin(rr.NextName, o) - } - return rr, nil, "" -} - -func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(LOC) - rr.Hdr = h - // Non zero defaults for LOC record, see RFC 1876, Section 3. - rr.HorizPre = 165 // 10000 - rr.VertPre = 162 // 10 - rr.Size = 18 // 1 - ok := false - // North - l := <-c - if l.length == 0 { - return rr, nil, "" - } - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad LOC Latitude", l}, "" - } else { - rr.Latitude = 1000 * 60 * 60 * uint32(i) - } - <-c // zBlank - // Either number, 'N' or 'S' - l = <-c - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" - } else { - rr.Latitude += 1000 * 60 * uint32(i) - } - <-c // zBlank - l = <-c - if i, e := strconv.ParseFloat(l.token, 32); e != nil { - return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" - } else { - rr.Latitude += uint32(1000 * i) - } - <-c // zBlank - // Either number, 'N' or 'S' - l = <-c - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - // If still alive, flag an error - return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" - -East: - // East - <-c // zBlank - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad LOC Longitude", l}, "" - } else { - rr.Longitude = 1000 * 60 * 60 * uint32(i) - } - <-c // zBlank - // Either number, 'E' or 'W' - l = <-c - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - goto Altitude - } - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" - } else { - rr.Longitude += 1000 * 60 * uint32(i) - } - <-c // zBlank - l = <-c - if i, e := strconv.ParseFloat(l.token, 32); e != nil { - return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" - } else { - rr.Longitude += uint32(1000 * i) - } - <-c // zBlank - // Either number, 'E' or 'W' - l = <-c - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - goto Altitude - } - // If still alive, flag an error - return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" - -Altitude: - <-c // zBlank - l = <-c - if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { - l.token = l.token[0 : len(l.token)-1] - } - if 
i, e := strconv.ParseFloat(l.token, 32); e != nil { - return nil, &ParseError{f, "bad LOC Altitude", l}, "" - } else { - rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) - } - - // And now optionally the other values - l = <-c - count := 0 - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - switch count { - case 0: // Size - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC Size", l}, "" - } - rr.Size = (e & 0x0f) | (m << 4 & 0xf0) - case 1: // HorizPre - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC HorizPre", l}, "" - } - rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0) - case 2: // VertPre - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC VertPre", l}, "" - } - rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0) - } - count++ - case zBlank: - // Ok - default: - return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" - } - l = <-c - } - return rr, nil, "" -} - -func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(HIP) - rr.Hdr = h - - // HitLength is not represented - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" - } - rr.PublicKeyAlgorithm = uint8(i) - <-c // zBlank - l = <-c // zString - rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. - rr.HitLength = uint8(len(rr.Hit)) / 2 - - <-c // zBlank - l = <-c // zString - rr.PublicKey = l.token // This cannot contain spaces - rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) - - // RendezvousServers (if any) - l = <-c - var xs []string - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - if l.token == "@" { - xs = append(xs, o) - continue - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" - } - if l.token[l.length-1] != '.' 
{ - l.token = appendOrigin(l.token, o) - } - xs = append(xs, l.token) - case zBlank: - // Ok - default: - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" - } - l = <-c - } - rr.RendezvousServers = xs - return rr, nil, l.comment -} - -func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CERT) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - if v, ok := StringToCertType[l.token]; ok { - rr.Type = v - } else if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad CERT Type", l}, "" - } else { - rr.Type = uint16(i) - } - <-c // zBlank - l = <-c // zString - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad CERT KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c // zString - if v, ok := StringToAlgorithm[l.token]; ok { - rr.Algorithm = v - } else if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad CERT Algorithm", l}, "" - } else { - rr.Algorithm = uint8(i) - } - s, e1, c1 := endingToString(c, "bad CERT Certificate", f) - if e1 != nil { - return nil, e1, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(OPENPGPKEY) - rr.Hdr = h - - s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) - if e != nil { - return nil, e, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setRRSIG(h, c, o, f) - if r != nil { - return &SIG{*r.(*RRSIG)}, e, s - } - return nil, e, s -} - -func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RRSIG) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - if t, ok := StringToType[l.tokenUpper]; !ok { - if strings.HasPrefix(l.tokenUpper, "TYPE") { - t, ok = typeToInt(l.tokenUpper) - if !ok { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" - } - rr.TypeCovered = t - } else { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" - } - } else { - rr.TypeCovered = t - } - <-c // zBlank - l = <-c - i, err := strconv.Atoi(l.token) - if err != nil { - return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - <-c // zBlank - l = <-c - i, err = strconv.Atoi(l.token) - if err != nil { - return nil, &ParseError{f, "bad RRSIG Labels", l}, "" - } - rr.Labels = uint8(i) - <-c // zBlank - l = <-c - i, err = strconv.Atoi(l.token) - if err != nil { - return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" - } - rr.OrigTtl = uint32(i) - <-c // zBlank - l = <-c - if i, err := StringToTime(l.token); err != nil { - // Try to see if all numeric and use it as epoch - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - // TODO(miek): error out on > MAX_UINT32, same below - rr.Expiration = uint32(i) - } else { - return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" - } - } else { - rr.Expiration = i - } - <-c // zBlank - l = <-c - if i, err := StringToTime(l.token); err != nil { - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - rr.Inception = uint32(i) - } else { - return nil, &ParseError{f, "bad RRSIG Inception", l}, "" - } - } else { - rr.Inception = i - } - <-c // zBlank - l = <-c - i, err = strconv.Atoi(l.token) - if err != nil { - return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c - rr.SignerName = l.token - if l.token 
== "@" { - rr.SignerName = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" - } - if rr.SignerName[l.length-1] != '.' { - rr.SignerName = appendOrigin(rr.SignerName, o) - } - } - s, e, c1 := endingToString(c, "bad RRSIG Signature", f) - if e != nil { - return nil, e, c1 - } - rr.Signature = s - return rr, nil, c1 -} - -func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSEC) - rr.Hdr = h - - l := <-c - rr.NextDomain = l.token - if l.length == 0 { - return rr, nil, l.comment - } - if l.token == "@" { - rr.NextDomain = o - } else { - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" - } - if rr.NextDomain[l.length-1] != '.' { - rr.NextDomain = appendOrigin(rr.NextDomain, o) - } - } - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l = <-c - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" - } - l = <-c - } - return rr, nil, l.comment -} - -func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" - } - rr.Hash = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" - } - rr.Flags = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" - } - rr.Iterations = uint16(i) - <-c - l = <-c - if len(l.token) == 0 { - return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" - } - rr.SaltLength = uint8(len(l.token)) / 2 - rr.Salt = l.token - - <-c - l = <-c - rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) - rr.NextDomain = l.token - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l = <-c - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" - } - l = <-c - } - return rr, nil, l.comment -} - -func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3PARAM) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" - } - rr.Hash = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" - } - rr.Flags = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" - } - rr.Iterations = uint16(i) - <-c - l = <-c - rr.SaltLength = uint8(len(l.token)) - rr.Salt = l.token - return rr, nil, "" -} - -func setEUI48(h RR_Header, c chan lex, o, f 
string) (RR, *ParseError, string) { - rr := new(EUI48) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - if l.length != 17 { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - addr := make([]byte, 12) - dash := 0 - for i := 0; i < 10; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - } - addr[10] = l.token[15] - addr[11] = l.token[16] - - i, e := strconv.ParseUint(string(addr), 16, 48) - if e != nil { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - rr.Address = i - return rr, nil, "" -} - -func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(EUI64) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - if l.length != 23 { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" - } - addr := make([]byte, 16) - dash := 0 - for i := 0; i < 14; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" - } - } - addr[14] = l.token[21] - addr[15] = l.token[22] - - i, e := strconv.ParseUint(string(addr), 16, 64) - if e != nil { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" - } - rr.Address = i - return rr, nil, "" -} - -func setWKS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(WKS) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - rr.Address = net.ParseIP(l.token) - if rr.Address == nil { - return nil, &ParseError{f, "bad WKS Address", l}, "" - } - - <-c // zBlank - l = <-c - proto := "tcp" - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad WKS Protocol", l}, "" - } - rr.Protocol = uint8(i) - switch rr.Protocol { - case 17: - proto = "udp" - case 6: - proto = "tcp" - default: - return nil, &ParseError{f, "bad WKS Protocol", l}, "" - } - - <-c // zBlank - l = <-c - rr.BitMap = make([]uint16, 0) - var ( - k int - err error - ) - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - if k, err = net.LookupPort(proto, l.token); err == nil { - rr.BitMap = append(rr.BitMap, uint16(k)) - } else if i, e := strconv.Atoi(l.token); e == nil { // not a service name, accept a plain port number - rr.BitMap = append(rr.BitMap, uint16(i)) - } else { - return nil, &ParseError{f, "bad WKS BitMap", l}, "" - } - default: - return nil, &ParseError{f, "bad WKS BitMap", l}, "" - } - l = <-c - } - return rr, nil, l.comment -} - -func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SSHFP) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad SSHFP Type", l}, "" - } - rr.Type = uint8(i) - <-c // zBlank - s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) - if e1 != nil { - return nil, e1, c1 - } - rr.FingerPrint = s - return rr, nil, "" -} - -func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { - rr := new(DNSKEY) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" - } - rr.Flags = uint16(i) - 
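// Per the DNSKEY presentation format (RFC 4034), the Protocol, Algorithm - // and base64-encoded public key fields follow the Flags field just parsed. - 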
<-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" - } - rr.Protocol = uint8(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) - if e1 != nil { - return nil, e1, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "KEY") - if r != nil { - return &KEY{*r.(*DNSKEY)}, e, s - } - return nil, e, s -} - -func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - return setDNSKEYs(h, c, o, f, "DNSKEY") -} - -func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") - if r != nil { - return &CDNSKEY{*r.(*DNSKEY)}, e, s - } - return nil, e, s -} - -func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RKEY) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad RKEY Flags", l}, "" - } - rr.Flags = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad RKEY Protocol", l}, "" - } - rr.Protocol = uint8(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) - if e1 != nil { - return nil, e1, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(EID) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad EID Endpoint", f) - if e != nil { - return nil, e, c1 - } - rr.Endpoint = s - return rr, nil, c1 -} - -func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NIMLOC) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) - if e != nil { - return nil, e, c1 - } - rr.Locator = s - return rr, nil, c1 -} - -func setNSAP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSAP) - rr.Hdr = h - chunks, e1, c1 := endingToTxtSlice(c, "bad NSAP Nsap", f) - if e1 != nil { - return nil, e1, c1 - } - // The rdata may arrive as a single string or as several;
 join everything - // back together and re-split on whitespace so we always work on whole words. - s := strings.Fields(strings.Join(chunks, " ")) - if len(s) == 0 { - return rr, nil, c1 - } - if len(s[0]) >= 2 && (s[0][0:2] == "0x" || s[0][0:2] == "0X") { - // The RFC shows only a "0x" prefix, but nothing says an uppercase "0X" is not allowed. - rr.Nsap = strings.Join(s, "")[2:] - } else { - // The length prefix is not used when formatting the record, so only check - // that it is numeric; validating it against the data would gain nothing. - _, err := strconv.Atoi(s[0]) - if err != nil { - return nil, &ParseError{f, "bad NSAP Length", lex{token: s[0]}}, "" - } - rr.Nsap = strings.Join(s[1:], "") - } - return rr, nil, c1 -} - -func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(GPOS) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - _, e := strconv.ParseFloat(l.token, 64) - if e != nil { - return nil, &ParseError{f, "bad GPOS Longitude", l}, "" - } - rr.Longitude = l.token - <-c // zBlank - l = <-c - _, e = strconv.ParseFloat(l.token, 64) - if e != nil { - return nil, &ParseError{f, "bad GPOS Latitude", l}, "" - } - rr.Latitude = l.token - <-c // zBlank - l = <-c - _, e = strconv.ParseFloat(l.token, 64) - if e != nil { - return nil, &ParseError{f, "bad GPOS Altitude", l}, "" - } - rr.Altitude = l.token - return rr, nil, "" -} - -func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { - rr := new(DS) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - i, ok := StringToAlgorithm[l.tokenUpper] - if !ok { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" - } - rr.DigestType = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) - if e1 != nil { - return nil, e1, c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - return setDSs(h, c, o, f, "DS") -} - -func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "DLV") - if r != nil { - return &DLV{*r.(*DS)}, e, s - } - return nil, e, s -} - -func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "CDS") - if r != nil { - return &CDS{*r.(*DS)}, e, s - } - return nil, e, s -} - -func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad TA KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - i, ok := StringToAlgorithm[l.tokenUpper] - if !ok { - return nil, &ParseError{f, "bad TA Algorithm", l}, "" - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad TA DigestType", l}, "" - } - rr.DigestType = uint8(i) - s, e1, c1 := endingToString(c, "bad 
TA Digest", f) - if e != nil { - return nil, e.(*ParseError), c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TLSA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad TLSA Usage", l}, "" - } - rr.Usage = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad TLSA Selector", l}, "" - } - rr.Selector = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. different than e), because...??t - s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) - if e2 != nil { - return nil, e2, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RFC3597) - rr.Hdr = h - l := <-c - if l.token != "\\#" { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" - } - <-c // zBlank - l = <-c - rdlength, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" - } - - s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) - if e1 != nil { - return nil, e1, c1 - } - if rdlength*2 != len(s) { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" - } - rr.Rdata = s - return rr, nil, c1 -} - -func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SPF) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - return rr, nil, c1 -} - -func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TXT) - rr.Hdr = h - - // no zBlank reading here, because all this rdata is TXT - s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - return rr, nil, c1 -} - -// identical to setTXT -func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NINFO) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) - if e != nil { - return nil, e, "" - } - rr.ZSData = s - return rr, nil, c1 -} - -func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(URI) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad URI Priority", l}, "" - } - rr.Priority = uint16(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad URI Weight", l}, "" - } - rr.Weight = uint16(i) - - <-c // zBlank - s, e, c1 := endingToTxtSlice(c, "bad URI Target", f) - if e != nil { - return nil, e.(*ParseError), "" - } - rr.Target = s - return rr, nil, c1 -} - -func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - // awesome record to parse! 
- rr := new(DHCID) - rr.Hdr = h - - s, e, c1 := endingToString(c, "bad DHCID Digest", f) - if e != nil { - return nil, e, c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NID) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad NID Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - u, err := stringToNodeID(l) - if err != nil { - return nil, err, "" - } - rr.NodeID = u - return rr, nil, "" -} - -func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(L32) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad L32 Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Locator32 = net.ParseIP(l.token) - if rr.Locator32 == nil { - return nil, &ParseError{f, "bad L32 Locator", l}, "" - } - return rr, nil, "" -} - -func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(LP) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad LP Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Fqdn = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Fqdn = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad LP Fqdn", l}, "" - } - if rr.Fqdn[l.length-1] != '.' { - rr.Fqdn = appendOrigin(rr.Fqdn, o) - } - return rr, nil, "" -} - -func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(L64) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad L64 Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - u, err := stringToNodeID(l) - if err != nil { - return nil, err, "" - } - rr.Locator64 = u - return rr, nil, "" -} - -func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(UID) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad UID Uid", l}, "" - } - rr.Uid = uint32(i) - return rr, nil, "" -} - -func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(GID) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad GID Gid", l}, "" - } - rr.Gid = uint32(i) - return rr, nil, "" -} - -func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(UINFO) - rr.Hdr = h - s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) - if e != nil { - return nil, e, "" - } - rr.Uinfo = s[0] // silently discard any strings beyond the first - return rr, nil, c1 -} - -func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(PX) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad PX Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Map822 = l.token - 
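// As with other name fields, "@" stands for the origin, and a relative - // name is qualified with the origin before being stored. - 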
if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Map822 = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad PX Map822", l}, "" - } - if rr.Map822[l.length-1] != '.' { - rr.Map822 = appendOrigin(rr.Map822, o) - } - <-c // zBlank - l = <-c // zString - rr.Mapx400 = l.token - if l.token == "@" { - rr.Mapx400 = o - return rr, nil, "" - } - _, ok = IsDomainName(l.token) - if !ok || l.length == 0 { - return nil, &ParseError{f, "bad PX Mapx400", l}, "" - } - if rr.Mapx400[l.length-1] != '.' { - rr.Mapx400 = appendOrigin(rr.Mapx400, o) - } - return rr, nil, "" -} - -func setIPSECKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(IPSECKEY) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, err := strconv.Atoi(l.token) - if err != nil { - return nil, &ParseError{f, "bad IPSECKEY Precedence", l}, "" - } - rr.Precedence = uint8(i) - <-c // zBlank - l = <-c - i, err = strconv.Atoi(l.token) - if err != nil { - return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, "" - } - rr.GatewayType = uint8(i) - <-c // zBlank - l = <-c - i, err = strconv.Atoi(l.token) - if err != nil { - return nil, &ParseError{f, "bad IPSECKEY Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - - // The format of the gateway field depends on GatewayType. - <-c // zBlank - l = <-c - switch rr.GatewayType { - case 0: - fallthrough - case 3: - rr.GatewayName = l.token - if l.token == "@" { - rr.GatewayName = o - } - _, ok := IsDomainName(l.token) - if !ok { - return nil, &ParseError{f, "bad IPSECKEY GatewayName", l}, "" - } - if rr.GatewayName[l.length-1] != '.' { - rr.GatewayName = appendOrigin(rr.GatewayName, o) - } - case 1: - rr.GatewayA = net.ParseIP(l.token) - if rr.GatewayA == nil { - return nil, &ParseError{f, "bad IPSECKEY GatewayA", l}, "" - } - case 2: - rr.GatewayAAAA = net.ParseIP(l.token) - if rr.GatewayAAAA == nil { - return nil, &ParseError{f, "bad IPSECKEY GatewayAAAA", l}, "" - } - default: - return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, "" - } - - s, e, c1 := endingToString(c, "bad IPSECKEY PublicKey", f) - if e != nil { - return nil, e, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CAA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, err := strconv.Atoi(l.token) - if err != nil { - return nil, &ParseError{f, "bad CAA Flag", l}, "" - } - rr.Flag = uint8(i) - - <-c // zBlank - l = <-c // zString - if l.value != zString { - return nil, &ParseError{f, "bad CAA Tag", l}, "" - } - rr.Tag = l.token - - <-c // zBlank - s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) - if e != nil { - return nil, e, "" - } - if len(s) != 1 { // exactly one value string; this also guards the indexing below - return nil, &ParseError{f, "bad CAA Value", l}, "" - } - rr.Value = s[0] - return rr, nil, c1 -} - -var typeToparserFunc = map[uint16]parserFunc{ - TypeAAAA: parserFunc{setAAAA, false}, - TypeAFSDB: parserFunc{setAFSDB, false}, - TypeA: parserFunc{setA, false}, - TypeCAA: parserFunc{setCAA, true}, - TypeCDS: parserFunc{setCDS, true}, - TypeCDNSKEY: parserFunc{setCDNSKEY, true}, - TypeCERT: parserFunc{setCERT, true}, - TypeCNAME: parserFunc{setCNAME, false}, - TypeDHCID: parserFunc{setDHCID, true}, - TypeDLV: parserFunc{setDLV, true}, - TypeDNAME: parserFunc{setDNAME, false}, - TypeKEY: parserFunc{setKEY, true}, - TypeDNSKEY: parserFunc{setDNSKEY, true}, - TypeDS: parserFunc{setDS, true}, - 
TypeEID: parserFunc{setEID, true}, - TypeEUI48: parserFunc{setEUI48, false}, - TypeEUI64: parserFunc{setEUI64, false}, - TypeGID: parserFunc{setGID, false}, - TypeGPOS: parserFunc{setGPOS, false}, - TypeHINFO: parserFunc{setHINFO, true}, - TypeHIP: parserFunc{setHIP, true}, - TypeIPSECKEY: parserFunc{setIPSECKEY, true}, - TypeKX: parserFunc{setKX, false}, - TypeL32: parserFunc{setL32, false}, - TypeL64: parserFunc{setL64, false}, - TypeLOC: parserFunc{setLOC, true}, - TypeLP: parserFunc{setLP, false}, - TypeMB: parserFunc{setMB, false}, - TypeMD: parserFunc{setMD, false}, - TypeMF: parserFunc{setMF, false}, - TypeMG: parserFunc{setMG, false}, - TypeMINFO: parserFunc{setMINFO, false}, - TypeMR: parserFunc{setMR, false}, - TypeMX: parserFunc{setMX, false}, - TypeNAPTR: parserFunc{setNAPTR, false}, - TypeNID: parserFunc{setNID, false}, - TypeNIMLOC: parserFunc{setNIMLOC, true}, - TypeNINFO: parserFunc{setNINFO, true}, - TypeNSAP: parserFunc{setNSAP, true}, - TypeNSAPPTR: parserFunc{setNSAPPTR, false}, - TypeNSEC3PARAM: parserFunc{setNSEC3PARAM, false}, - TypeNSEC3: parserFunc{setNSEC3, true}, - TypeNSEC: parserFunc{setNSEC, true}, - TypeNS: parserFunc{setNS, false}, - TypeOPENPGPKEY: parserFunc{setOPENPGPKEY, true}, - TypePTR: parserFunc{setPTR, false}, - TypePX: parserFunc{setPX, false}, - TypeSIG: parserFunc{setSIG, true}, - TypeRKEY: parserFunc{setRKEY, true}, - TypeRP: parserFunc{setRP, false}, - TypeRRSIG: parserFunc{setRRSIG, true}, - TypeRT: parserFunc{setRT, false}, - TypeSOA: parserFunc{setSOA, false}, - TypeSPF: parserFunc{setSPF, true}, - TypeSRV: parserFunc{setSRV, false}, - TypeSSHFP: parserFunc{setSSHFP, true}, - TypeTALINK: parserFunc{setTALINK, false}, - TypeTA: parserFunc{setTA, true}, - TypeTLSA: parserFunc{setTLSA, true}, - TypeTXT: parserFunc{setTXT, true}, - TypeUID: parserFunc{setUID, false}, - TypeUINFO: parserFunc{setUINFO, true}, - TypeURI: parserFunc{setURI, true}, - TypeWKS: parserFunc{setWKS, true}, - TypeX25: parserFunc{setX25, false}, -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator.go deleted file mode 100644 index a353c6378c..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "errors" - "fmt" - "mime" - "net/http" -) - -// ProcessorForRequestHeader interprets an HTTP request header to determine -// what Processor should be used for the given input. If no acceptable -// Processor can be found, an error is returned.
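-// For example, a Content-Type of "application/vnd.google.protobuf; -// proto=io.prometheus.client.MetricFamily; encoding=delimited" selects -// MetricFamilyProcessor, while a bare "text/plain" falls back to the most -// recent text processor, Processor004.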
-func ProcessorForRequestHeader(header http.Header) (Processor, error) { - if header == nil { - return nil, errors.New("received illegal and nil header") - } - - mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) - if err != nil { - return nil, fmt.Errorf("invalid Content-Type header %q: %s", header.Get("Content-Type"), err) - } - switch mediatype { - case "application/vnd.google.protobuf": - if params["proto"] != "io.prometheus.client.MetricFamily" { - return nil, fmt.Errorf("unrecognized protocol message %s", params["proto"]) - } - if params["encoding"] != "delimited" { - return nil, fmt.Errorf("unsupported encoding %s", params["encoding"]) - } - return MetricFamilyProcessor, nil - case "text/plain": - switch params["version"] { - case "0.0.4": - return Processor004, nil - case "": - // Fallback: most recent version. - return Processor004, nil - default: - return nil, fmt.Errorf("unrecognized API version %s", params["version"]) - } - case "application/json": - var prometheusAPIVersion string - - if params["schema"] == "prometheus/telemetry" && params["version"] != "" { - prometheusAPIVersion = params["version"] - } else { - prometheusAPIVersion = header.Get("X-Prometheus-API-Version") - } - - switch prometheusAPIVersion { - case "0.0.2": - return Processor002, nil - case "0.0.1": - return Processor001, nil - default: - return nil, fmt.Errorf("unrecognized API version %s", prometheusAPIVersion) - } - default: - return nil, fmt.Errorf("unsupported media type %q, expected one of %q, %q or %q", mediatype, "application/vnd.google.protobuf", "text/plain", "application/json") - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator_test.go deleted file mode 100644 index 4f08248d64..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -package extraction - -import ( - "errors" - "net/http" - "testing" -) - -func testDiscriminatorHTTPHeader(t testing.TB) { - var scenarios = []struct { - input map[string]string - output Processor - err error - }{ - { - output: nil, - err: errors.New("received illegal and nil header"), - }, - { - input: map[string]string{"Content-Type": "application/json", "X-Prometheus-API-Version": "0.0.0"}, - output: nil, - err: errors.New("unrecognized API version 0.0.0"), - }, - { - input: map[string]string{"Content-Type": "application/json", "X-Prometheus-API-Version": "0.0.1"}, - output: Processor001, - err: nil, - }, - { - input: map[string]string{"Content-Type": `application/json; schema="prometheus/telemetry"; version=0.0.0`}, - output: nil, - err: errors.New("unrecognized API version 0.0.0"), - }, - { - input: map[string]string{"Content-Type": `application/json; schema="prometheus/telemetry"; version=0.0.1`}, - output: Processor001, - err: nil, - }, - { - input: map[string]string{"Content-Type": `application/json; schema="prometheus/telemetry"; version=0.0.2`}, - output: Processor002, - err: nil, - }, - { - input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`}, - output: MetricFamilyProcessor, - err: nil, - }, - { - input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`}, - output: nil, - err: errors.New("unrecognized protocol message illegal"), - }, - { - input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`}, - output: nil, - err: errors.New("unsupported encoding illegal"), - }, - { - input: map[string]string{"Content-Type": `text/plain; version=0.0.4`}, - output: Processor004, - err: nil, - }, - { - input: map[string]string{"Content-Type": `text/plain`}, - output: Processor004, - err: nil, - }, - { - input: map[string]string{"Content-Type": `text/plain; version=0.0.3`}, - output: nil, - err: errors.New("unrecognized API version 0.0.3"), - }, - } - - for i, scenario := range scenarios { - var header http.Header - - if len(scenario.input) > 0 { - header = http.Header{} - } - - for key, value := range scenario.input { - header.Add(key, value) - } - - actual, err := ProcessorForRequestHeader(header) - - if scenario.err != err { - if scenario.err != nil && err != nil { - if scenario.err.Error() != err.Error() { - t.Errorf("%d. expected %s, got %s", i, scenario.err, err) - } - } else if scenario.err != nil || err != nil { - t.Errorf("%d. expected %s, got %s", i, scenario.err, err) - } - } - - if scenario.output != actual { - t.Errorf("%d. expected %s, got %s", i, scenario.output, actual) - } - } -} - -func TestDiscriminatorHTTPHeader(t *testing.T) { - testDiscriminatorHTTPHeader(t) -} - -func BenchmarkDiscriminatorHTTPHeader(b *testing.B) { - for i := 0; i < b.N; i++ { - testDiscriminatorHTTPHeader(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/extraction.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/extraction.go deleted file mode 100644 index 31cdafad4b..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/extraction.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package extraction decodes Prometheus clients' data streams for consumers. -package extraction diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/empty.json b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/empty.json deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2-large.json b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2-large.json deleted file mode 100644 index 7168338c8b..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2-large.json +++ /dev/null @@ -1,1032 +0,0 @@ -[ - { - "baseLabels": { - "__name__": "rpc_calls_total_0", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_0" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_calls_total_1", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_1" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": 
"foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_calls_total_2", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_2" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_calls_total_3", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_3" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": 
"zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_calls_total_4", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_4" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_calls_total_5", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_5" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": 
"bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_calls_total_6", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_6" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_calls_total_7", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_7" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": 
"bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_calls_total_8", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_8" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_calls_total_9", - "job": "batch_job" - }, - "docstring": "Total count of RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": 25 - }, - { - "labels": { - "foo": "baz", - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds_9" - }, - "docstring": "RPC latency summary.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "foo": "bar", - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "foo": "bar", - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "foo": "bar", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - }, - { - "labels": { - "foo": "baz", - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - } -] diff --git 
a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2.json b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2.json deleted file mode 100644 index 1ac5be7d7e..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2.json +++ /dev/null @@ -1,79 +0,0 @@ -[ - { - "baseLabels": { - "__name__": "rpc_calls_total", - "job": "batch_job" - }, - "docstring": "RPC calls.", - "metric": { - "type": "counter", - "value": [ - { - "labels": { - "service": "zed" - }, - "value": 25 - }, - { - "labels": { - "service": "bar" - }, - "value": 25 - }, - { - "labels": { - "service": "foo" - }, - "value": 25 - } - ] - } - }, - { - "baseLabels": { - "__name__": "rpc_latency_microseconds" - }, - "docstring": "RPC latency.", - "metric": { - "type": "histogram", - "value": [ - { - "labels": { - "service": "foo" - }, - "value": { - "0.010000": 15.890724674774395, - "0.050000": 15.890724674774395, - "0.500000": 84.63044031436561, - "0.900000": 160.21100853053224, - "0.990000": 172.49828748957728 - } - }, - { - "labels": { - "service": "zed" - }, - "value": { - "0.010000": 0.0459814091918713, - "0.050000": 0.0459814091918713, - "0.500000": 0.6120456642749681, - "0.900000": 1.355915069887731, - "0.990000": 1.772733213161236 - } - }, - { - "labels": { - "service": "bar" - }, - "value": { - "0.010000": 78.48563317257356, - "0.050000": 78.48563317257356, - "0.500000": 97.31798360385088, - "0.900000": 109.89202084295582, - "0.990000": 109.99626121011262 - } - } - ] - } - } -] diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor.go deleted file mode 100644 index 5a8f40bbf3..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "fmt" - "io" - "math" - - dto "github.com/prometheus/client_model/go" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - - "github.com/prometheus/client_golang/model" -) - -type metricFamilyProcessor struct{} - -// MetricFamilyProcessor decodes varint encoded record length-delimited streams -// of io.prometheus.client.MetricFamily. -// -// See http://godoc.org/github.com/matttproud/golang_protobuf_extensions/ext for -// more details. 
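-// Records are read one at a time with pbutil.ReadDelimited until io.EOF, and -// each decoded MetricFamily is converted to model.Samples and ingested.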
-var MetricFamilyProcessor = &metricFamilyProcessor{} - -func (m *metricFamilyProcessor) ProcessSingle(i io.Reader, out Ingester, o *ProcessOptions) error { - family := &dto.MetricFamily{} - - for { - family.Reset() - - if _, err := pbutil.ReadDelimited(i, family); err != nil { - if err == io.EOF { - return nil - } - return err - } - if err := extractMetricFamily(out, o, family); err != nil { - return err - } - } -} - -func extractMetricFamily(out Ingester, o *ProcessOptions, family *dto.MetricFamily) error { - switch family.GetType() { - case dto.MetricType_COUNTER: - if err := extractCounter(out, o, family); err != nil { - return err - } - case dto.MetricType_GAUGE: - if err := extractGauge(out, o, family); err != nil { - return err - } - case dto.MetricType_SUMMARY: - if err := extractSummary(out, o, family); err != nil { - return err - } - case dto.MetricType_UNTYPED: - if err := extractUntyped(out, o, family); err != nil { - return err - } - case dto.MetricType_HISTOGRAM: - if err := extractHistogram(out, o, family); err != nil { - return err - } - } - return nil -} - -func extractCounter(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { - samples := make(model.Samples, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - sample := &model.Sample{ - Metric: model.Metric{}, - Value: model.SampleValue(m.Counter.GetValue()), - } - samples = append(samples, sample) - - if m.TimestampMs != nil { - sample.Timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) - } else { - sample.Timestamp = o.Timestamp - } - - metric := sample.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - metric[model.MetricNameLabel] = model.LabelValue(f.GetName()) - } - - return out.Ingest(samples) -} - -func extractGauge(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { - samples := make(model.Samples, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - sample := &model.Sample{ - Metric: model.Metric{}, - Value: model.SampleValue(m.Gauge.GetValue()), - } - samples = append(samples, sample) - - if m.TimestampMs != nil { - sample.Timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) - } else { - sample.Timestamp = o.Timestamp - } - - metric := sample.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - metric[model.MetricNameLabel] = model.LabelValue(f.GetName()) - } - - return out.Ingest(samples) -} - -func extractSummary(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { - samples := make(model.Samples, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range m.Summary.Quantile { - sample := &model.Sample{ - Metric: model.Metric{}, - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - } - samples = append(samples, sample) - - metric := sample.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". 
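- // Each quantile becomes a sample of its own, distinguished by the - // "quantile" label; SampleSum and SampleCount become the _sum and _count - // series below.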
- metric[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - metric[model.MetricNameLabel] = model.LabelValue(f.GetName()) - } - - if m.Summary.SampleSum != nil { - sum := &model.Sample{ - Metric: model.Metric{}, - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - } - samples = append(samples, sum) - - metric := sum.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - } - - if m.Summary.SampleCount != nil { - count := &model.Sample{ - Metric: model.Metric{}, - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - metric := count.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - } - } - - return out.Ingest(samples) -} - -func extractUntyped(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { - samples := make(model.Samples, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Untyped == nil { - continue - } - - sample := &model.Sample{ - Metric: model.Metric{}, - Value: model.SampleValue(m.Untyped.GetValue()), - } - samples = append(samples, sample) - - if m.TimestampMs != nil { - sample.Timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) - } else { - sample.Timestamp = o.Timestamp - } - - metric := sample.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - metric[model.MetricNameLabel] = model.LabelValue(f.GetName()) - } - - return out.Ingest(samples) -} - -func extractHistogram(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { - samples := make(model.Samples, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - sample := &model.Sample{ - Metric: model.Metric{}, - Value: model.SampleValue(q.GetCumulativeCount()), - Timestamp: timestamp, - } - samples = append(samples, sample) - - metric := sample.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - metric[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) - metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - } - - if m.Histogram.SampleSum != nil { - sum := &model.Sample{ - Metric: model.Metric{}, - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: timestamp, - } - samples = append(samples, sum) - - metric := sum.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - } - - if m.Histogram.SampleCount != nil { - count := &model.Sample{ - Metric: model.Metric{}, - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - metric := count.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") 
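- // A conformant histogram always exposes a "+Inf" bucket whose cumulative - // count equals the total sample count; when the input omits it, one is - // synthesized from SampleCount.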
- - if !infSeen { - infBucket := &model.Sample{ - Metric: model.Metric{}, - Value: count.Value, - Timestamp: timestamp, - } - samples = append(samples, infBucket) - - metric := infBucket.Metric - for _, p := range m.Label { - metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - metric[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - } - } - } - - return out.Ingest(samples) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor_test.go deleted file mode 100644 index 9ba0fdbce3..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "sort" - "strings" - "testing" - - "github.com/prometheus/client_golang/model" -) - -var testTime = model.Now() - -type metricFamilyProcessorScenario struct { - in string - expected, actual []model.Samples -} - -func (s *metricFamilyProcessorScenario) Ingest(samples model.Samples) error { - s.actual = append(s.actual, samples) - return nil -} - -func (s *metricFamilyProcessorScenario) test(t *testing.T, set int) { - i := strings.NewReader(s.in) - - o := &ProcessOptions{ - Timestamp: testTime, - } - - err := MetricFamilyProcessor.ProcessSingle(i, s, o) - if err != nil { - t.Fatalf("%d. got error: %s", set, err) - } - - if len(s.expected) != len(s.actual) { - t.Fatalf("%d. expected length %d, got %d", set, len(s.expected), len(s.actual)) - } - - for i, expected := range s.expected { - sort.Sort(s.actual[i]) - sort.Sort(expected) - - if !expected.Equal(s.actual[i]) { - t.Errorf("%d.%d. 
expected %s, got %s", set, i, expected, s.actual[i]) - } - } -} - -func TestMetricFamilyProcessor(t *testing.T) { - scenarios := []metricFamilyProcessorScenario{ - { - in: "", - }, - { - in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", - expected: []model.Samples{ - model.Samples{ - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_count", "some_label_name": "some_label_value"}, - Value: -42, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_count", "another_label_name": "another_label_value"}, - Value: 84, - Timestamp: testTime, - }, - }, - }, - }, - { - in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@", - expected: []model.Samples{ - model.Samples{ - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_count", "some_label_name": "some_label_value", "quantile": "0.99"}, - Value: -42, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_count", "some_label_name": "some_label_value", "quantile": "0.999"}, - Value: -84, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_count", "another_label_name": "another_label_value", "quantile": "0.5"}, - Value: 10, - Timestamp: testTime, - }, - }, - }, - }, - { - in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f", - expected: []model.Samples{ - model.Samples{ - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "100"}, - Value: 123, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "120"}, - Value: 412, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "144"}, - Value: 592, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "172.8"}, - Value: 1524, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "+Inf"}, - Value: 2693, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_sum"}, - Value: 1756047.3, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_count"}, - Value: 2693, - Timestamp: testTime, - }, - }, - }, - }, - } - - for i, scenario := range scenarios { - scenario.test(t, i) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor.go 
b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor.go deleted file mode 100644 index 50c93c9e68..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "io" - "time" - - "github.com/prometheus/client_golang/model" -) - -// ProcessOptions dictates how the interpreted stream should be rendered for -// consumption. -type ProcessOptions struct { - // Timestamp is added to each value from the stream that has no explicit - // timestamp set. - Timestamp model.Timestamp -} - -// Ingester consumes result streams in whatever way is desired by the user. -type Ingester interface { - Ingest(model.Samples) error -} - -// Processor is responsible for decoding the actual message responses from -// stream into a format that can be consumed with the end result written -// to the results channel. -type Processor interface { - // ProcessSingle treats the input as a single self-contained message body and - // transforms it accordingly. It has no support for streaming. - ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error -} - -// Helper function to convert map[string]string into LabelSet. -// -// NOTE: This should be deleted when support for go 1.0.3 is removed; 1.1 is -// smart enough to unmarshal JSON objects into LabelSet directly. -func labelSet(labels map[string]string) model.LabelSet { - labelset := make(model.LabelSet, len(labels)) - - for k, v := range labels { - labelset[model.LabelName(k)] = model.LabelValue(v) - } - - return labelset -} - -// A basic interface only useful in testing contexts for dispensing the time -// in a controlled manner. -type instantProvider interface { - // The current instant. - Now() time.Time -} - -// Clock is a simple means for fluently wrapping around standard Go timekeeping -// mechanisms to enhance testability without compromising code readability. -// -// It is sufficient for use on bare initialization. A provider should be -// set only for test contexts. When not provided, it emits the current -// system time. -type clock struct { - // The underlying means through which time is provided, if supplied. - Provider instantProvider -} - -// Emit the current instant. -func (t *clock) Now() time.Time { - if t.Provider == nil { - return time.Now() - } - - return t.Provider.Now() -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1.go deleted file mode 100644 index 7a728efb0e..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - - "github.com/prometheus/client_golang/model" -) - -const ( - baseLabels001 = "baseLabels" - counter001 = "counter" - docstring001 = "docstring" - gauge001 = "gauge" - histogram001 = "histogram" - labels001 = "labels" - metric001 = "metric" - type001 = "type" - value001 = "value" - percentile001 = "percentile" -) - -// Processor001 is responsible for decoding payloads from protocol version -// 0.0.1. -var Processor001 = &processor001{} - -// processor001 is responsible for handling API version 0.0.1. -type processor001 struct{} - -// entity001 represents the JSON structure that 0.0.1 uses. -type entity001 []struct { - BaseLabels map[string]string `json:"baseLabels"` - Docstring string `json:"docstring"` - Metric struct { - MetricType string `json:"type"` - Value []struct { - Labels map[string]string `json:"labels"` - Value interface{} `json:"value"` - } `json:"value"` - } `json:"metric"` -} - -func (p *processor001) ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error { - // TODO(matt): Replace with plain-jane JSON unmarshalling. - buffer, err := ioutil.ReadAll(in) - if err != nil { - return err - } - - entities := entity001{} - if err = json.Unmarshal(buffer, &entities); err != nil { - return err - } - - // TODO(matt): This outer loop is a great basis for parallelization.
- pendingSamples := model.Samples{} - for _, entity := range entities { - for _, value := range entity.Metric.Value { - labels := labelSet(entity.BaseLabels).Merge(labelSet(value.Labels)) - - switch entity.Metric.MetricType { - case gauge001, counter001: - sampleValue, ok := value.Value.(float64) - if !ok { - return fmt.Errorf("could not convert value from %s %s to float64", entity, value) - } - - pendingSamples = append(pendingSamples, &model.Sample{ - Metric: model.Metric(labels), - Timestamp: o.Timestamp, - Value: model.SampleValue(sampleValue), - }) - - break - - case histogram001: - sampleValue, ok := value.Value.(map[string]interface{}) - if !ok { - return fmt.Errorf("could not convert value from %q to a map[string]interface{}", value.Value) - } - - for percentile, percentileValue := range sampleValue { - individualValue, ok := percentileValue.(float64) - if !ok { - return fmt.Errorf("could not convert value from %q to a float64", percentileValue) - } - - childMetric := make(map[model.LabelName]model.LabelValue, len(labels)+1) - - for k, v := range labels { - childMetric[k] = v - } - - childMetric[model.LabelName(percentile001)] = model.LabelValue(percentile) - - pendingSamples = append(pendingSamples, &model.Sample{ - Metric: model.Metric(childMetric), - Timestamp: o.Timestamp, - Value: model.SampleValue(individualValue), - }) - } - - break - } - } - } - if len(pendingSamples) > 0 { - return out.Ingest(pendingSamples) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1_test.go deleted file mode 100644 index b970b03e97..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1_test.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "errors" - "os" - "path" - "sort" - "testing" - - "github.com/prometheus/client_golang/model" -) - -var test001Time = model.Now() - -type testProcessor001ProcessScenario struct { - in string - expected, actual []model.Samples - err error -} - -func (s *testProcessor001ProcessScenario) Ingest(samples model.Samples) error { - s.actual = append(s.actual, samples) - return nil -} - -func (s *testProcessor001ProcessScenario) test(t testing.TB, set int) { - reader, err := os.Open(path.Join("fixtures", s.in)) - if err != nil { - t.Fatalf("%d. couldn't open scenario input file %s: %s", set, s.in, err) - } - - options := &ProcessOptions{ - Timestamp: test001Time, - } - err = Processor001.ProcessSingle(reader, s, options) - if s.err != err && (s.err == nil || err == nil || err.Error() != s.err.Error()) { - t.Fatalf("%d. expected err of %s, got %s", set, s.err, err) - } - - if len(s.actual) != len(s.expected) { - t.Fatalf("%d. 
expected output length of %d, got %d", set, len(s.expected), len(s.actual)) - } - - for i, expected := range s.expected { - sort.Sort(s.actual[i]) - sort.Sort(expected) - - if !expected.Equal(s.actual[i]) { - t.Errorf("%d.%d. expected %s, got %s", set, i, expected, s.actual[i]) - } - } -} - -func testProcessor001Process(t testing.TB) { - var scenarios = []testProcessor001ProcessScenario{ - { - in: "empty.json", - err: errors.New("unexpected end of JSON input"), - }, - { - in: "test0_0_1-0_0_2.json", - expected: []model.Samples{ - model.Samples{ - &model.Sample{ - Metric: model.Metric{"service": "zed", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, - Value: 25, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"service": "bar", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, - Value: 25, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"service": "foo", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, - Value: 25, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 0.0459814091918713, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 78.48563317257356, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 15.890724674774395, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 0.0459814091918713, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 78.48563317257356, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 15.890724674774395, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 0.6120456642749681, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 97.31798360385088, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 84.63044031436561, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 1.355915069887731, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 109.89202084295582, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 160.21100853053224, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 
1.772733213161236, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 109.99626121011262, - Timestamp: test001Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 172.49828748957728, - Timestamp: test001Time, - }, - }, - }, - }, - } - - for i, scenario := range scenarios { - scenario.test(t, i) - } -} - -func TestProcessor001Process(t *testing.T) { - testProcessor001Process(t) -} - -func BenchmarkProcessor001Process(b *testing.B) { - for i := 0; i < b.N; i++ { - testProcessor001Process(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2.go deleted file mode 100644 index 24c7e81554..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/prometheus/client_golang/model" -) - -// Processor002 is responsible for decoding payloads from protocol version -// 0.0.2. -var Processor002 = &processor002{} - -type histogram002 struct { - Labels map[string]string `json:"labels"` - Values map[string]model.SampleValue `json:"value"` -} - -type counter002 struct { - Labels map[string]string `json:"labels"` - Value model.SampleValue `json:"value"` -} - -type processor002 struct{} - -func (p *processor002) ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error { - // Processor for telemetry schema version 0.0.2. 
- // container for telemetry data - var entities []struct { - BaseLabels map[string]string `json:"baseLabels"` - Docstring string `json:"docstring"` - Metric struct { - Type string `json:"type"` - Values json.RawMessage `json:"value"` - } `json:"metric"` - } - - if err := json.NewDecoder(in).Decode(&entities); err != nil { - return err - } - - pendingSamples := model.Samples{} - for _, entity := range entities { - switch entity.Metric.Type { - case "counter", "gauge": - var values []counter002 - - if err := json.Unmarshal(entity.Metric.Values, &values); err != nil { - return fmt.Errorf("could not extract %s value: %s", entity.Metric.Type, err) - } - - for _, counter := range values { - labels := labelSet(entity.BaseLabels).Merge(labelSet(counter.Labels)) - - pendingSamples = append(pendingSamples, &model.Sample{ - Metric: model.Metric(labels), - Timestamp: o.Timestamp, - Value: counter.Value, - }) - } - - case "histogram": - var values []histogram002 - - if err := json.Unmarshal(entity.Metric.Values, &values); err != nil { - return fmt.Errorf("could not extract %s value: %s", entity.Metric.Type, err) - } - - for _, histogram := range values { - for percentile, value := range histogram.Values { - labels := labelSet(entity.BaseLabels).Merge(labelSet(histogram.Labels)) - labels[model.LabelName("percentile")] = model.LabelValue(percentile) - - pendingSamples = append(pendingSamples, &model.Sample{ - Metric: model.Metric(labels), - Timestamp: o.Timestamp, - Value: value, - }) - } - } - - default: - return fmt.Errorf("unknown metric type %q", entity.Metric.Type) - } - } - - if len(pendingSamples) > 0 { - return out.Ingest(pendingSamples) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2_test.go deleted file mode 100644 index b2b7587020..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2_test.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "bytes" - "errors" - "io/ioutil" - "os" - "path" - "runtime" - "sort" - "testing" - - "github.com/prometheus/client_golang/model" -) - -var test002Time = model.Now() - -type testProcessor002ProcessScenario struct { - in string - expected, actual []model.Samples - err error -} - -func (s *testProcessor002ProcessScenario) Ingest(samples model.Samples) error { - s.actual = append(s.actual, samples) - return nil -} - -func (s *testProcessor002ProcessScenario) test(t testing.TB, set int) { - reader, err := os.Open(path.Join("fixtures", s.in)) - if err != nil { - t.Fatalf("%d. 
couldn't open scenario input file %s: %s", set, s.in, err) - } - - options := &ProcessOptions{ - Timestamp: test002Time, - } - err = Processor002.ProcessSingle(reader, s, options) - if s.err != err && (s.err == nil || err == nil || err.Error() != s.err.Error()) { - t.Fatalf("%d. expected err of %s, got %s", set, s.err, err) - } - - if len(s.actual) != len(s.expected) { - t.Fatalf("%d. expected output length of %d, got %d", set, len(s.expected), len(s.actual)) - } - - for i, expected := range s.expected { - sort.Sort(s.actual[i]) - sort.Sort(expected) - - if !expected.Equal(s.actual[i]) { - t.Fatalf("%d.%d. expected %s, got %s", set, i, expected, s.actual[i]) - } - } -} - -func testProcessor002Process(t testing.TB) { - var scenarios = []testProcessor002ProcessScenario{ - { - in: "empty.json", - err: errors.New("EOF"), - }, - { - in: "test0_0_1-0_0_2.json", - expected: []model.Samples{ - model.Samples{ - &model.Sample{ - Metric: model.Metric{"service": "zed", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, - Value: 25, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"service": "bar", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, - Value: 25, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"service": "foo", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, - Value: 25, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 0.0459814091918713, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 78.48563317257356, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 15.890724674774395, - Timestamp: test002Time, - }, - &model.Sample{ - - Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 0.0459814091918713, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 78.48563317257356, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 15.890724674774395, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 0.6120456642749681, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 97.31798360385088, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 84.63044031436561, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 1.355915069887731, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 109.89202084295582, - Timestamp: 
test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 160.21100853053224, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, - Value: 1.772733213161236, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, - Value: 109.99626121011262, - Timestamp: test002Time, - }, - &model.Sample{ - Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, - Value: 172.49828748957728, - Timestamp: test002Time, - }, - }, - }, - }, - } - - for i, scenario := range scenarios { - scenario.test(t, i) - } -} - -func TestProcessor002Process(t *testing.T) { - testProcessor002Process(t) -} - -func BenchmarkProcessor002Process(b *testing.B) { - b.StopTimer() - - pre := runtime.MemStats{} - runtime.ReadMemStats(&pre) - - b.StartTimer() - - for i := 0; i < b.N; i++ { - testProcessor002Process(b) - } - - post := runtime.MemStats{} - runtime.ReadMemStats(&post) - - allocated := post.TotalAlloc - pre.TotalAlloc - - b.Logf("Allocated %d at %f per cycle with %d cycles.", allocated, float64(allocated)/float64(b.N), b.N) -} - -func BenchmarkProcessor002ParseOnly(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("fixtures/test0_0_1-0_0_2-large.json") - if err != nil { - b.Fatal(err) - } - ing := fakeIngester{} - b.StartTimer() - - for i := 0; i < b.N; i++ { - if err := Processor002.ProcessSingle(bytes.NewReader(data), ing, &ProcessOptions{}); err != nil { - b.Fatal(err) - } - } -} - -type fakeIngester struct{} - -func (i fakeIngester) Ingest(model.Samples) error { - return nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor.go deleted file mode 100644 index 2eca1c63a6..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "io" - - "github.com/prometheus/client_golang/text" -) - -type processor004 struct{} - -// Processor004 is responsible for decoding payloads from the text-based variety -// of protocol version 0.0.4.
-var Processor004 = &processor004{} - -func (t *processor004) ProcessSingle(i io.Reader, out Ingester, o *ProcessOptions) error { - var parser text.Parser - metricFamilies, err := parser.TextToMetricFamilies(i) - if err != nil { - return err - } - for _, metricFamily := range metricFamilies { - if err := extractMetricFamily(out, o, metricFamily); err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor_test.go deleted file mode 100644 index ff704a9bc3..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extraction - -import ( - "sort" - "strings" - "testing" - - "github.com/prometheus/client_golang/model" -) - -var ( - ts = model.Now() - in = ` -# Only a quite simple scenario with two metric families. -# More complicated tests of the parser itself can be found in the text package. -# TYPE mf2 counter -mf2 3 -mf1{label="value1"} -3.14 123456 -mf1{label="value2"} 42 -mf2 4 -` - out = map[model.LabelValue]model.Samples{ - "mf1": model.Samples{ - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "mf1", "label": "value1"}, - Value: -3.14, - Timestamp: 123456, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "mf1", "label": "value2"}, - Value: 42, - Timestamp: ts, - }, - }, - "mf2": model.Samples{ - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "mf2"}, - Value: 3, - Timestamp: ts, - }, - &model.Sample{ - Metric: model.Metric{model.MetricNameLabel: "mf2"}, - Value: 4, - Timestamp: ts, - }, - }, - } -) - -type testIngester struct { - results []model.Samples -} - -func (i *testIngester) Ingest(s model.Samples) error { - i.results = append(i.results, s) - return nil -} - -func TestTextProcessor(t *testing.T) { - var ingester testIngester - i := strings.NewReader(in) - o := &ProcessOptions{ - Timestamp: ts, - } - - err := Processor004.ProcessSingle(i, &ingester, o) - if err != nil { - t.Fatal(err) - } - if expected, got := len(out), len(ingester.results); expected != got { - t.Fatalf("Expected length %d, got %d", expected, got) - } - for _, r := range ingester.results { - expected, ok := out[r[0].Metric[model.MetricNameLabel]] - if !ok { - t.Fatalf( - "Unexpected metric name %q", - r[0].Metric[model.MetricNameLabel], - ) - } - sort.Sort(expected) - sort.Sort(r) - if !expected.Equal(r) { - t.Errorf("expected %s, got %s", expected, r) - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/fingerprinting.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/fingerprinting.go deleted file mode 100644 index 5b2ffe3bb3..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/fingerprinting.go +++ /dev/null @@ -1,110 +0,0 
@@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Less reports whether f is less than o. -func (f Fingerprint) Less(o Fingerprint) bool { - return f < o -} - -// Equal reports whether f equals o. -func (f Fingerprint) Equal(o Fingerprint) bool { - return f == o -} - -// LoadFromString transforms a string representation into a Fingerprint. -func (f *Fingerprint) LoadFromString(s string) error { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return err - } - *f = Fingerprint(num) - return nil -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. -func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go deleted file mode 100644 index bcb8f8906f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" -) - -const ( - // ExportedLabelPrefix is the prefix to prepend to the label names present in - // exported metrics if a label of the same name is added by the server. - ExportedLabelPrefix LabelName = "exported_" - - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel LabelName = "__name__" - - // SchemeLabel is the name of the label that holds the scheme on which to - // scrape a target. - SchemeLabel LabelName = "__scheme__" - - // AddressLabel is the name of the label that holds the address of - // a scrape target. - AddressLabel LabelName = "__address__" - - // MetricsPathLabel is the name of the label that holds the path on which to - // scrape a target. - MetricsPathLabel LabelName = "__metrics_path__" - - // ReservedLabelPrefix is a prefix which is not legal in user-supplied - // label names. - ReservedLabelPrefix = "__" - - // MetaLabelPrefix is a prefix for labels that provide meta information. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. - MetaLabelPrefix = "__meta_" - - // ParamLabelPrefix is a prefix for labels that provide URL parameters - // used to scrape a target. - ParamLabelPrefix = "__param_" - - // JobLabel is the label name indicating the job from which a timeseries - // was scraped. - JobLabel LabelName = "job" - - // InstanceLabel is the label name used for the instance label. - InstanceLabel LabelName = "instance" - - // BucketLabel is used for the label that defines the upper bound of a - // bucket of a histogram ("le" -> "less or equal"). - BucketLabel = "le" - - // QuantileLabel is used for the label that defines the quantile in a - // summary. - QuantileLabel = "quantile" -) - -// LabelNameRE is a regular expression matching valid label names. -var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") - -// A LabelName is a key for a LabelSet or Metric. It has a value associated -// therewith. -type LabelName string - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelNameRE.MatchString(s) { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelNameRE.MatchString(s) { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// LabelNames is a sortable LabelName slice. It implements sort.Interface.
-type LabelNames []LabelName - -func (l LabelNames) Len() int { - return len(l) -} - -func (l LabelNames) Less(i, j int) bool { - return l[i] < l[j] -} - -func (l LabelNames) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -func (l LabelNames) String() string { - labelStrings := make([]string, 0, len(l)) - for _, label := range l { - labelStrings = append(labelStrings, string(label)) - } - return strings.Join(labelStrings, ", ") -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname_test.go deleted file mode 100644 index 693228d347..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "sort" - "testing" -) - -func testLabelNames(t testing.TB) { - var scenarios = []struct { - in LabelNames - out LabelNames - }{ - { - in: LabelNames{"ZZZ", "zzz"}, - out: LabelNames{"ZZZ", "zzz"}, - }, - { - in: LabelNames{"aaa", "AAA"}, - out: LabelNames{"AAA", "aaa"}, - }, - } - - for i, scenario := range scenarios { - sort.Sort(scenario.in) - - for j, expected := range scenario.out { - if expected != scenario.in[j] { - t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) - } - } - } -} - -func TestLabelNames(t *testing.T) { - testLabelNames(t) -} - -func BenchmarkLabelNames(b *testing.B) { - for i := 0; i < b.N; i++ { - testLabelNames(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelset.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelset.go deleted file mode 100644 index 382ab62d39..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelset.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. 
-type LabelSet map[LabelName]LabelValue - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -func (l LabelSet) String() string { - labelStrings := make([]string, 0, len(l)) - for label, value := range l { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - - switch len(labelStrings) { - case 0: - return "" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("{%s}", strings.Join(labelStrings, ", ")) - } -} - -// MergeFromMetric merges Metric into this LabelSet. -func (l LabelSet) MergeFromMetric(m Metric) { - for k, v := range m { - l[k] = v - } -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !LabelNameRE.MatchString(string(ln)) { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue.go deleted file mode 100644 index df2d14cc12..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// A LabelValue is an associated value for a LabelName. -type LabelValue string - -// LabelValues is a sortable LabelValue slice. It implements sort.Interface. -type LabelValues []LabelValue - -func (l LabelValues) Len() int { - return len(l) -} - -func (l LabelValues) Less(i, j int) bool { - return string(l[i]) < string(l[j]) -} - -func (l LabelValues) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue_test.go deleted file mode 100644 index 15904edf4f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "sort" - "testing" -) - -func testLabelValues(t testing.TB) { - var scenarios = []struct { - in LabelValues - out LabelValues - }{ - { - in: LabelValues{"ZZZ", "zzz"}, - out: LabelValues{"ZZZ", "zzz"}, - }, - { - in: LabelValues{"aaa", "AAA"}, - out: LabelValues{"AAA", "aaa"}, - }, - } - - for i, scenario := range scenarios { - sort.Sort(scenario.in) - - for j, expected := range scenario.out { - if expected != scenario.in[j] { - t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) - } - } - } -} - -func TestLabelValues(t *testing.T) { - testLabelValues(t) -} - -func BenchmarkLabelValues(b *testing.B) { - for i := 0; i < b.N; i++ { - testLabelValues(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric.go deleted file mode 100644 index 864c012ecc..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -var separator = []byte{0} - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric map[LabelName]LabelValue - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - if len(m) != len(o) { - return false - } - for ln, lv := range m { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. -func (m Metric) Before(o Metric) bool { - if len(m) < len(o) { - return true - } - if len(m) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(m)+len(o)) - for ln := range m { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. 
- sort.Sort(lns) - for _, ln := range lns { - mlv, ok := m[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -// String implements Stringer. -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return metricToFingerprint(m) -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return metricToFastFingerprint(m) -} - -// Clone returns a copy of the Metric. -func (m Metric) Clone() Metric { - clone := Metric{} - for k, v := range m { - clone[k] = v - } - return clone -} - -// MergeFromLabelSet merges a label set into this Metric, prefixing a collision -// prefix to the label names merged from the label set where required. -func (m Metric) MergeFromLabelSet(labels LabelSet, collisionPrefix LabelName) { - for k, v := range labels { - if collisionPrefix != "" { - for { - if _, exists := m[k]; !exists { - break - } - k = collisionPrefix + k - } - } - - m[k] = v - } -} - -// COWMetric wraps a Metric to enable copy-on-write access patterns. -type COWMetric struct { - Copied bool - Metric Metric -} - -// Set sets a label name in the wrapped Metric to a given value and copies the -// Metric initially, if it is not already a copy. -func (m *COWMetric) Set(ln LabelName, lv LabelValue) { - m.doCOW() - m.Metric[ln] = lv -} - -// Delete deletes a given label name from the wrapped Metric and copies the -// Metric initially, if it is not already a copy. -func (m *COWMetric) Delete(ln LabelName) { - m.doCOW() - delete(m.Metric, ln) -} - -// doCOW copies the underlying Metric if it is not already a copy. -func (m *COWMetric) doCOW() { - if !m.Copied { - m.Metric = m.Metric.Clone() - m.Copied = true - } -} - -// String implements fmt.Stringer. -func (m COWMetric) String() string { - return m.Metric.String() -} - -// MarshalJSON implements json.Marshaler. -func (m COWMetric) MarshalJSON() ([]byte, error) { - return json.Marshal(m.Metric) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric_test.go deleted file mode 100644 index 5dbc023764..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import "testing" - -func testMetric(t testing.TB) { - var scenarios = []struct { - input Metric - fingerprint Fingerprint - fastFingerprint Fingerprint - }{ - { - input: Metric{}, - fingerprint: 14695981039346656037, - fastFingerprint: 14695981039346656037, - }, - { - input: Metric{ - "first_name": "electro", - "occupation": "robot", - "manufacturer": "westinghouse", - }, - fingerprint: 5911716720268894962, - fastFingerprint: 11310079640881077873, - }, - { - input: Metric{ - "x": "y", - }, - fingerprint: 8241431561484471700, - fastFingerprint: 13948396922932177635, - }, - { - input: Metric{ - "a": "bb", - "b": "c", - }, - fingerprint: 3016285359649981711, - fastFingerprint: 3198632812309449502, - }, - { - input: Metric{ - "a": "b", - "bb": "c", - }, - fingerprint: 7122421792099404749, - fastFingerprint: 5774953389407657638, - }, - } - - for i, scenario := range scenarios { - if scenario.fingerprint != scenario.input.Fingerprint() { - t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, scenario.input.Fingerprint()) - } - if scenario.fastFingerprint != scenario.input.FastFingerprint() { - t.Errorf("%d. expected %d, got %d", i, scenario.fastFingerprint, scenario.input.FastFingerprint()) - } - } -} - -func TestMetric(t *testing.T) { - testMetric(t) -} - -func BenchmarkMetric(b *testing.B) { - for i := 0; i < b.N; i++ { - testMetric(b) - } -} - -func TestCOWMetric(t *testing.T) { - testMetric := Metric{ - "to_delete": "test1", - "to_change": "test2", - } - - scenarios := []struct { - fn func(*COWMetric) - out Metric - }{ - { - fn: func(cm *COWMetric) { - cm.Delete("to_delete") - }, - out: Metric{ - "to_change": "test2", - }, - }, - { - fn: func(cm *COWMetric) { - cm.Set("to_change", "changed") - }, - out: Metric{ - "to_delete": "test1", - "to_change": "changed", - }, - }, - } - - for i, s := range scenarios { - orig := testMetric.Clone() - cm := &COWMetric{ - Metric: orig, - } - - s.fn(cm) - - // Test that the original metric was not modified. - if !orig.Equal(testMetric) { - t.Fatalf("%d. original metric changed; expected %v, got %v", i, testMetric, orig) - } - - // Test that the new metric has the right changes. - if !cm.Metric.Equal(s.out) { - t.Fatalf("%d. copied metric doesn't contain expected changes; expected %v, got %v", i, s.out, cm.Metric) - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/model.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/model.go deleted file mode 100644 index 189c5dcf6c..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/model.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package model contains core representation of Prometheus client primitives. -package model diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample.go deleted file mode 100644 index c13a44d95c..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Sample is a sample value with a timestamp and a metric. -type Sample struct { - Metric Metric - Value SampleValue - Timestamp Timestamp -} - -// Equal compares first the metrics, then the timestamp, then the value. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - if !s.Value.Equal(o.Value) { - return false - } - - return true -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample_test.go deleted file mode 100644 index d5e065d6c5..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "sort" - "testing" -) - -func TestSamplesSort(t *testing.T) { - input := Samples{ - &Sample{ - Metric: Metric{ - MetricNameLabel: "A", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "A", - }, - Timestamp: 2, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "C", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "C", - }, - Timestamp: 2, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "B", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "B", - }, - Timestamp: 2, - }, - } - - expected := Samples{ - &Sample{ - Metric: Metric{ - MetricNameLabel: "A", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "A", - }, - Timestamp: 2, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "B", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "B", - }, - Timestamp: 2, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "C", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "C", - }, - Timestamp: 2, - }, - } - - sort.Sort(input) - - for i, actual := range input { - actualFp := actual.Metric.Fingerprint() - expectedFp := expected[i].Metric.Fingerprint() - - if !actualFp.Equal(expectedFp) { - t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String()) - } - - if actual.Timestamp != expected[i].Timestamp { - t.Fatalf("%d. Incorrect timestamp. Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp) - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/samplevalue.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/samplevalue.go deleted file mode 100644 index 469c2c0b0e..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/samplevalue.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// Equal does a straight v==o. -func (v SampleValue) Equal(o SampleValue) bool { - return v == o -} - -// MarshalJSON implements json.Marshaler. 
-func (v SampleValue) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, v)), nil -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature.go deleted file mode 100644 index 7bd58f4b0b..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "bytes" - "hash" - "hash/fnv" - "sort" - "sync" -) - -// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is -// used to separate label names, label values, and other strings from each other -// when calculating their combined hash value (aka signature aka fingerprint). -const SeparatorByte byte = 255 - -var ( - // cache the signature of an empty label set. - emptyLabelSignature = fnv.New64a().Sum64() - - hashAndBufPool sync.Pool -) - -type hashAndBuf struct { - h hash.Hash64 - b bytes.Buffer -} - -func getHashAndBuf() *hashAndBuf { - hb := hashAndBufPool.Get() - if hb == nil { - return &hashAndBuf{h: fnv.New64a()} - } - return hb.(*hashAndBuf) -} - -func putHashAndBuf(hb *hashAndBuf) { - hb.h.Reset() - hb.b.Reset() - hashAndBufPool.Put(hb) -} - -// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a -// given label set. (Collisions are possible but unlikely if the number of label -// sets the function is applied to is small.) -func LabelsToSignature(labels map[string]string) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - labelNames := make([]string, 0, len(labels)) - for labelName := range labels { - labelNames = append(labelNames, labelName) - } - sort.Strings(labelNames) - - hb := getHashAndBuf() - defer putHashAndBuf(hb) - - for _, labelName := range labelNames { - hb.b.WriteString(labelName) - hb.b.WriteByte(SeparatorByte) - hb.b.WriteString(labels[labelName]) - hb.b.WriteByte(SeparatorByte) - hb.h.Write(hb.b.Bytes()) - hb.b.Reset() - } - return hb.h.Sum64() -} - -// metricToFingerprint works exactly as LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and returns a Fingerprint. 
-func metricToFingerprint(m Metric) Fingerprint { - if len(m) == 0 { - return Fingerprint(emptyLabelSignature) - } - - labelNames := make(LabelNames, 0, len(m)) - for labelName := range m { - labelNames = append(labelNames, labelName) - } - sort.Sort(labelNames) - - hb := getHashAndBuf() - defer putHashAndBuf(hb) - - for _, labelName := range labelNames { - hb.b.WriteString(string(labelName)) - hb.b.WriteByte(SeparatorByte) - hb.b.WriteString(string(m[labelName])) - hb.b.WriteByte(SeparatorByte) - hb.h.Write(hb.b.Bytes()) - hb.b.Reset() - } - return Fingerprint(hb.h.Sum64()) -} - -// metricToFastFingerprint works similarly to metricToFingerprint but uses a -// faster and less allocation-heavy hash function, which is more prone to -// hash collisions. Therefore, collision detection should be applied. -func metricToFastFingerprint(m Metric) Fingerprint { - if len(m) == 0 { - return Fingerprint(emptyLabelSignature) - } - - var result uint64 - hb := getHashAndBuf() - defer putHashAndBuf(hb) - - for labelName, labelValue := range m { - hb.b.WriteString(string(labelName)) - hb.b.WriteByte(SeparatorByte) - hb.b.WriteString(string(labelValue)) - hb.h.Write(hb.b.Bytes()) - result ^= hb.h.Sum64() - hb.h.Reset() - hb.b.Reset() - } - return Fingerprint(result) -} - -// SignatureForLabels works like LabelsToSignature but takes a Metric as a -// parameter (rather than a label map) and only includes the labels with the -// specified LabelNames in the signature calculation. The labels passed in -// will be sorted by this function. -func SignatureForLabels(m Metric, labels LabelNames) uint64 { - if len(m) == 0 || len(labels) == 0 { - return emptyLabelSignature - } - - sort.Sort(labels) - - hb := getHashAndBuf() - defer putHashAndBuf(hb) - - for _, label := range labels { - hb.b.WriteString(string(label)) - hb.b.WriteByte(SeparatorByte) - hb.b.WriteString(string(m[label])) - hb.b.WriteByte(SeparatorByte) - hb.h.Write(hb.b.Bytes()) - hb.b.Reset() - } - return hb.h.Sum64() -} - -// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as a -// parameter (rather than a label map) and excludes the labels with any of the -// specified LabelNames from the signature calculation. -func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { - if len(m) == 0 { - return emptyLabelSignature - } - - labelNames := make(LabelNames, 0, len(m)) - for labelName := range m { - if _, exclude := labels[labelName]; !exclude { - labelNames = append(labelNames, labelName) - } - } - if len(labelNames) == 0 { - return emptyLabelSignature - } - sort.Sort(labelNames) - - hb := getHashAndBuf() - defer putHashAndBuf(hb) - - for _, labelName := range labelNames { - hb.b.WriteString(string(labelName)) - hb.b.WriteByte(SeparatorByte) - hb.b.WriteString(string(m[labelName])) - hb.b.WriteByte(SeparatorByte) - hb.h.Write(hb.b.Bytes()) - hb.b.Reset() - } - return hb.h.Sum64() -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature_test.go deleted file mode 100644 index 01db531d07..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature_test.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "runtime" - "sync" - "testing" -) - -func TestLabelsToSignature(t *testing.T) { - var scenarios = []struct { - in map[string]string - out uint64 - }{ - { - in: map[string]string{}, - out: 14695981039346656037, - }, - { - in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"}, - out: 5799056148416392346, - }, - } - - for i, scenario := range scenarios { - actual := LabelsToSignature(scenario.in) - - if actual != scenario.out { - t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) - } - } -} - -func TestMetricToFingerprint(t *testing.T) { - var scenarios = []struct { - in Metric - out Fingerprint - }{ - { - in: Metric{}, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - out: 5799056148416392346, - }, - } - - for i, scenario := range scenarios { - actual := metricToFingerprint(scenario.in) - - if actual != scenario.out { - t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) - } - } -} - -func TestMetricToFastFingerprint(t *testing.T) { - var scenarios = []struct { - in Metric - out Fingerprint - }{ - { - in: Metric{}, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - out: 12952432476264840823, - }, - } - - for i, scenario := range scenarios { - actual := metricToFastFingerprint(scenario.in) - - if actual != scenario.out { - t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) - } - } -} - -func TestSignatureForLabels(t *testing.T) { - var scenarios = []struct { - in Metric - labels LabelNames - out uint64 - }{ - { - in: Metric{}, - labels: nil, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: LabelNames{"fear", "name"}, - out: 5799056148416392346, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, - labels: LabelNames{"fear", "name"}, - out: 5799056148416392346, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: LabelNames{}, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: nil, - out: 14695981039346656037, - }, - } - - for i, scenario := range scenarios { - actual := SignatureForLabels(scenario.in, scenario.labels) - - if actual != scenario.out { - t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) - } - } -} - -func TestSignatureWithoutLabels(t *testing.T) { - var scenarios = []struct { - in Metric - labels map[LabelName]struct{} - out uint64 - }{ - { - in: Metric{}, - labels: nil, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}}, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, - labels: map[LabelName]struct{}{"foo": struct{}{}}, - out: 5799056148416392346, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: map[LabelName]struct{}{}, - out: 5799056148416392346, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: nil, - out: 5799056148416392346, - }, - } - - for i, scenario := range scenarios { - actual := SignatureWithoutLabels(scenario.in, scenario.labels) - - if actual != scenario.out { - t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) - } - } -} - -func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) { - for i := 0; i < b.N; i++ { - if a := LabelsToSignature(l); a != e { - b.Fatalf("expected signature of %d for %s, got %d", e, l, a) - } - } -} - -func BenchmarkLabelToSignatureScalar(b *testing.B) { - benchmarkLabelToSignature(b, nil, 14695981039346656037) -} - -func BenchmarkLabelToSignatureSingle(b *testing.B) { - benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169) -} - -func BenchmarkLabelToSignatureDouble(b *testing.B) { - benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) -} - -func BenchmarkLabelToSignatureTriple(b *testing.B) { - benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) -} - -func benchmarkMetricToFingerprint(b *testing.B, m Metric, e Fingerprint) { - for i := 0; i < b.N; i++ { - if a := metricToFingerprint(m); a != e { - b.Fatalf("expected signature of %d for %s, got %d", e, m, a) - } - } -} - -func BenchmarkMetricToFingerprintScalar(b *testing.B) { - benchmarkMetricToFingerprint(b, nil, 14695981039346656037) -} - -func BenchmarkMetricToFingerprintSingle(b *testing.B) { - benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value"}, 5146282821936882169) -} - -func BenchmarkMetricToFingerprintDouble(b *testing.B) { - benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) -} - -func BenchmarkMetricToFingerprintTriple(b *testing.B) { - benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) -} - -func benchmarkMetricToFastFingerprint(b *testing.B, m Metric, e Fingerprint) { - for i := 0; i < b.N; i++ { - if a := metricToFastFingerprint(m); a != e { - b.Fatalf("expected signature of %d for %s, got %d", e, m, a) - } - } -} - -func BenchmarkMetricToFastFingerprintScalar(b *testing.B) { - benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037) -} - -func BenchmarkMetricToFastFingerprintSingle(b *testing.B) { - benchmarkMetricToFastFingerprint(b, Metric{"first-label": "first-label-value"}, 5147259542624943964) -} - -func 
BenchmarkMetricToFastFingerprintDouble(b *testing.B) { - benchmarkMetricToFastFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528) -} - -func BenchmarkMetricToFastFingerprintTriple(b *testing.B) { - benchmarkMetricToFastFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676) -} - -func TestEmptyLabelSignature(t *testing.T) { - input := []map[string]string{nil, {}} - - var ms runtime.MemStats - runtime.ReadMemStats(&ms) - - alloc := ms.Alloc - - for _, labels := range input { - LabelsToSignature(labels) - } - - runtime.ReadMemStats(&ms) - - if got := ms.Alloc; alloc != got { - t.Fatal("expected LabelsToSignature with empty labels not to perform allocations") - } -} - -func benchmarkMetricToFastFingerprintConc(b *testing.B, m Metric, e Fingerprint, concLevel int) { - var start, end sync.WaitGroup - start.Add(1) - end.Add(concLevel) - - for i := 0; i < concLevel; i++ { - go func() { - start.Wait() - for j := b.N / concLevel; j >= 0; j-- { - if a := metricToFastFingerprint(m); a != e { - b.Fatalf("expected signature of %d for %s, got %d", e, m, a) - } - } - end.Done() - }() - } - b.ResetTimer() - start.Done() - end.Wait() -} - -func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) { - benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1) -} - -func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) { - benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2) -} - -func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) { - benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4) -} - -func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) { - benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go deleted file mode 100644 index afffdcf753..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "math" - "strconv" - - native_time "time" -) - -// Timestamp is the number of milliseconds since the epoch -// (1970-01-01 00:00 UTC) excluding leap seconds. -type Timestamp int64 - -const ( - // MinimumTick is the minimum supported time resolution. 
This has to be - // no coarser than native_time.Second in order for the code below to work. - MinimumTick = native_time.Millisecond - // second is the timestamp duration equivalent to one second. - second = int64(native_time.Second / MinimumTick) - // The number of nanoseconds per minimum tick. - nanosPerTick = int64(MinimumTick / native_time.Nanosecond) - - // Earliest is the earliest timestamp representable. Handy for - // initializing a high watermark. - Earliest = Timestamp(math.MinInt64) - // Latest is the latest timestamp representable. Handy for initializing - // a low watermark. - Latest = Timestamp(math.MaxInt64) -) - -// Equal reports whether two timestamps represent the same instant. -func (t Timestamp) Equal(o Timestamp) bool { - return t == o -} - -// Before reports whether the timestamp t is before o. -func (t Timestamp) Before(o Timestamp) bool { - return t < o -} - -// After reports whether the timestamp t is after o. -func (t Timestamp) After(o Timestamp) bool { - return t > o -} - -// Add returns the Timestamp t + d. -func (t Timestamp) Add(d native_time.Duration) Timestamp { - return t + Timestamp(d/MinimumTick) -} - -// Sub returns the Duration t - o. -func (t Timestamp) Sub(o Timestamp) native_time.Duration { - return native_time.Duration(t-o) * MinimumTick -} - -// Time returns the time.Time representation of t. -func (t Timestamp) Time() native_time.Time { - return native_time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. -func (t Timestamp) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Timestamp) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// String returns a string representation of the timestamp. -func (t Timestamp) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Timestamp) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// Now returns the current time as a Timestamp. -func Now() Timestamp { - return TimestampFromTime(native_time.Now()) -} - -// TimestampFromTime returns the Timestamp equivalent to the time.Time t. -func TimestampFromTime(t native_time.Time) Timestamp { - return TimestampFromUnixNano(t.UnixNano()) -} - -// TimestampFromUnix returns the Timestamp equivalent to the Unix timestamp t -// provided in seconds. -func TimestampFromUnix(t int64) Timestamp { - return Timestamp(t * second) -} - -// TimestampFromUnixNano returns the Timestamp equivalent to the Unix timestamp -// t provided in nanoseconds. -func TimestampFromUnixNano(t int64) Timestamp { - return Timestamp(t / nanosPerTick) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp_test.go deleted file mode 100644 index fa028a47de..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "testing" - native_time "time" -) - -func TestComparators(t *testing.T) { - t1a := TimestampFromUnix(0) - t1b := TimestampFromUnix(0) - t2 := TimestampFromUnix(2*second - 1) - - if !t1a.Equal(t1b) { - t.Fatalf("Expected %s to be equal to %s", t1a, t1b) - } - if t1a.Equal(t2) { - t.Fatalf("Expected %s to not be equal to %s", t1a, t2) - } - - if !t1a.Before(t2) { - t.Fatalf("Expected %s to be before %s", t1a, t2) - } - if t1a.Before(t1b) { - t.Fatalf("Expected %s to not be before %s", t1a, t1b) - } - - if !t2.After(t1a) { - t.Fatalf("Expected %s to be after %s", t2, t1a) - } - if t1b.After(t1a) { - t.Fatalf("Expected %s to not be after %s", t1b, t1a) - } -} - -func TestTimestampConversions(t *testing.T) { - unixSecs := int64(1136239445) - unixNsecs := int64(123456789) - unixNano := unixSecs*1000000000 + unixNsecs - - t1 := native_time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick) - t2 := native_time.Unix(unixSecs, unixNsecs) - - ts := TimestampFromUnixNano(unixNano) - if !ts.Time().Equal(t1) { - t.Fatalf("Expected %s, got %s", t1, ts.Time()) - } - - // Test available precision. - ts = TimestampFromTime(t2) - if !ts.Time().Equal(t1) { - t.Fatalf("Expected %s, got %s", t1, ts.Time()) - } - - if ts.UnixNano() != unixNano-unixNano%nanosPerTick { - t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano()) - } -} - -func TestDuration(t *testing.T) { - duration := native_time.Second + native_time.Minute + native_time.Hour - goTime := native_time.Unix(1136239445, 0) - - ts := TimestampFromTime(goTime) - if !goTime.Add(duration).Equal(ts.Add(duration).Time()) { - t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration)) - } - - earlier := ts.Add(-duration) - delta := ts.Sub(earlier) - if delta != duration { - t.Fatalf("Expected %s to be equal to %s", delta, duration) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f0346d..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index 81032bed88..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Overview -This is the [Prometheus](http://www.prometheus.io) telemetric -instrumentation client library for [Go](http://golang.org). It -enables authors to define process-space metrics for their servers and -expose them through a web service interface for extraction, -aggregation, and a whole slew of other post-processing techniques. 
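Editorial aside: the model package being removed above fixes timestamps at millisecond resolution, and the tests just deleted exercise exactly that truncation. A minimal sketch of what the truncation means in practice, assuming the vendored `github.com/prometheus/client_golang/model` API as it appears in this diff (the input values are taken from the deleted test):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/model"
)

func main() {
	// Sub-millisecond precision is truncated away by TimestampFromTime,
	// since nanosPerTick is 1e6 for a millisecond MinimumTick.
	t := time.Unix(1136239445, 123456789)
	ts := model.TimestampFromTime(t)

	fmt.Println(ts)            // 1136239445.123 (seconds, ms resolution)
	fmt.Println(ts.UnixNano()) // 1136239445123000000; the trailing 456789ns are gone
}
```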
- -# Installing - $ go get github.com/prometheus/client_golang/prometheus - -# Example -```go -package main - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - indexed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "my_company", - Subsystem: "indexer", - Name: "documents_indexed", - Help: "The number of documents indexed.", - }) - size = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "my_company", - Subsystem: "storage", - Name: "documents_total_size_bytes", - Help: "The total size of all documents in the storage.", - }) -) - -func main() { - http.Handle("/metrics", prometheus.Handler()) - - indexed.Inc() - size.Set(5) - - http.ListenAndServe(":8080", nil) -} - -func init() { - prometheus.MustRegister(indexed) - prometheus.MustRegister(size) -} -``` - -# Documentation - -[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang) diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go deleted file mode 100644 index 6ae7333fcc..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
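The benchmark file being removed next compares the two ways of addressing a child of a metric vector. A self-contained sketch of that difference, assuming the prometheus package API as vendored in this diff (the metric and label names mirror the deleted benchmarks):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A counter vector partitioned by three labels, as in the benchmarks below.
	c := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "benchmark_counter",
			Help: "A counter to benchmark it.",
		},
		[]string{"one", "two", "three"},
	)
	prometheus.MustRegister(c)

	// Positional: values must match the declared label-name order.
	c.WithLabelValues("eins", "zwei", "drei").Inc()

	// By name: order-independent, but each call pays for a map lookup.
	// Reusing one Labels value (the "prepared" benchmark below) avoids
	// rebuilding the map, not the lookup itself.
	labels := prometheus.Labels{"two": "zwei", "one": "eins", "three": "drei"}
	c.With(labels).Inc()
}
```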
- -package prometheus - -import ( - "testing" -) - -func BenchmarkCounterWithLabelValues(b *testing.B) { - m := NewCounterVec( - CounterOpts{ - Name: "benchmark_counter", - Help: "A counter to benchmark it.", - }, - []string{"one", "two", "three"}, - ) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.WithLabelValues("eins", "zwei", "drei").Inc() - } -} - -func BenchmarkCounterWithMappedLabels(b *testing.B) { - m := NewCounterVec( - CounterOpts{ - Name: "benchmark_counter", - Help: "A counter to benchmark it.", - }, - []string{"one", "two", "three"}, - ) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc() - } -} - -func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) { - m := NewCounterVec( - CounterOpts{ - Name: "benchmark_counter", - Help: "A counter to benchmark it.", - }, - []string{"one", "two", "three"}, - ) - b.ReportAllocs() - b.ResetTimer() - labels := Labels{"two": "zwei", "one": "eins", "three": "drei"} - for i := 0; i < b.N; i++ { - m.With(labels).Inc() - } -} - -func BenchmarkCounterNoLabels(b *testing.B) { - m := NewCounter(CounterOpts{ - Name: "benchmark_counter", - Help: "A counter to benchmark it.", - }) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Inc() - } -} - -func BenchmarkGaugeWithLabelValues(b *testing.B) { - m := NewGaugeVec( - GaugeOpts{ - Name: "benchmark_gauge", - Help: "A gauge to benchmark it.", - }, - []string{"one", "two", "three"}, - ) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.WithLabelValues("eins", "zwei", "drei").Set(3.1415) - } -} - -func BenchmarkGaugeNoLabels(b *testing.B) { - m := NewGauge(GaugeOpts{ - Name: "benchmark_gauge", - Help: "A gauge to benchmark it.", - }) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Set(3.1415) - } -} - -func BenchmarkSummaryWithLabelValues(b *testing.B) { - m := NewSummaryVec( - SummaryOpts{ - Name: "benchmark_summary", - Help: "A summary to benchmark it.", - }, - []string{"one", "two", "three"}, - ) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) - } -} - -func BenchmarkSummaryNoLabels(b *testing.B) { - m := NewSummary(SummaryOpts{ - Name: "benchmark_summary", - Help: "A summary to benchmark it.", - }, - ) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Observe(3.1415) - } -} - -func BenchmarkHistogramWithLabelValues(b *testing.B) { - m := NewHistogramVec( - HistogramOpts{ - Name: "benchmark_histogram", - Help: "A histogram to benchmark it.", - }, - []string{"one", "two", "three"}, - ) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) - } -} - -func BenchmarkHistogramNoLabels(b *testing.B) { - m := NewHistogram(HistogramOpts{ - Name: "benchmark_histogram", - Help: "A histogram to benchmark it.", - }, - ) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Observe(3.1415) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index c04688009f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use 
this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet. -// -// The stock metrics provided by this package (like Gauge, Counter, Summary) are -// also Collectors (which only ever collect one metric, namely itself). An -// implementer of Collector may, however, collect multiple metrics in a -// coordinated fashion and/or create metrics on the fly. Examples of collectors -// already implemented in this library are the metric vectors (i.e. collections -// of multiple instances of the same Metric but with different label values) -// like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. (It is valid if one and the same Collector sends - // duplicate descriptors. Those duplicates are simply ignored. However, - // two different Collectors must not send duplicate descriptors.) This - // method idempotently sends the same descriptors throughout the - // lifetime of the Collector. If a Collector encounters an error while - // executing this method, it must send an invalid descriptor (created - // with NewInvalidDesc) to signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by Prometheus when collecting metrics. The - // implementation sends each collected metric via the provided channel - // and returns once the last metric has been sent. The descriptor of - // each sent metric is one of those returned by Describe. Returned - // metrics that share the same descriptor must differ in their variable - // label values. This method may be called concurrently and must - // therefore be implemented in a concurrency-safe way. Blocking occurs - // at the expense of total performance of rendering all registered - // metrics. Ideally, Collector implementations support concurrent - // readers. - Collect(chan<- Metric) -} - -// SelfCollector implements Collector for a single Metric so that the -// Metric collects itself. Add it as an anonymous field to a struct that -// implements Metric, and call Init with the Metric itself as an argument. -type SelfCollector struct { - self Metric -} - -// Init provides the SelfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *SelfCollector) Init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *SelfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. 
-func (c *SelfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index a2952d1c88..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "hash/fnv" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// encountered, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Set is used to set the Counter to an arbitrary value. It is only used - // if you have to transfer a value from an external counter into this - // Prometheus metric. Do not use it for regular handling of a - // Prometheus counter (as it can be used to break the contract of - // monotonically increasing values). - Set(float64) - // Inc increments the counter by 1. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// NewCounter creates a new Counter based on the provided CounterOpts. -func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} - result.Init(result) // Init self-collection. - return result -} - -type counter struct { - value -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - c.value.Add(v) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -// -// CounterVec embeds MetricVec. See there for a full list of methods with -// detailed documentation. -type CounterVec struct { - MetricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. At least one label name must be -// provided. 
-func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &CounterVec{ - MetricVec: MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - hash: fnv.New64a(), - newMetric: func(lvs ...string) Metric { - result := &counter{value: value{ - desc: desc, - valType: CounterValue, - labelPairs: makeLabelPairs(desc, lvs), - }} - result.Init(result) // Init self-collection. - return result - }, - }, - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Counter and not a -// Metric so that no type conversion is required. -func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Counter and not a Metric so that no -// type conversion is required. -func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *CounterVec) WithLabelValues(lvs ...string) Counter { - return m.MetricVec.WithLabelValues(lvs...).(Counter) -} - -// With works as GetMetricWith, but panics where GetMetricWith would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *CounterVec) With(labels Labels) Counter { - return m.MetricVec.With(labels).(Counter) -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. 
-func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter_test.go deleted file mode 100644 index 67391a23aa..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "testing" - - dto "github.com/prometheus/client_model/go" -) - -func TestCounterAdd(t *testing.T) { - counter := NewCounter(CounterOpts{ - Name: "test", - Help: "test help", - ConstLabels: Labels{"a": "1", "b": "2"}, - }).(*counter) - counter.Inc() - if expected, got := 1., math.Float64frombits(counter.valBits); expected != got { - t.Errorf("Expected %f, got %f.", expected, got) - } - counter.Add(42) - if expected, got := 43., math.Float64frombits(counter.valBits); expected != got { - t.Errorf("Expected %f, got %f.", expected, got) - } - - if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got { - t.Errorf("Expected error %q, got %q.", expected, got) - } - - m := &dto.Metric{} - counter.Write(m) - - if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got { - t.Errorf("expected %q, got %q", expected, got) - } -} - -func decreaseCounter(c *counter) (err error) { - defer func() { - if e := recover(); e != nil { - err = e.(error) - } - }() - c.Add(-1) - return nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index 1fe10bc508..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,198 +0,0 @@ -package prometheus - -import ( - "bytes" - "errors" - "fmt" - "hash/fnv" - "regexp" - "sort" - "strings" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/model" -) - -var ( - metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. 
The normal Metric implementations -// included in this package manage their Desc under the hood. Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. -// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each of constLabels and variableLabels, but they must differ in -// the values of the constLabels. -// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. - constLabelPairs []*dto.LabelPair - // VariableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. - id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. Each Desc with the same fqName must have the same - // dimHash. - dimHash uint64 - // err is an error that occurred during construction. It is reported at - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported at registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName and help must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Opts documentation for the implications of -// constant labels. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels, - } - if help == "" { - d.err = errors.New("empty help string") - return d - } - if !metricNameRE.MatchString(fqName) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). - labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. 
- for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. That prevents matching the label - // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) - return d - } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") - return d - } - h := fnv.New64a() - var b bytes.Buffer // To copy string contents into, avoiding []byte allocations. - for _, val := range labelValues { - b.Reset() - b.WriteString(val) - b.WriteByte(model.SeparatorByte) - h.Write(b.Bytes()) - } - d.id = h.Sum64() - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. - h.Reset() - b.Reset() - b.WriteString(help) - b.WriteByte(model.SeparatorByte) - h.Write(b.Bytes()) - for _, labelName := range labelNames { - b.Reset() - b.WriteString(labelName) - b.WriteByte(model.SeparatorByte) - h.Write(b.Bytes()) - } - d.dimHash = h.Sum64() - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(LabelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. -func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - d.variableLabels, - ) -} - -func checkLabelName(l string) bool { - return model.LabelNameRE.MatchString(l) && - !strings.HasPrefix(l, model.ReservedLabelPrefix) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index 425fe8793c..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
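Before the package documentation that follows, a brief aside on the desc.go file just removed: custom Collectors build their Desc values directly. A minimal sketch under that pattern, assuming the NewDesc and BuildFQName functions shown in this diff (the namespace, subsystem, name, and label strings are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// BuildFQName joins namespace, subsystem, and name with underscores.
	// Variable labels get their values per Metric; constant labels are
	// fixed in the Desc and take part in its id hash, per NewDesc above.
	desc := prometheus.NewDesc(
		prometheus.BuildFQName("my_company", "indexer", "queue_length"),
		"Number of documents waiting to be indexed.",
		[]string{"queue"},                     // variable label names
		prometheus.Labels{"shard": "eu-west"}, // constant labels
	)
	fmt.Println(desc) // Desc implements Stringer, as shown above.
}
```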
- -// Package prometheus provides embeddable metric primitives for servers and -// standardized exposition of telemetry through a web services interface. -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. -// -// To expose metrics registered with the Prometheus registry, an HTTP server -// needs to know about the Prometheus handler. The usual endpoint is "/metrics". -// -// http.Handle("/metrics", prometheus.Handler()) -// -// As a starting point a very basic usage example: -// -// package main -// -// import ( -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }) -// ) -// -// func init() { -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.Inc() -// -// http.Handle("/metrics", prometheus.Handler()) -// http.ListenAndServe(":8080", nil) -// } -// -// -// This is a complete program that exports two metrics, a Gauge and a Counter. -// It also exports some stats about the HTTP usage of the /metrics -// endpoint. (See the Handler function for more detail.) -// -// Two more advanced metric types are the Summary and Histogram. -// -// In addition to the fundamental metric types Gauge, Counter, Summary, and -// Histogram, a very important part of the Prometheus data model is the -// partitioning of samples along dimensions called labels, which results in -// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// and HistogramVec. -// -// Those are all the parts needed for basic usage. Detailed documentation and -// examples are provided below. -// -// Everything else this package offers is essentially for "power users" only. A -// few pointers to "power user features": -// -// All the various ...Opts structs have a ConstLabels field for labels that -// never change their value (which is only useful under special circumstances, -// see documentation of the Opts type). -// -// The Untyped metric behaves like a Gauge, but signals the Prometheus server -// not to assume anything about its type. -// -// Functions to fine-tune how the metric registry works: EnableCollectChecks, -// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook. -// -// For custom metric collection, there are two entry points: Custom Metric -// implementations and custom Collector implementations. A Metric is the -// fundamental unit in the Prometheus data model: a sample at a point in time -// together with its meta-data (like its fully-qualified name and any number of -// pairs of label name and label value) that knows how to marshal itself into a -// data transfer object (aka DTO, implemented as a protocol buffer). A Collector -// gets registered with the Prometheus registry and manages the collection of -// one or more Metrics. Many parts of this package are building blocks for -// Metrics and Collectors. Desc is the metric descriptor, actually used by all -// metrics under the hood, and by Collectors to describe the Metrics to be -// collected, but only to be dealt with by users if they implement their own -// Metrics or Collectors. To create a Desc, the BuildFQName function will come -// in handy. 
Other useful components for Metric and Collector implementation -// include: LabelPairSorter to sort the DTO version of label pairs, -// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at -// collection time, MetricVec to bundle custom Metrics into a metric vector -// Collector, SelfCollector to make a custom Metric collect itself. -// -// A good example for a custom Collector is the ExpVarCollector included in this -// package, which exports variables exported via the "expvar" package as -// Prometheus metrics. -package prometheus diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go deleted file mode 100644 index 6f3e215d47..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus_test - -import ( - "sync" - - "github.com/prometheus/client_golang/prometheus" -) - -// ClusterManager is an example for a system that might have been built without -// Prometheus in mind. It models a central manager of jobs running in a -// cluster. To turn it into something that collects Prometheus metrics, we -// simply add the two methods required for the Collector interface. -// -// An additional challenge is that multiple instances of the ClusterManager are -// run within the same binary, each in charge of a different zone. We need to -// make use of ConstLabels to be able to register each ClusterManager instance -// with Prometheus. -type ClusterManager struct { - Zone string - OOMCount *prometheus.CounterVec - RAMUsage *prometheus.GaugeVec - mtx sync.Mutex // Protects OOMCount and RAMUsage. - // ... many more fields -} - -// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a -// real cluster manager would have to do. Since it may actually be really -// expensive, it must only be called once per collection. This implementation, -// obviously, only returns some made-up data. -func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( - oomCountByHost map[string]int, ramUsageByHost map[string]float64, -) { - // Just example fake data. - oomCountByHost = map[string]int{ - "foo.example.org": 42, - "bar.example.org": 2001, - } - ramUsageByHost = map[string]float64{ - "foo.example.org": 6.023e23, - "bar.example.org": 3.14, - } - return -} - -// Describe faces the interesting challenge that the two metric vectors that are -// used in this example are already Collectors themselves. However, thanks to -// the use of channels, it is really easy to "chain" Collectors. Here we simply -// call the Describe methods of the two metric vectors. 
-func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { - c.OOMCount.Describe(ch) - c.RAMUsage.Describe(ch) -} - -// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it -// sets the retrieved values in the two metric vectors and then sends all their -// metrics to the channel (again using a chaining technique as in the Describe -// method). Since Collect could be called multiple times concurrently, that part -// is protected by a mutex. -func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { - oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() - c.mtx.Lock() - defer c.mtx.Unlock() - for host, oomCount := range oomCountByHost { - c.OOMCount.WithLabelValues(host).Set(float64(oomCount)) - } - for host, ramUsage := range ramUsageByHost { - c.RAMUsage.WithLabelValues(host).Set(ramUsage) - } - c.OOMCount.Collect(ch) - c.RAMUsage.Collect(ch) - // All metrics in OOMCount and RAMUsage are sent to the channel now. We - // can safely reset the two metric vectors now, so that we can start - // fresh in the next Collect cycle. (Imagine a host disappears from the - // cluster. If we did not reset here, its Metric would stay in the - // metric vectors forever.) - c.OOMCount.Reset() - c.RAMUsage.Reset() -} - -// NewClusterManager creates the two metric vectors OOMCount and RAMUsage. Note -// that the zone is set as a ConstLabel. (It's different in each instance of the -// ClusterManager, but constant over the lifetime of an instance.) The reported -// values are partitioned by host, which is therefore a variable label. -func NewClusterManager(zone string) *ClusterManager { - return &ClusterManager{ - Zone: zone, - OOMCount: prometheus.NewCounterVec( - prometheus.CounterOpts{ - Subsystem: "clustermanager", - Name: "oom_count", - Help: "number of OOM crashes", - ConstLabels: prometheus.Labels{"zone": zone}, - }, - []string{"host"}, - ), - RAMUsage: prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Subsystem: "clustermanager", - Name: "ram_usage_bytes", - Help: "RAM usage as reported to the cluster manager", - ConstLabels: prometheus.Labels{"zone": zone}, - }, - []string{"host"}, - ), - } -} - -func ExampleCollector_clustermanager() { - workerDB := NewClusterManager("db") - workerCA := NewClusterManager("ca") - prometheus.MustRegister(workerDB) - prometheus.MustRegister(workerCA) - - // Since we are dealing with custom Collector implementations, it might - // be a good idea to enable the collect checks in the registry. - prometheus.EnableCollectChecks(true) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go deleted file mode 100644 index a84d072504..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus_test - -import ( - "runtime" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - allocDesc = prometheus.NewDesc( - prometheus.BuildFQName("", "memstats", "alloc_bytes"), - "bytes allocated and still in use", - nil, nil, - ) - totalAllocDesc = prometheus.NewDesc( - prometheus.BuildFQName("", "memstats", "total_alloc_bytes"), - "bytes allocated (even if freed)", - nil, nil, - ) - numGCDesc = prometheus.NewDesc( - prometheus.BuildFQName("", "memstats", "num_gc_total"), - "number of GCs run", - nil, nil, - ) -) - -// MemStatsCollector is an example for a custom Collector that solves the -// problem of feeding into multiple metrics at the same time. The -// runtime.ReadMemStats should happen only once, and then the results need to be -// fed into a number of separate Metrics. In this example, only a few of the -// values reported by ReadMemStats are used. For each, there is a Desc provided -// as a var, so the MemStatsCollector itself needs nothing else in the -// struct. Only the methods need to be implemented. -type MemStatsCollector struct{} - -// Describe just sends the three Desc objects for the Metrics we intend to -// collect. -func (_ MemStatsCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- allocDesc - ch <- totalAllocDesc - ch <- numGCDesc -} - -// Collect does the trick by calling ReadMemStats once and then constructing -// three different Metrics on the fly. -func (_ MemStatsCollector) Collect(ch chan<- prometheus.Metric) { - var ms runtime.MemStats - runtime.ReadMemStats(&ms) - ch <- prometheus.MustNewConstMetric( - allocDesc, - prometheus.GaugeValue, - float64(ms.Alloc), - ) - ch <- prometheus.MustNewConstMetric( - totalAllocDesc, - prometheus.GaugeValue, - float64(ms.TotalAlloc), - ) - ch <- prometheus.MustNewConstMetric( - numGCDesc, - prometheus.CounterValue, - float64(ms.NumGC), - ) - // To avoid new allocations on each collection, you could also keep - // metric objects around and return the same objects each time, just - // with new values set. -} - -func ExampleCollector_memstats() { - prometheus.MustRegister(&MemStatsCollector{}) - // Since we are dealing with custom Collector implementations, it might - // be a good idea to enable the collect checks in the registry. - prometheus.EnableCollectChecks(true) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go deleted file mode 100644 index 608deeb027..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus_test - -import ( - "runtime" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus" -) - -func NewCallbackMetric(desc *prometheus.Desc, callback func() float64) *CallbackMetric { - result := &CallbackMetric{desc: desc, callback: callback} - result.Init(result) // Initialize the SelfCollector. - return result -} - -// TODO: Come up with a better example. - -// CallbackMetric is an example for a user-defined Metric that exports the -// result of a function call as a metric of type "untyped" without any -// labels. It uses SelfCollector to turn the Metric into a Collector so that it -// can be registered with Prometheus. -// -// Note that this example is pretty much academic as the prometheus package -// already provides an UntypedFunc type. -type CallbackMetric struct { - prometheus.SelfCollector - - desc *prometheus.Desc - callback func() float64 -} - -func (cm *CallbackMetric) Desc() *prometheus.Desc { - return cm.desc -} - -func (cm *CallbackMetric) Write(m *dto.Metric) error { - m.Untyped = &dto.Untyped{Value: proto.Float64(cm.callback())} - return nil -} - -func ExampleSelfCollector() { - m := NewCallbackMetric( - prometheus.NewDesc( - "runtime_goroutines_count", - "Total number of goroutines that currently exist.", - nil, nil, // No labels, these must be nil. - ), - func() float64 { - return float64(runtime.NumGoroutine()) - }, - ) - prometheus.MustRegister(m) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/examples_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/examples_test.go deleted file mode 100644 index 0344e465b0..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/examples_test.go +++ /dev/null @@ -1,649 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus_test - -import ( - "flag" - "fmt" - "math" - "net/http" - "os" - "runtime" - "sort" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" - - "github.com/prometheus/client_golang/prometheus" -) - -func ExampleGauge() { - opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "our_company", - Subsystem: "blob_storage", - Name: "ops_queued", - Help: "Number of blob storage operations waiting to be processed.", - }) - prometheus.MustRegister(opsQueued) - - // 10 operations queued by the goroutine managing incoming requests. - opsQueued.Add(10) - // A worker goroutine has picked up a waiting operation. - opsQueued.Dec() - // And once more... 
- opsQueued.Dec()
-}
-
-func ExampleGaugeVec() {
- binaryVersion := flag.String("binary_version", "debug", "Version of the binary: debug, canary, production.")
- flag.Parse()
-
- opsQueued := prometheus.NewGaugeVec(
- prometheus.GaugeOpts{
- Namespace: "our_company",
- Subsystem: "blob_storage",
- Name: "ops_queued",
- Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.",
- ConstLabels: prometheus.Labels{"binary_version": *binaryVersion},
- },
- []string{
- // Which user has requested the operation?
- "user",
- // Of what type is the operation?
- "type",
- },
- )
- prometheus.MustRegister(opsQueued)
-
- // Increase a value using compact (but order-sensitive!) WithLabelValues().
- opsQueued.WithLabelValues("bob", "put").Add(4)
- // Increase a value with a map using With. More verbose, but order
- // doesn't matter anymore.
- opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc()
-}
-
-func ExampleGaugeFunc() {
- if err := prometheus.Register(prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Subsystem: "runtime",
- Name: "goroutines_count",
- Help: "Number of goroutines that currently exist.",
- },
- func() float64 { return float64(runtime.NumGoroutine()) },
- )); err == nil {
- fmt.Println("GaugeFunc 'goroutines_count' registered.")
- }
- // Note that the count of goroutines is a gauge (and not a counter) as
- // it can go up and down.
-
- // Output:
- // GaugeFunc 'goroutines_count' registered.
-}
-
-func ExampleCounter() {
- pushCounter := prometheus.NewCounter(prometheus.CounterOpts{
- Name: "repository_pushes", // Note: No help string...
- })
- err := prometheus.Register(pushCounter) // ... so this will return an error.
- if err != nil {
- fmt.Println("Push counter couldn't be registered, no counting will happen:", err)
- return
- }
-
- // Try it once more, this time with a help string.
- pushCounter = prometheus.NewCounter(prometheus.CounterOpts{
- Name: "repository_pushes",
- Help: "Number of pushes to external repository.",
- })
- err = prometheus.Register(pushCounter)
- if err != nil {
- fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err)
- return
- }
-
- pushComplete := make(chan struct{})
- // TODO: Start a goroutine that performs repository pushes and reports
- // each completion via the channel.
- for _ = range pushComplete {
- pushCounter.Inc()
- }
- // Output:
- // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string
-}
-
-func ExampleCounterVec() {
- env := flag.String("environment", "test", "Execution environment: test, staging, production.")
- flag.Parse()
-
- httpReqs := prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "http_requests_total",
- Help: "How many HTTP requests processed, partitioned by status code and HTTP method.",
- ConstLabels: prometheus.Labels{"env": *env},
- },
- []string{"code", "method"},
- )
- prometheus.MustRegister(httpReqs)
-
- httpReqs.WithLabelValues("404", "POST").Add(42)
-
- // If you have to access the same set of labels very frequently, it
- // might be good to retrieve the metric only once and keep a handle to
- // it. But beware of deletion of that metric, see below!
- m := httpReqs.WithLabelValues("200", "GET")
- for i := 0; i < 1000000; i++ {
- m.Inc()
- }
- // Delete a metric from the vector.
If you have previously kept a handle - // to that metric (as above), future updates via that handle will go - // unseen (even if you re-create a metric with the same label set - // later). - httpReqs.DeleteLabelValues("200", "GET") - // Same thing with the more verbose Labels syntax. - httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) -} - -func ExampleInstrumentHandler() { - // Handle the "/doc" endpoint with the standard http.FileServer handler. - // By wrapping the handler with InstrumentHandler, request count, - // request and response sizes, and request latency are automatically - // exported to Prometheus, partitioned by HTTP status code and method - // and by the handler name (here "fileserver"). - http.Handle("/doc", prometheus.InstrumentHandler( - "fileserver", http.FileServer(http.Dir("/usr/share/doc")), - )) - // The Prometheus handler still has to be registered to handle the - // "/metrics" endpoint. The handler returned by prometheus.Handler() is - // already instrumented - with "prometheus" as the handler name. In this - // example, we want the handler name to be "metrics", so we instrument - // the uninstrumented Prometheus handler ourselves. - http.Handle("/metrics", prometheus.InstrumentHandler( - "metrics", prometheus.UninstrumentedHandler(), - )) -} - -func ExampleLabelPairSorter() { - labelPairs := []*dto.LabelPair{ - &dto.LabelPair{Name: proto.String("status"), Value: proto.String("404")}, - &dto.LabelPair{Name: proto.String("method"), Value: proto.String("get")}, - } - - sort.Sort(prometheus.LabelPairSorter(labelPairs)) - - fmt.Println(labelPairs) - // Output: - // [name:"method" value:"get" name:"status" value:"404" ] -} - -func ExampleRegister() { - // Imagine you have a worker pool and want to count the tasks completed. - taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ - Subsystem: "worker_pool", - Name: "completed_tasks_total", - Help: "Total number of tasks completed.", - }) - // This will register fine. - if err := prometheus.Register(taskCounter); err != nil { - fmt.Println(err) - } else { - fmt.Println("taskCounter registered.") - } - // Don't forget to tell the HTTP server about the Prometheus handler. - // (In a real program, you still need to start the HTTP server...) - http.Handle("/metrics", prometheus.Handler()) - - // Now you can start workers and give every one of them a pointer to - // taskCounter and let it increment it whenever it completes a task. - taskCounter.Inc() // This has to happen somewhere in the worker code. - - // But wait, you want to see how individual workers perform. So you need - // a vector of counters, with one element for each worker. - taskCounterVec := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Subsystem: "worker_pool", - Name: "completed_tasks_total", - Help: "Total number of tasks completed.", - }, - []string{"worker_id"}, - ) - - // Registering will fail because we already have a metric of that name. - if err := prometheus.Register(taskCounterVec); err != nil { - fmt.Println("taskCounterVec not registered:", err) - } else { - fmt.Println("taskCounterVec registered.") - } - - // To fix, first unregister the old taskCounter. - if prometheus.Unregister(taskCounter) { - fmt.Println("taskCounter unregistered.") - } - - // Try registering taskCounterVec again. - if err := prometheus.Register(taskCounterVec); err != nil { - fmt.Println("taskCounterVec not registered:", err) - } else { - fmt.Println("taskCounterVec registered.") - } - // Bummer! Still doesn't work. 
-
- // Prometheus will not allow you to ever export metrics with
- // inconsistent help strings or label names. After unregistering, the
- // unregistered metrics will cease to show up in the /metrics HTTP
- // response, but the registry still remembers that those metrics had
- // been exported before. For this example, we will now choose a
- // different name. (In a real program, you would obviously not export
- // the obsolete metric in the first place.)
- taskCounterVec = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Subsystem: "worker_pool",
- Name: "completed_tasks_by_id",
- Help: "Total number of tasks completed.",
- },
- []string{"worker_id"},
- )
- if err := prometheus.Register(taskCounterVec); err != nil {
- fmt.Println("taskCounterVec not registered:", err)
- } else {
- fmt.Println("taskCounterVec registered.")
- }
- // Finally it worked!
-
- // The workers have to tell taskCounterVec their id to increment the
- // right element in the metric vector.
- taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42.
-
- // Each worker could also keep a reference to their own counter element
- // around. Pick the counter at initialization time of the worker.
- myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code.
- myCounter.Inc() // Somewhere in the code of that worker.
-
- // Note that something like WithLabelValues("42", "spurious arg") would
- // panic (because you have provided too many label values). If you want
- // an error instead, use GetMetricWithLabelValues(...).
- notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg")
- if err != nil {
- fmt.Println("Worker initialization failed:", err)
- }
- if notMyCounter == nil {
- fmt.Println("notMyCounter is nil.")
- }
-
- // A different (and somewhat tricky) approach is to use
- // ConstLabels. ConstLabels are pairs of label names and label values
- // that never change. You might ask what those labels are good for (and
- // rightfully so - if they never change, they could as well be part of
- // the metric name). There are essentially two use-cases: The first is
- // if labels are constant throughout the lifetime of a binary execution,
- // but vary between different executions or instances of the running
- // binary. The second is what we have here: Each worker creates and
- // registers its own Counter instance where the only difference is in the
- // value of the ConstLabels. Those Counters can all be registered
- // because the different ConstLabel values guarantee that each worker
- // will increment a different Counter metric.
- counterOpts := prometheus.CounterOpts{
- Subsystem: "worker_pool",
- Name: "completed_tasks",
- Help: "Total number of tasks completed.",
- ConstLabels: prometheus.Labels{"worker_id": "42"},
- }
- taskCounterForWorker42 := prometheus.NewCounter(counterOpts)
- if err := prometheus.Register(taskCounterForWorker42); err != nil {
- fmt.Println("taskCounterForWorker42 not registered:", err)
- } else {
- fmt.Println("taskCounterForWorker42 registered.")
- }
- // Obviously, in real code, taskCounterForWorker42 would be a member
- // variable of a worker struct, and the "42" would be retrieved with a
- // GetId() method or something. The Counter would be created and
- // registered in the initialization code of the worker.
-
- // For the creation of the next Counter, we can recycle
- // counterOpts. Just change the ConstLabels.
- counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"}
- taskCounterForWorker2001 := prometheus.NewCounter(counterOpts)
- if err := prometheus.Register(taskCounterForWorker2001); err != nil {
- fmt.Println("taskCounterForWorker2001 not registered:", err)
- } else {
- fmt.Println("taskCounterForWorker2001 registered.")
- }
-
- taskCounterForWorker2001.Inc()
- taskCounterForWorker42.Inc()
- taskCounterForWorker2001.Inc()
-
- // Yet another approach would be to turn the workers themselves into
- // Collectors and register them. See the Collector example for details.
-
- // Output:
- // taskCounter registered.
- // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
- // taskCounter unregistered.
- // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
- // taskCounterVec registered.
- // Worker initialization failed: inconsistent label cardinality
- // notMyCounter is nil.
- // taskCounterForWorker42 registered.
- // taskCounterForWorker2001 registered.
-}
-
-func ExampleSummary() {
- temps := prometheus.NewSummary(prometheus.SummaryOpts{
- Name: "pond_temperature_celsius",
- Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
- })
-
- // Simulate some observations.
- for i := 0; i < 1000; i++ {
- temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
- }
-
- // Just for demonstration, let's check the state of the summary by
- // (ab)using its Write method (which is usually only used by Prometheus
- // internally).
- metric := &dto.Metric{}
- temps.Write(metric)
- fmt.Println(proto.MarshalTextString(metric))
-
- // Output:
- // summary: <
- // sample_count: 1000
- // sample_sum: 29969.50000000001
- // quantile: <
- // quantile: 0.5
- // value: 31.1
- // >
- // quantile: <
- // quantile: 0.9
- // value: 41.3
- // >
- // quantile: <
- // quantile: 0.99
- // value: 41.9
- // >
- // >
-}
-
-func ExampleSummaryVec() {
- temps := prometheus.NewSummaryVec(
- prometheus.SummaryOpts{
- Name: "pond_temperature_celsius",
- Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
- },
- []string{"species"},
- )
-
- // Simulate some observations.
- for i := 0; i < 1000; i++ {
- temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
- temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)
- }
-
- // Create a Summary without any observations.
- temps.WithLabelValues("leiopelma-hochstetteri")
-
- // Just for demonstration, let's check the state of the summary vector
- // by (ab)using its Collect method and the Write method of its elements
- // (which is usually only used by Prometheus internally - code like the
- // following will never appear in your own code).
- metricChan := make(chan prometheus.Metric) - go func() { - defer close(metricChan) - temps.Collect(metricChan) - }() - - metricStrings := []string{} - for metric := range metricChan { - dtoMetric := &dto.Metric{} - metric.Write(dtoMetric) - metricStrings = append(metricStrings, proto.MarshalTextString(dtoMetric)) - } - sort.Strings(metricStrings) // For reproducible print order. - fmt.Println(metricStrings) - - // Output: - // [label: < - // name: "species" - // value: "leiopelma-hochstetteri" - // > - // summary: < - // sample_count: 0 - // sample_sum: 0 - // quantile: < - // quantile: 0.5 - // value: nan - // > - // quantile: < - // quantile: 0.9 - // value: nan - // > - // quantile: < - // quantile: 0.99 - // value: nan - // > - // > - // label: < - // name: "species" - // value: "lithobates-catesbeianus" - // > - // summary: < - // sample_count: 1000 - // sample_sum: 31956.100000000017 - // quantile: < - // quantile: 0.5 - // value: 32.4 - // > - // quantile: < - // quantile: 0.9 - // value: 41.4 - // > - // quantile: < - // quantile: 0.99 - // value: 41.9 - // > - // > - // label: < - // name: "species" - // value: "litoria-caerulea" - // > - // summary: < - // sample_count: 1000 - // sample_sum: 29969.50000000001 - // quantile: < - // quantile: 0.5 - // value: 31.1 - // > - // quantile: < - // quantile: 0.9 - // value: 41.3 - // > - // quantile: < - // quantile: 0.99 - // value: 41.9 - // > - // > - // ] -} - -func ExampleConstSummary() { - desc := prometheus.NewDesc( - "http_request_duration_seconds", - "A summary of the HTTP request durations.", - []string{"code", "method"}, - prometheus.Labels{"owner": "example"}, - ) - - // Create a constant summary from values we got from a 3rd party telemetry system. - s := prometheus.MustNewConstSummary( - desc, - 4711, 403.34, - map[float64]float64{0.5: 42.3, 0.9: 323.3}, - "200", "get", - ) - - // Just for demonstration, let's check the state of the summary by - // (ab)using its Write method (which is usually only used by Prometheus - // internally). - metric := &dto.Metric{} - s.Write(metric) - fmt.Println(proto.MarshalTextString(metric)) - - // Output: - // label: < - // name: "code" - // value: "200" - // > - // label: < - // name: "method" - // value: "get" - // > - // label: < - // name: "owner" - // value: "example" - // > - // summary: < - // sample_count: 4711 - // sample_sum: 403.34 - // quantile: < - // quantile: 0.5 - // value: 42.3 - // > - // quantile: < - // quantile: 0.9 - // value: 323.3 - // > - // > -} - -func ExampleHistogram() { - temps := prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "pond_temperature_celsius", - Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. - Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide. - }) - - // Simulate some observations. - for i := 0; i < 1000; i++ { - temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) - } - - // Just for demonstration, let's check the state of the histogram by - // (ab)using its Write method (which is usually only used by Prometheus - // internally). 
- metric := &dto.Metric{}
- temps.Write(metric)
- fmt.Println(proto.MarshalTextString(metric))
-
- // Output:
- // histogram: <
- // sample_count: 1000
- // sample_sum: 29969.50000000001
- // bucket: <
- // cumulative_count: 192
- // upper_bound: 20
- // >
- // bucket: <
- // cumulative_count: 366
- // upper_bound: 25
- // >
- // bucket: <
- // cumulative_count: 501
- // upper_bound: 30
- // >
- // bucket: <
- // cumulative_count: 638
- // upper_bound: 35
- // >
- // bucket: <
- // cumulative_count: 816
- // upper_bound: 40
- // >
- // >
-}
-
-func ExampleConstHistogram() {
- desc := prometheus.NewDesc(
- "http_request_duration_seconds",
- "A histogram of the HTTP request durations.",
- []string{"code", "method"},
- prometheus.Labels{"owner": "example"},
- )
-
- // Create a constant histogram from values we got from a 3rd party telemetry system.
- h := prometheus.MustNewConstHistogram(
- desc,
- 4711, 403.34,
- map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
- "200", "get",
- )
-
- // Just for demonstration, let's check the state of the histogram by
- // (ab)using its Write method (which is usually only used by Prometheus
- // internally).
- metric := &dto.Metric{}
- h.Write(metric)
- fmt.Println(proto.MarshalTextString(metric))
-
- // Output:
- // label: <
- // name: "code"
- // value: "200"
- // >
- // label: <
- // name: "method"
- // value: "get"
- // >
- // label: <
- // name: "owner"
- // value: "example"
- // >
- // histogram: <
- // sample_count: 4711
- // sample_sum: 403.34
- // bucket: <
- // cumulative_count: 121
- // upper_bound: 25
- // >
- // bucket: <
- // cumulative_count: 2403
- // upper_bound: 50
- // >
- // bucket: <
- // cumulative_count: 3221
- // upper_bound: 100
- // >
- // bucket: <
- // cumulative_count: 4233
- // upper_bound: 200
- // >
- // >
-}
-
-func ExamplePushCollectors() {
- hostname, _ := os.Hostname()
- completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
- Name: "db_backup_last_completion_time",
- Help: "The timestamp of the last successful completion of a DB backup.",
- })
- completionTime.Set(float64(time.Now().Unix()))
- if err := prometheus.PushCollectors(
- "db_backup", hostname,
- "http://pushgateway:9091",
- completionTime,
- ); err != nil {
- fmt.Println("Could not push completion time to Pushgateway:", err)
- }
-} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go deleted file mode 100644 index 0f7630d53f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "encoding/json"
- "expvar"
-)
-
-// ExpvarCollector collects metrics from the expvar interface. It provides a
-// quick way to expose numeric values that are already exported via expvar as
-// Prometheus metrics.
Note that the data models of expvar and Prometheus are
-// fundamentally different, and that the ExpvarCollector is inherently
-// slow. Thus, the ExpvarCollector is probably great for experiments and
-// prototyping, but you should seriously consider a more direct implementation of
-// Prometheus metrics for monitoring production systems.
-//
-// Use NewExpvarCollector to create new instances.
-type ExpvarCollector struct {
- exports map[string]*Desc
-}
-
-// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
-// to be registered with the Prometheus registry.
-//
-// The exports map has the following meaning:
-//
-// The keys in the map correspond to expvar keys, i.e. for every expvar key you
-// want to export as Prometheus metric, you need an entry in the exports
-// map. The descriptor mapped to each key describes how to export the expvar
-// value. It defines the name and the help string of the Prometheus metric
-// proxying the expvar value. The type will always be Untyped.
-//
-// For descriptors without variable labels, the expvar value must be a number or
-// a bool. The number is then directly exported as the Prometheus sample
-// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
-// that are not numbers or bools are silently ignored.
-//
-// If the descriptor has one variable label, the expvar value must be an expvar
-// map. The keys in the expvar map become the various values of the one
-// Prometheus label. The values in the expvar map must be numbers or bools again
-// as above.
-//
-// For descriptors with more than one variable label, the expvar must be a
-// nested expvar map, i.e. where the values of the topmost map are maps again
-// etc. until a depth is reached that corresponds to the number of labels. The
-// leaves of that structure must be numbers or bools as above to serve as the
-// sample values.
-//
-// Anything that does not fit into the scheme above is silently ignored.
-func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
- return &ExpvarCollector{
- exports: exports,
- }
-}
-
-// Describe implements Collector.
-func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
- for _, desc := range e.exports {
- ch <- desc
- }
-}
-
-// Collect implements Collector.
-func (e *ExpvarCollector) Collect(ch chan<- Metric) {
- for name, desc := range e.exports {
- var m Metric
- expVar := expvar.Get(name)
- if expVar == nil {
- continue
- }
- var v interface{}
- labels := make([]string, len(desc.variableLabels))
- if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
- ch <- NewInvalidMetric(desc, err)
- continue
- }
- var processValue func(v interface{}, i int)
- processValue = func(v interface{}, i int) {
- if i >= len(labels) {
- copiedLabels := append(make([]string, 0, len(labels)), labels...)
- switch v := v.(type) {
- case float64:
- m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
- case bool:
- if v {
- m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
- } else {
- m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
- }
- default:
- return
- }
- ch <- m
- return
- }
- vm, ok := v.(map[string]interface{})
- if !ok {
- return
- }
- for lv, val := range vm {
- labels[i] = lv
- processValue(val, i+1)
- }
- }
- processValue(v, 0)
- }
-} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar_test.go deleted file mode 100644 index 5d3128faed..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus_test
-
-import (
- "expvar"
- "fmt"
- "sort"
- "strings"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-func ExampleExpvarCollector() {
- expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
- "memstats": prometheus.NewDesc(
- "expvar_memstats",
- "All numeric memstats as one metric family. Not a good role-model, actually... ;-)",
- []string{"type"}, nil,
- ),
- "lone-int": prometheus.NewDesc(
- "expvar_lone_int",
- "Just an expvar int as an example.",
- nil, nil,
- ),
- "http-request-map": prometheus.NewDesc(
- "expvar_http_request_total",
- "How many http requests processed, partitioned by status code and http method.",
- []string{"code", "method"}, nil,
- ),
- })
- prometheus.MustRegister(expvarCollector)
-
- // The Prometheus part is done here. But to show that this example is
- // doing anything, we have to manually export something via expvar. In
- // real-life use-cases, some library would already have exported via
- // expvar what we want to re-export as Prometheus metrics.
- expvar.NewInt("lone-int").Set(42)
- expvarMap := expvar.NewMap("http-request-map")
- var (
- expvarMap1, expvarMap2 expvar.Map
- expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int
- )
- expvarMap1.Init()
- expvarMap2.Init()
- expvarInt11.Set(3)
- expvarInt12.Set(13)
- expvarInt21.Set(11)
- expvarInt22.Set(212)
- expvarMap1.Set("POST", &expvarInt11)
- expvarMap1.Set("GET", &expvarInt12)
- expvarMap2.Set("POST", &expvarInt21)
- expvarMap2.Set("GET", &expvarInt22)
- expvarMap.Set("404", &expvarMap1)
- expvarMap.Set("200", &expvarMap2)
- // Results in the following expvar map:
- // "http-request-map": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}}
-
- // Let's see what the scrape would yield, but exclude the memstats metrics.
- metricStrings := []string{}
- metric := dto.Metric{}
- metricChan := make(chan prometheus.Metric)
- go func() {
- expvarCollector.Collect(metricChan)
- close(metricChan)
- }()
- for m := range metricChan {
- if strings.Index(m.Desc().String(), "expvar_memstats") == -1 {
- metric.Reset()
- m.Write(&metric)
- metricStrings = append(metricStrings, metric.String())
- }
- }
- sort.Strings(metricStrings)
- for _, s := range metricStrings {
- fmt.Println(strings.TrimRight(s, " "))
- }
- // Output:
- // label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 >
- // label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 >
- // label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 >
- // label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 >
- // untyped:<value:42 >
-} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index ba8a402caf..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import "hash/fnv"
-
-// Gauge is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// A Gauge is typically used for measured values like temperatures or current
-// memory usage, but also "counts" that can go up and down, like the number of
-// running goroutines.
-//
-// To create Gauge instances, use NewGauge.
-type Gauge interface {
- Metric
- Collector
-
- // Set sets the Gauge to an arbitrary value.
- Set(float64)
- // Inc increments the Gauge by 1.
- Inc()
- // Dec decrements the Gauge by 1.
- Dec()
- // Add adds the given value to the Gauge. (The value can be
- // negative, resulting in a decrease of the Gauge.)
- Add(float64)
- // Sub subtracts the given value from the Gauge. (The value can be
- // negative, resulting in an increase of the Gauge.)
- Sub(float64)
-}
-
-// GaugeOpts is an alias for Opts. See there for doc comments.
-type GaugeOpts Opts
-
-// NewGauge creates a new Gauge based on the provided GaugeOpts.
-func NewGauge(opts GaugeOpts) Gauge {
- return newValue(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), GaugeValue, 0)
-}
-
-// GaugeVec is a Collector that bundles a set of Gauges that all share the same
-// Desc, but have different values for their variable labels. This is used if
-// you want to count the same thing partitioned by various dimensions
-// (e.g. number of operations queued, partitioned by user and operation
-// type). Create instances with NewGaugeVec.
-type GaugeVec struct {
- MetricVec
-}
-
-// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &GaugeVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- hash: fnv.New64a(),
- newMetric: func(lvs ...string) Metric {
- return newValue(desc, GaugeValue, 0, lvs...)
- },
- },
- }
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Gauge and not a
-// Metric so that no type conversion is required.
-func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Gauge and not a Metric so that no
-// type conversion is required.
-func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
- return m.MetricVec.WithLabelValues(lvs...).(Gauge)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *GaugeVec) With(labels Labels) Gauge {
- return m.MetricVec.With(labels).(Gauge)
-}
-
-// GaugeFunc is a Gauge whose value is determined at collect time by calling a
-// provided function.
-//
-// To create GaugeFunc instances, use NewGaugeFunc.
-type GaugeFunc interface {
- Metric
- Collector
-}
-
-// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
-// value reported is determined by calling the given function from within the
-// Write method. Take into account that metric collection may happen
-// concurrently. If that results in concurrent calls to Write, like in the case
-// where a GaugeFunc is directly registered with Prometheus, the provided
-// function must be concurrency-safe.
-func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), GaugeValue, function)
-} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge_test.go deleted file mode 100644 index 48cab46367..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge_test.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "math/rand" - "sync" - "testing" - "testing/quick" - - dto "github.com/prometheus/client_model/go" -) - -func listenGaugeStream(vals, result chan float64, done chan struct{}) { - var sum float64 -outer: - for { - select { - case <-done: - close(vals) - for v := range vals { - sum += v - } - break outer - case v := <-vals: - sum += v - } - } - result <- sum - close(result) -} - -func TestGaugeConcurrency(t *testing.T) { - it := func(n uint32) bool { - mutations := int(n % 10000) - concLevel := int(n%15 + 1) - - var start, end sync.WaitGroup - start.Add(1) - end.Add(concLevel) - - sStream := make(chan float64, mutations*concLevel) - result := make(chan float64) - done := make(chan struct{}) - - go listenGaugeStream(sStream, result, done) - go func() { - end.Wait() - close(done) - }() - - gge := NewGauge(GaugeOpts{ - Name: "test_gauge", - Help: "no help can be found here", - }) - for i := 0; i < concLevel; i++ { - vals := make([]float64, mutations) - for j := 0; j < mutations; j++ { - vals[j] = rand.Float64() - 0.5 - } - - go func(vals []float64) { - start.Wait() - for _, v := range vals { - sStream <- v - gge.Add(v) - } - end.Done() - }(vals) - } - start.Done() - - if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 { - t.Fatalf("expected approx. %f, got %f", expected, got) - return false - } - return true - } - - if err := quick.Check(it, nil); err != nil { - t.Fatal(err) - } -} - -func TestGaugeVecConcurrency(t *testing.T) { - it := func(n uint32) bool { - mutations := int(n % 10000) - concLevel := int(n%15 + 1) - vecLength := int(n%5 + 1) - - var start, end sync.WaitGroup - start.Add(1) - end.Add(concLevel) - - sStreams := make([]chan float64, vecLength) - results := make([]chan float64, vecLength) - done := make(chan struct{}) - - for i := 0; i < vecLength; i++ { - sStreams[i] = make(chan float64, mutations*concLevel) - results[i] = make(chan float64) - go listenGaugeStream(sStreams[i], results[i], done) - } - - go func() { - end.Wait() - close(done) - }() - - gge := NewGaugeVec( - GaugeOpts{ - Name: "test_gauge", - Help: "no help can be found here", - }, - []string{"label"}, - ) - for i := 0; i < concLevel; i++ { - vals := make([]float64, mutations) - pick := make([]int, mutations) - for j := 0; j < mutations; j++ { - vals[j] = rand.Float64() - 0.5 - pick[j] = rand.Intn(vecLength) - } - - go func(vals []float64) { - start.Wait() - for i, v := range vals { - sStreams[pick[i]] <- v - gge.WithLabelValues(string('A' + pick[i])).Add(v) - } - end.Done() - }(vals) - } - start.Done() - - for i := range sStreams { - if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 { - t.Fatalf("expected approx. 
%f, got %f", expected, got) - return false - } - } - return true - } - - if err := quick.Check(it, nil); err != nil { - t.Fatal(err) - } -} - -func TestGaugeFunc(t *testing.T) { - gf := NewGaugeFunc( - GaugeOpts{ - Name: "test_name", - Help: "test help", - ConstLabels: Labels{"a": "1", "b": "2"}, - }, - func() float64 { return 3.1415 }, - ) - - if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { - t.Errorf("expected %q, got %q", expected, got) - } - - m := &dto.Metric{} - gf.Write(m) - - if expected, got := `label: label: gauge: `, m.String(); expected != got { - t.Errorf("expected %q, got %q", expected, got) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index 85fa20be45..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,50 +0,0 @@ -package prometheus - -import ( - "runtime" - "runtime/debug" - "time" -) - -type goCollector struct { - goroutines Gauge - gcDesc *Desc -} - -// NewGoCollector returns a collector which exports metrics about the current -// go process. -func NewGoCollector() *goCollector { - return &goCollector{ - goroutines: NewGauge(GaugeOpts{ - Name: "go_goroutines", - Help: "Number of goroutines that currently exist.", - }), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the GC invocation durations.", - nil, nil), - } -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutines.Desc() - ch <- c.gcDesc -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - c.goroutines.Set(float64(runtime.NumGoroutine())) - ch <- c.goroutines - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go deleted file mode 100644 index b75d28e593..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package prometheus - -import ( - "runtime" - "testing" - "time" - - dto "github.com/prometheus/client_model/go" -) - -func TestGoCollector(t *testing.T) { - var ( - c = NewGoCollector() - ch = make(chan Metric) - waitc = make(chan struct{}) - closec = make(chan struct{}) - old = -1 - ) - defer close(closec) - - go func() { - c.Collect(ch) - go func(c <-chan struct{}) { - <-c - }(closec) - <-waitc - c.Collect(ch) - }() - - for { - select { - case metric := <-ch: - switch m := metric.(type) { - // Attention, this also catches Counter... 
- case Gauge:
- pb := &dto.Metric{}
- m.Write(pb)
- if pb.GetGauge() == nil {
- continue
- }
-
- if old == -1 {
- old = int(pb.GetGauge().GetValue())
- close(waitc)
- continue
- }
-
- if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
- // TODO: This is flaky in highly concurrent situations.
- t.Errorf("want 1 new goroutine, got %d", diff)
- }
-
- return
- }
- case <-time.After(1 * time.Second):
- t.Fatalf("Collect timed out")
- }
- }
-}
-
-func TestGCCollector(t *testing.T) {
- var (
- c = NewGoCollector()
- ch = make(chan Metric)
- waitc = make(chan struct{})
- closec = make(chan struct{})
- oldGC uint64
- oldPause float64
- )
- defer close(closec)
-
- go func() {
- c.Collect(ch)
- // force GC
- runtime.GC()
- <-waitc
- c.Collect(ch)
- }()
-
- first := true
- for {
- select {
- case metric := <-ch:
- switch m := metric.(type) {
- case *constSummary, *value:
- pb := &dto.Metric{}
- m.Write(pb)
- if pb.GetSummary() == nil {
- continue
- }
-
- if len(pb.GetSummary().Quantile) != 5 {
- t.Errorf("expected 5 quantiles, got %d", len(pb.GetSummary().Quantile))
- }
- for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {
- if *pb.GetSummary().Quantile[idx].Quantile != want {
- t.Errorf("quantile #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want)
- }
- }
- if first {
- first = false
- oldGC = *pb.GetSummary().SampleCount
- oldPause = *pb.GetSummary().SampleSum
- close(waitc)
- continue
- }
- if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
- t.Errorf("want 1 new garbage collection run, got %d", diff)
- }
- if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
- t.Errorf("want a larger pause sum, got %f", diff)
- }
- return
- }
- case <-time.After(1 * time.Second):
- t.Fatalf("Collect timed out")
- }
- }
-} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/histogram.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index a94bbaf7a4..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "hash/fnv"
- "math"
- "sort"
- "sync/atomic"
-
- "github.com/golang/protobuf/proto"
-
- "github.com/prometheus/client_golang/model"
- dto "github.com/prometheus/client_model/go"
-)
-
-// A Histogram counts individual observations from an event or sample stream in
-// configurable buckets. Similar to a summary, it also provides a sum of
-// observations and an observation count.
-//
-// On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile function in the query language.
-//
-// Note that Histograms, in contrast to Summaries, can be aggregated with the
-// Prometheus query language (see the documentation for detailed
-// procedures).
However, Histograms require the user to pre-define suitable
-// buckets, and they are in general less accurate. The Observe method of a
-// Histogram has a very low performance overhead in comparison with the Observe
-// method of a Summary.
-//
-// To create Histogram instances, use NewHistogram.
-type Histogram interface {
- Metric
- Collector
-
- // Observe adds a single observation to the histogram.
- Observe(float64)
-}
-
-var (
- // DefBuckets are the default Histogram buckets. The default buckets are
- // tailored to broadly measure the response time (in seconds) of a
- // network service. Most likely, however, you will be required to define
- // buckets customized to your use case.
- DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
-
- errBucketLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in histograms", model.BucketLabel,
- )
-)
-
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is zero or negative.
-func LinearBuckets(start, width float64, count int) []float64 {
- if count < 1 {
- panic("LinearBuckets needs a positive count")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start += width
- }
- return buckets
-}
-
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
-// or if 'factor' is less than or equal to 1.
-func ExponentialBuckets(start, factor float64, count int) []float64 {
- if count < 1 {
- panic("ExponentialBuckets needs a positive count")
- }
- if start <= 0 {
- panic("ExponentialBuckets needs a positive start value")
- }
- if factor <= 1 {
- panic("ExponentialBuckets needs a factor greater than 1")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start *= factor
- }
- return buckets
-}
-
-// HistogramOpts bundles the options for creating a Histogram metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
-// optional and can safely be left at their zero value.
-type HistogramOpts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Histogram (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the Histogram must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this Histogram. Mandatory!
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this
- // Histogram. Histograms with the same fully-qualified name must have the
- // same label names in their ConstLabels.
- //
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process.
Those labels are usually managed with a - // HistogramVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Histograms with the same fully-qualified - // name. In that case, those Histograms must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound; it will be added - // implicitly. The default value is DefBuckets. - Buckets []float64 -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) - } - - for _, n := range desc.variableLabels { - if n == model.BucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == model.BucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: makeLabelPairs(desc, labelValues), - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Now we know the final length of h.upperBounds and can make counts. - h.counts = make([]uint64, len(h.upperBounds)) - - h.Init(h) // Init self-collection. - return h -} - -type histogram struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 - - SelfCollector - // Note that there is no mutex required. - - desc *Desc - - upperBounds []float64 - counts []uint64 - - labelPairs []*dto.LabelPair -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets.
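// A minimal usage sketch for the constructors above; the metric name and
// bucket layout here are illustrative assumptions only, not taken from this
// file:
//
//	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name:    "http_request_duration_seconds",
//		Help:    "HTTP request latencies in seconds.",
//		Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), // 1ms up to ~0.5s
//	})
//	prometheus.MustRegister(reqDur)
//	reqDur.Observe(0.042) // counted in the bucket with upper bound 0.064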
- // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - i := sort.SearchFloat64s(h.upperBounds, v) - if i < len(h.counts) { - atomic.AddUint64(&h.counts[i], 1) - } - atomic.AddUint64(&h.count, 1) - for { - oldBits := atomic.LoadUint64(&h.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { - break - } - } -} - -func (h *histogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, len(h.upperBounds)) - - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) - his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) - var count uint64 - for i, upperBound := range h.upperBounds { - count += atomic.LoadUint64(&h.counts[i]) - buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - } - } - his.Bucket = buckets - out.Histogram = his - out.Label = h.labelPairs - return nil -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - MetricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &HistogramVec{ - MetricVec: MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - hash: fnv.New64a(), - newMetric: func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) - }, - }, - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Histogram and not a -// Metric so that no type conversion is required. -func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Histogram), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Histogram and not a Metric so that no -// type conversion is required. -func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Histogram), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { - return m.MetricVec.WithLabelValues(lvs...).(Histogram) -} - -// With works as GetMetricWith, but panics where GetMetricWith would have -// returned an error.
By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *HistogramVec) With(labels Labels) Histogram { - return m.MetricVec.With(labels).(Histogram) -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// and sent to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. -func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstHistogram would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/histogram_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/histogram_test.go deleted file mode 100644 index 855af46950..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/histogram_test.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "math/rand" - "reflect" - "sort" - "sync" - "testing" - "testing/quick" - - dto "github.com/prometheus/client_model/go" -) - -func benchmarkHistogramObserve(w int, b *testing.B) { - b.StopTimer() - - wg := new(sync.WaitGroup) - wg.Add(w) - - g := new(sync.WaitGroup) - g.Add(1) - - s := NewHistogram(HistogramOpts{}) - - for i := 0; i < w; i++ { - go func() { - g.Wait() - - for i := 0; i < b.N; i++ { - s.Observe(float64(i)) - } - - wg.Done() - }() - } - - b.StartTimer() - g.Done() - wg.Wait() -} - -func BenchmarkHistogramObserve1(b *testing.B) { - benchmarkHistogramObserve(1, b) -} - -func BenchmarkHistogramObserve2(b *testing.B) { - benchmarkHistogramObserve(2, b) -} - -func BenchmarkHistogramObserve4(b *testing.B) { - benchmarkHistogramObserve(4, b) -} - -func BenchmarkHistogramObserve8(b *testing.B) { - benchmarkHistogramObserve(8, b) -} - -func benchmarkHistogramWrite(w int, b *testing.B) { - b.StopTimer() - - wg := new(sync.WaitGroup) - wg.Add(w) - - g := new(sync.WaitGroup) - g.Add(1) - - s := NewHistogram(HistogramOpts{}) - - for i := 0; i < 1000000; i++ { - s.Observe(float64(i)) - } - - for j := 0; j < w; j++ { - outs := make([]dto.Metric, b.N) - - go func(o []dto.Metric) { - g.Wait() - - for i := 0; i < b.N; i++ { - s.Write(&o[i]) - } - - wg.Done() - }(outs) - } - - b.StartTimer() - g.Done() - wg.Wait() -} - -func BenchmarkHistogramWrite1(b *testing.B) { - benchmarkHistogramWrite(1, b) -} - -func BenchmarkHistogramWrite2(b *testing.B) { - benchmarkHistogramWrite(2, b) -} - -func BenchmarkHistogramWrite4(b *testing.B) { - benchmarkHistogramWrite(4, b) -} - -func BenchmarkHistogramWrite8(b *testing.B) { - benchmarkHistogramWrite(8, b) -} - -// Intentionally adding +Inf here to test if that case is handled correctly. -// Also, getCumulativeCounts depends on it. 
-var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)} - -func TestHistogramConcurrency(t *testing.T) { - rand.Seed(42) - - it := func(n uint32) bool { - mutations := int(n%1e4 + 1e4) - concLevel := int(n%5 + 1) - total := mutations * concLevel - - var start, end sync.WaitGroup - start.Add(1) - end.Add(concLevel) - - sum := NewHistogram(HistogramOpts{ - Name: "test_histogram", - Help: "helpless", - Buckets: testBuckets, - }) - - allVars := make([]float64, total) - var sampleSum float64 - for i := 0; i < concLevel; i++ { - vals := make([]float64, mutations) - for j := 0; j < mutations; j++ { - v := rand.NormFloat64() - vals[j] = v - allVars[i*mutations+j] = v - sampleSum += v - } - - go func(vals []float64) { - start.Wait() - for _, v := range vals { - sum.Observe(v) - } - end.Done() - }(vals) - } - sort.Float64s(allVars) - start.Done() - end.Wait() - - m := &dto.Metric{} - sum.Write(m) - if got, want := int(*m.Histogram.SampleCount), total; got != want { - t.Errorf("got sample count %d, want %d", got, want) - } - if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { - t.Errorf("got sample sum %f, want %f", got, want) - } - - wantCounts := getCumulativeCounts(allVars) - - if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { - t.Errorf("got %d buckets in protobuf, want %d", got, want) - } - for i, wantBound := range testBuckets { - if i == len(testBuckets)-1 { - break // No +Inf bucket in protobuf. - } - if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound { - t.Errorf("got bound %f, want %f", gotBound, wantBound) - } - if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount { - t.Errorf("got count %d, want %d", gotCount, wantCount) - } - } - return true - } - - if err := quick.Check(it, nil); err != nil { - t.Error(err) - } -} - -func TestHistogramVecConcurrency(t *testing.T) { - rand.Seed(42) - - objectives := make([]float64, 0, len(DefObjectives)) - for qu := range DefObjectives { - - objectives = append(objectives, qu) - } - sort.Float64s(objectives) - - it := func(n uint32) bool { - mutations := int(n%1e4 + 1e4) - concLevel := int(n%7 + 1) - vecLength := int(n%3 + 1) - - var start, end sync.WaitGroup - start.Add(1) - end.Add(concLevel) - - his := NewHistogramVec( - HistogramOpts{ - Name: "test_histogram", - Help: "helpless", - Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}, - }, - []string{"label"}, - ) - - allVars := make([][]float64, vecLength) - sampleSums := make([]float64, vecLength) - for i := 0; i < concLevel; i++ { - vals := make([]float64, mutations) - picks := make([]int, mutations) - for j := 0; j < mutations; j++ { - v := rand.NormFloat64() - vals[j] = v - pick := rand.Intn(vecLength) - picks[j] = pick - allVars[pick] = append(allVars[pick], v) - sampleSums[pick] += v - } - - go func(vals []float64) { - start.Wait() - for i, v := range vals { - his.WithLabelValues(string('A' + picks[i])).Observe(v) - } - end.Done() - }(vals) - } - for _, vars := range allVars { - sort.Float64s(vars) - } - start.Done() - end.Wait() - - for i := 0; i < vecLength; i++ { - m := &dto.Metric{} - s := his.WithLabelValues(string('A' + i)) - s.Write(m) - - if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { - t.Errorf("got %d buckets in protobuf, want %d", got, want) - } - if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want { - t.Errorf("got sample count %d, want %d", got, want) - } - if got, want := 
*m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { - t.Errorf("got sample sum %f, want %f", got, want) - } - - wantCounts := getCumulativeCounts(allVars[i]) - - for j, wantBound := range testBuckets { - if j == len(testBuckets)-1 { - break // No +Inf bucket in protobuf. - } - if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound { - t.Errorf("got bound %f, want %f", gotBound, wantBound) - } - if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount { - t.Errorf("got count %d, want %d", gotCount, wantCount) - } - } - } - return true - } - - if err := quick.Check(it, nil); err != nil { - t.Error(err) - } -} - -func getCumulativeCounts(vars []float64) []uint64 { - counts := make([]uint64, len(testBuckets)) - for _, v := range vars { - for i := len(testBuckets) - 1; i >= 0; i-- { - if v > testBuckets[i] { - break - } - counts[i]++ - } - } - return counts -} - -func TestBuckets(t *testing.T) { - got := LinearBuckets(-15, 5, 6) - want := []float64{-15, -10, -5, 0, 5, 10} - if !reflect.DeepEqual(got, want) { - t.Errorf("linear buckets: got %v, want %v", got, want) - } - - got = ExponentialBuckets(100, 1.2, 3) - want = []float64{100, 120, 144} - if !reflect.DeepEqual(got, want) { - t.Errorf("exponential buckets: got %v, want %v", got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go deleted file mode 100644 index eabe602468..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bufio" - "io" - "net" - "net/http" - "strconv" - "strings" - "time" -) - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -func nowSeries(t ...time.Time) nower { - return nowFunc(func() time.Time { - defer func() { - t = t[1:] - }() - - return t[0] - }) -} - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code").
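// A minimal wiring sketch for the instrumentation described above; the
// handler name "api" and the hypothetical apiHandler are assumptions, and
// Handler() is the instrumented registry handler defined elsewhere in this
// package:
//
//	http.Handle("/api", prometheus.InstrumentHandler("api", apiHandler))
//	http.Handle("/metrics", prometheus.Handler())
//	log.Fatal(http.ListenAndServe(":8080", nil))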
-func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler. -func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler but provides more -// flexibility (at the cost of a more complex call syntax). As -// InstrumentHandler, this function registers four metric collectors, but it -// uses the provided SummaryOpts to create them. However, the fields "Name" and -// "Help" in the SummaryOpts are ignored. "Name" is replaced by -// "requests_total", "request_duration_microseconds", "request_size_bytes", and -// "response_size_bytes", respectively. "Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). -// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides -// more flexibility (at the cost of a more complex call syntax). See -// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used. -func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." 
- resSz := NewSummary(opts) - - regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) - regReqDur := MustRegisterOrGet(reqDur).(Summary) - regReqSz := MustRegisterOrGet(reqSz).(Summary) - regResSz := MustRegisterOrGet(resSz).(Summary) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := make(chan int) - urlLen := 0 - if r.URL != nil { - urlLen = len(r.URL.String()) - } - go computeApproximateRequestSize(r, out, urlLen) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - regReqCnt.WithLabelValues(method, code).Inc() - regReqDur.Observe(elapsed) - regResSz.Observe(float64(delegate.written)) - regReqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request, out chan int, s int) { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s -} - -type responseWriterDelegator struct { - http.ResponseWriter - - handler, method string - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: 
- return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http_test.go deleted file mode 100644 index ffe0418cf8..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "net/http" - "net/http/httptest" - "testing" - "time" - - dto "github.com/prometheus/client_model/go" -) - -type respBody string - -func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusTeapot) - w.Write([]byte(b)) -} - -func TestInstrumentHandler(t *testing.T) { - defer func(n nower) { - now = n.(nower) - }(now) - - instant := time.Now() - end := instant.Add(30 * time.Second) - now = nowSeries(instant, end) - respBody := respBody("Howdy there!") - - hndlr := InstrumentHandler("test-handler", respBody) - - opts := SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": "test-handler"}, - } - - reqCnt := MustRegisterOrGet(NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - )).(*CounterVec) - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary) - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - MustRegisterOrGet(NewSummary(opts)) - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." - MustRegisterOrGet(NewSummary(opts)) - - reqCnt.Reset() - - resp := httptest.NewRecorder() - req := &http.Request{ - Method: "GET", - } - - hndlr.ServeHTTP(resp, req) - - if resp.Code != http.StatusTeapot { - t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code) - } - if string(resp.Body.Bytes()) != "Howdy there!" 
{ - t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes())) - } - - out := &dto.Metric{} - reqDur.Write(out) - if want, got := "test-handler", out.Label[0].GetValue(); want != got { - t.Errorf("want label value %q in reqDur, got %q", want, got) - } - if want, got := uint64(1), out.Summary.GetSampleCount(); want != got { - t.Errorf("want sample count %d in reqDur, got %d", want, got) - } - - out.Reset() - if want, got := 1, len(reqCnt.children); want != got { - t.Errorf("want %d children in reqCnt, got %d", want, got) - } - cnt, err := reqCnt.GetMetricWithLabelValues("get", "418") - if err != nil { - t.Fatal(err) - } - cnt.Write(out) - if want, got := "418", out.Label[0].GetValue(); want != got { - t.Errorf("want label value %q in reqCnt, got %q", want, got) - } - if want, got := "test-handler", out.Label[1].GetValue(); want != got { - t.Errorf("want label value %q in reqCnt, got %q", want, got) - } - if want, got := "get", out.Label[2].GetValue(); want != got { - t.Errorf("want label value %q in reqCnt, got %q", want, got) - } - if out.Counter == nil { - t.Fatal("expected non-nil counter in reqCnt") - } - if want, got := 1., out.Counter.GetValue(); want != got { - t.Errorf("want reqCnt of %f, got %f", want, got) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index d8905de2e8..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "strings" - - dto "github.com/prometheus/client_model/go" -) - -// A Metric models a single sample value with its metadata being exported to -// Prometheus. Implementers of Metric in this package include Gauge, Counter, -// Untyped, and Summary. Users can implement their own Metric types, but that -// should be rarely needed. See the example for SelfCollector, which is also an -// example for a user-implemented Metric. -type Metric interface { - // Desc returns the descriptor for the Metric. This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. - // - // Implementers of custom Metric types must observe concurrency safety - // as reads of this metric may occur at any time, and any blocking - // occurs at the expense of total performance of rendering all - // registered metrics. Ideally Metric implementations should support - // concurrent readers.
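// A sketch of the smallest conceivable user-defined Metric satisfying the
// contract above; the type name staticGauge is illustrative only, and the
// proto package is assumed to be imported as in the other files of this
// package:
//
//	type staticGauge struct {
//		desc *prometheus.Desc
//		val  float64
//	}
//
//	func (g staticGauge) Desc() *prometheus.Desc { return g.desc }
//
//	func (g staticGauge) Write(out *dto.Metric) error {
//		// No labels, so out.Label can stay nil; no reference to out is kept.
//		out.Gauge = &dto.Gauge{Value: proto.Float64(g.val)}
//		return nil
//	}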
- // - // The Prometheus client library attempts to minimize memory allocations - // and will provide a pre-existing reset dto.Metric pointer. Prometheus - // may recycle the dto.Metric proto message, so Metric implementations - // should just populate the provided dto.Metric and then should not keep - // any reference to it. - // - // While populating dto.Metric, labels must be sorted lexicographically. - // (Implementers may find LabelPairSorter useful for that.) - Write(*dto.Metric) error -} - -// Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just -// an alias of this type (which might change when the requirement arises). -// -// It is mandatory to set Name and Help to a non-empty string. All other fields -// are optional and can safely be left at their zero value. -type Opts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Metric (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the metric must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this metric. Mandatory! - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a metric - // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels - // serve only special purposes. One is for the special case where the - // value of a label does not change during the lifetime of a process, - // e.g. if the revision of the running binary is put into a - // label. Another, more advanced purpose is if more than one Collector - // needs to collect Metrics with the same fully-qualified name. In that - // case, those Metrics must differ in the values of their - // ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). - ConstLabels Labels -} - -// BuildFQName joins the given three name components by "_". Empty name -// components are ignored. If the name parameter itself is empty, an empty -// string is returned, no matter what. Metric implementations included in this -// library use this function internally to generate the fully-qualified metric -// name from the name component in their Opts. Users of the library will only -// need this function if they implement their own Metric or instantiate a Desc -// (with NewDesc) directly. -func BuildFQName(namespace, subsystem, name string) string { - if name == "" { - return "" - } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") - } - return name -} - -// LabelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers.
This is useful for implementing the Write method of -// custom metrics. -type LabelPairSorter []*dto.LabelPair - -func (s LabelPairSorter) Len() int { - return len(s) -} - -func (s LabelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s LabelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -type hashSorter []uint64 - -func (s hashSorter) Len() int { - return len(s) -} - -func (s hashSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s hashSorter) Less(i, j int) bool { - return s[i] < s[j] -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. -func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric_test.go deleted file mode 100644 index 7145f5e53c..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "testing" - -func TestBuildFQName(t *testing.T) { - scenarios := []struct{ namespace, subsystem, name, result string }{ - {"a", "b", "c", "a_b_c"}, - {"", "b", "c", "b_c"}, - {"a", "", "c", "a_c"}, - {"", "", "c", "c"}, - {"a", "b", "", ""}, - {"a", "", "", ""}, - {"", "b", "", ""}, - {" ", "", "", ""}, - } - - for i, s := range scenarios { - if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got { - t.Errorf("%d. want %s, got %s", i, want, got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index d8cf0eda34..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import "github.com/prometheus/procfs" - -type processCollector struct { - pid int - collectFn func(chan<- Metric) - pidFn func() (int, error) - cpuTotal Counter - openFDs, maxFDs Gauge - vsize, rss Gauge - startTime Gauge -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including cpu, memory and file descriptor usage as well as -// the process start time for the given process id under the given namespace. -func NewProcessCollector(pid int, namespace string) *processCollector { - return NewProcessCollectorPIDFn( - func() (int, error) { return pid, nil }, - namespace, - ) -} - -// NewProcessCollectorPIDFn returns a collector which exports the current state -// of process metrics including cpu, memory and file descriptor usage as well -// as the process start time under the given namespace. The given pidFn is -// called on each collect and is used to determine the process to export -// metrics for. -func NewProcessCollectorPIDFn( - pidFn func() (int, error), - namespace string, -) *processCollector { - c := processCollector{ - pidFn: pidFn, - collectFn: func(chan<- Metric) {}, - - cpuTotal: NewCounter(CounterOpts{ - Namespace: namespace, - Name: "process_cpu_seconds_total", - Help: "Total user and system CPU time spent in seconds.", - }), - openFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_open_fds", - Help: "Number of open file descriptors.", - }), - maxFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_max_fds", - Help: "Maximum number of open file descriptors.", - }), - vsize: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_virtual_memory_bytes", - Help: "Virtual memory size in bytes.", - }), - rss: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_resident_memory_bytes", - Help: "Resident memory size in bytes.", - }), - startTime: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_start_time_seconds", - Help: "Start time of the process since unix epoch in seconds.", - }), - } - - // Set up process metric collection if supported by the runtime. - if _, err := procfs.NewStat(); err == nil { - c.collectFn = c.processCollect - } - - return &c -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal.Desc() - ch <- c.openFDs.Desc() - ch <- c.maxFDs.Desc() - ch <- c.vsize.Desc() - ch <- c.rss.Desc() - ch <- c.startTime.Desc() -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the -// client allows users to configure the error behavior. 
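// A registration sketch for the collector defined above, assuming the
// standard os package is imported; an empty namespace yields the canonical
// process_* metric names:
//
//	prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))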
-func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - return - } - - if stat, err := p.NewStat(); err == nil { - c.cpuTotal.Set(stat.CPUTime()) - ch <- c.cpuTotal - c.vsize.Set(float64(stat.VirtualMemory())) - ch <- c.vsize - c.rss.Set(float64(stat.ResidentMemory())) - ch <- c.rss - - if startTime, err := stat.StartTime(); err == nil { - c.startTime.Set(startTime) - ch <- c.startTime - } - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - c.openFDs.Set(float64(fds)) - ch <- c.openFDs - } - - if limits, err := p.NewLimits(); err == nil { - c.maxFDs.Set(float64(limits.OpenFiles)) - ch <- c.maxFDs - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go deleted file mode 100644 index 829715acd8..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package prometheus - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "regexp" - "testing" - - "github.com/prometheus/procfs" -) - -func TestProcessCollector(t *testing.T) { - if _, err := procfs.Self(); err != nil { - t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) - } - - registry := newRegistry() - registry.Register(NewProcessCollector(os.Getpid(), "")) - registry.Register(NewProcessCollectorPIDFn( - func() (int, error) { return os.Getpid(), nil }, "foobar")) - - s := httptest.NewServer(InstrumentHandler("prometheus", registry)) - defer s.Close() - r, err := http.Get(s.URL) - if err != nil { - t.Fatal(err) - } - defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) - } - - for _, re := range []*regexp.Regexp{ - regexp.MustCompile("process_cpu_seconds_total [0-9]"), - regexp.MustCompile("process_max_fds [0-9]{2,}"), - regexp.MustCompile("process_open_fds [1-9]"), - regexp.MustCompile("process_virtual_memory_bytes [1-9]"), - regexp.MustCompile("process_resident_memory_bytes [1-9]"), - regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"), - regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"), - regexp.MustCompile("foobar_process_max_fds [0-9]{2,}"), - regexp.MustCompile("foobar_process_open_fds [1-9]"), - regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"), - regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"), - regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"), - } { - if !re.Match(body) { - t.Errorf("want body to match %s\n%s", re, body) - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/push.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/push.go deleted file mode 100644 index 1c33848a35..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/push.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package prometheus - -// Push triggers a metric collection by the default registry and pushes all -// collected metrics to the Pushgateway specified by addr. See the Pushgateway -// documentation for detailed implications of the job and instance -// parameter. instance can be left empty. You can use just host:port or ip:port -// as url, in which case 'http://' is added automatically. You can also include -// the schema in the URL. However, do not include the '/metrics/jobs/...' part. -// -// Note that all previously pushed metrics with the same job and instance will -// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT' -// to push to the Pushgateway.) -func Push(job, instance, url string) error { - return defRegistry.Push(job, instance, url, "PUT") -} - -// PushAdd works like Push, but only previously pushed metrics with the same -// name (and the same job and instance) will be replaced. (It uses HTTP method -// 'POST' to push to the Pushgateway.) -func PushAdd(job, instance, url string) error { - return defRegistry.Push(job, instance, url, "POST") -} - -// PushCollectors works like Push, but it does not collect from the default -// registry. Instead, it collects from the provided collectors. It is a -// convenient way to push only a few metrics. -func PushCollectors(job, instance, url string, collectors ...Collector) error { - return pushCollectors(job, instance, url, "PUT", collectors...) -} - -// PushAddCollectors works like PushAdd, but it does not collect from the -// default registry. Instead, it collects from the provided collectors. It is a -// convenient way to push only a few metrics. -func PushAddCollectors(job, instance, url string, collectors ...Collector) error { - return pushCollectors(job, instance, url, "POST", collectors...) -} - -func pushCollectors(job, instance, url, method string, collectors ...Collector) error { - r := newRegistry() - for _, collector := range collectors { - if _, err := r.Register(collector); err != nil { - return err - } - } - return r.Push(job, instance, url, method) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go deleted file mode 100644 index 3223193a34..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go +++ /dev/null @@ -1,760 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package prometheus - -import ( - "bytes" - "compress/gzip" - "errors" - "fmt" - "hash/fnv" - "io" - "net/http" - "net/url" - "os" - "sort" - "strings" - "sync" - - "bitbucket.org/ww/goautoneg" - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/model" - "github.com/prometheus/client_golang/text" -) - -var ( - defRegistry = newDefaultRegistry() - errAlreadyReg = errors.New("duplicate metrics collector registration attempted") -) - -// Constants relevant to the HTTP interface. -const ( - // APIVersion is the version of the format of the exported data. This - // will match this library's version, which subscribes to the Semantic - // Versioning scheme. - APIVersion = "0.0.4" - - // DelimitedTelemetryContentType is the content type set on telemetry - // data responses in delimited protobuf format. - DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` - // TextTelemetryContentType is the content type set on telemetry data - // responses in text format. - TextTelemetryContentType = `text/plain; version=` + APIVersion - // ProtoTextTelemetryContentType is the content type set on telemetry - // data responses in protobuf text format. (Only used for debugging.) - ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text` - // ProtoCompactTextTelemetryContentType is the content type set on - // telemetry data responses in protobuf compact text format. (Only used - // for debugging.) - ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text` - - // Constants for object pools. - numBufs = 4 - numMetricFamilies = 1000 - numMetrics = 10000 - - // Capacity for the channel to collect metrics and descriptors. - capMetricChan = 1000 - capDescChan = 10 - - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - - acceptEncodingHeader = "Accept-Encoding" - acceptHeader = "Accept" -) - -// Handler returns the HTTP handler for the global Prometheus registry. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). Usually the handler is used to handle the "/metrics" endpoint. -func Handler() http.Handler { - return InstrumentHandler("prometheus", defRegistry) -} - -// UninstrumentedHandler works in the same way as Handler, but the returned HTTP -// handler is not instrumented. This is useful if no instrumentation is desired -// (for whatever reason) or if the instrumentation has to happen with a -// different handler name (or with a different instrumentation approach -// altogether). See the InstrumentHandler example. -func UninstrumentedHandler() http.Handler { - return defRegistry -} - -// Register registers a new Collector to be included in metrics collection. It -// returns an error if the descriptors provided by the Collector are invalid or -// if they - in combination with descriptors of already registered Collectors - -// do not fulfill the consistency and uniqueness criteria described in the Desc -// documentation. 
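// A sketch of registration with explicit error handling; the counter and its
// name are illustrative assumptions only:
//
//	c := prometheus.NewCounter(prometheus.CounterOpts{
//		Name: "events_processed_total",
//		Help: "Total number of processed events.",
//	})
//	if err := prometheus.Register(c); err != nil {
//		log.Println("counter not registered:", err)
//	}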
-// -// Do not register the same Collector multiple times concurrently. (Registering -// the same Collector twice would result in an error anyway, but on top of that, -// it is not safe to do so concurrently.) -func Register(m Collector) error { - _, err := defRegistry.Register(m) - return err -} - -// MustRegister works like Register but panics where Register would have -// returned an error. -func MustRegister(m Collector) { - err := Register(m) - if err != nil { - panic(err) - } -} - -// RegisterOrGet works like Register but does not return an error if a Collector -// is registered that equals a previously registered Collector. (Two Collectors -// are considered equal if their Describe method yields the same set of -// descriptors.) Instead, the previously registered Collector is returned (which -// is helpful if the new and previously registered Collectors are equal but not -// identical, i.e. not pointers to the same object). -// -// As for Register, it is still not safe to call RegisterOrGet with the same -// Collector multiple times concurrently. -func RegisterOrGet(m Collector) (Collector, error) { - return defRegistry.RegisterOrGet(m) -} - -// MustRegisterOrGet works like RegisterOrGet but panics where RegisterOrGet would -// have returned an error. -func MustRegisterOrGet(m Collector) Collector { - existing, err := RegisterOrGet(m) - if err != nil { - panic(err) - } - return existing -} - -// Unregister unregisters the Collector that equals the Collector passed in as -// an argument. (Two Collectors are considered equal if their Describe method -// yields the same set of descriptors.) The function returns whether a Collector -// was unregistered. -func Unregister(c Collector) bool { - return defRegistry.Unregister(c) -} - -// SetMetricFamilyInjectionHook sets a function that is called whenever metrics -// are collected. The hook function must be set before metrics collection begins -// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The -// MetricFamily protobufs returned by the hook function are merged with the -// metrics collected in the usual way. -// -// This is a way to directly inject MetricFamily protobufs managed and owned by -// the caller. The caller has full responsibility. As no registration of the -// injected metrics has happened, there is no descriptor to check against, and -// there are no registration-time checks. If collect-time checks are disabled -// (see function EnableCollectChecks), no sanity checks are performed on the -// returned protobufs at all. If collect-time checks are enabled, type and uniqueness -// checks are performed, but no further consistency checks (which would require -// knowledge of a metric descriptor). -// -// Sorting concerns: The caller is responsible for sorting the label pairs in -// each metric. However, the metrics will be sorted by the registry as -// it is required anyway after merging with the metric families collected -// conventionally. -// -// The function must be callable at any time and concurrently. -func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { - defRegistry.metricFamilyInjectionHook = hook -} - -// PanicOnCollectError sets whether a panic is raised upon an error -// while metrics are collected and served to the HTTP endpoint. By default, an -// internal server error (status code 500) is served with an error message.
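// A sketch of how these package-level debugging switches might be flipped
// while developing a custom Collector; both calls are optional and default
// to false:
//
//	prometheus.EnableCollectChecks(true) // extra consistency checks at collect time
//	prometheus.PanicOnCollectError(true) // panic instead of serving a 500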
-func PanicOnCollectError(b bool) { - defRegistry.panicOnCollectError = b -} - -// EnableCollectChecks enables (or disables) additional consistency checks -// during metrics collection. These additional checks are not enabled by default -// because they inflict a performance penalty and the errors they check for can -// only happen if the used Metric and Collector types have internal programming -// errors. It can be helpful to enable these checks while working with custom -// Collectors or Metrics whose correctness is not well established yet. -func EnableCollectChecks(b bool) { - defRegistry.collectChecksEnabled = b -} - -// encoder is a function that writes a dto.MetricFamily to an io.Writer in a -// certain encoding. It returns the number of bytes written and any error -// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText -// are encoders. -type encoder func(io.Writer, *dto.MetricFamily) (int, error) - -type registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - bufPool chan *bytes.Buffer - metricFamilyPool chan *dto.MetricFamily - metricPool chan *dto.Metric - metricFamilyInjectionHook func() []*dto.MetricFamily - - panicOnCollectError, collectChecksEnabled bool -} - -func (r *registry) Register(c Collector) (Collector, error) { - descChan := make(chan *Desc, capDescChan) - go func() { - c.Describe(descChan) - close(descChan) - }() - - newDescIDs := map[uint64]struct{}{} - newDimHashesByName := map[string]uint64{} - var collectorID uint64 // Just a sum of all desc IDs. - var duplicateDescErr error - - r.mtx.Lock() - defer r.mtx.Unlock() - // Conduct various tests... - for desc := range descChan { - - // Is the descriptor valid at all? - if desc.err != nil { - return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) - } - - // Is the descID unique? - // (In other words: Is the fqName + constLabel combination unique?) - if _, exists := r.descIDs[desc.id]; exists { - duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) - } - // If it is not a duplicate desc in this collector, add it to - // the collectorID. (We allow duplicate descs within the same - // collector, but their existence must be a no-op.) - if _, exists := newDescIDs[desc.id]; !exists { - newDescIDs[desc.id] = struct{}{} - collectorID += desc.id - } - - // Are all the label names and the help string consistent with - // previous descriptors of the same name? - // First check existing descriptors... - if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) - } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash - } - } - } - // Did anything happen at all?
- if len(newDescIDs) == 0 { - return nil, errors.New("collector has no descriptors") - } - if existing, exists := r.collectorsByID[collectorID]; exists { - return existing, errAlreadyReg - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. - if duplicateDescErr != nil { - return nil, duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return c, nil -} - -func (r *registry) RegisterOrGet(m Collector) (Collector, error) { - existing, err := r.Register(m) - if err != nil && err != errAlreadyReg { - return nil, err - } - return existing, nil -} - -func (r *registry) Unregister(c Collector) bool { - descChan := make(chan *Desc, capDescChan) - go func() { - c.Describe(descChan) - close(descChan) - }() - - descIDs := map[uint64]struct{}{} - var collectorID uint64 // Just a sum of the desc IDs. - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID += desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. - return true -} - -func (r *registry) Push(job, instance, pushURL, method string) error { - if !strings.Contains(pushURL, "://") { - pushURL = "http://" + pushURL - } - pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job)) - if instance != "" { - pushURL += "/instances/" + url.QueryEscape(instance) - } - buf := r.getBuf() - defer r.giveBuf(buf) - if _, err := r.writePB(buf, text.WriteProtoDelimited); err != nil { - if r.panicOnCollectError { - panic(err) - } - return err - } - req, err := http.NewRequest(method, pushURL, buf) - if err != nil { - return err - } - req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != 202 { - return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL) - } - return nil -} - -func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { - enc, contentType := chooseEncoder(req) - buf := r.getBuf() - defer r.giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - if _, err := r.writePB(writer, enc); err != nil { - if r.panicOnCollectError { - panic(err) - } - http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - header := w.Header() - header.Set(contentTypeHeader, contentType) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) -} - -func (r *registry) writePB(w io.Writer, writeEncoded encoder) (int, error) { - var metricHashes map[uint64]struct{} - if r.collectChecksEnabled { - metricHashes = make(map[uint64]struct{}) - } - metricChan := make(chan Metric, capMetricChan) - wg := sync.WaitGroup{} - - r.mtx.RLock() - metricFamiliesByName := make(map[string]*dto.MetricFamily, 
len(r.dimHashesByName)) - - // Scatter. - // (Collectors could be complex and slow, so we call them all at once.) - wg.Add(len(r.collectorsByID)) - go func() { - wg.Wait() - close(metricChan) - }() - for _, collector := range r.collectorsByID { - go func(collector Collector) { - defer wg.Done() - collector.Collect(metricChan) - }(collector) - } - r.mtx.RUnlock() - - // Drain metricChan in case of premature return. - defer func() { - for _ = range metricChan { - } - }() - - // Gather. - for metric := range metricChan { - // This could be done concurrently, too, but it required locking - // of metricFamiliesByName (and of metricHashes if checks are - // enabled). Most likely not worth it. - desc := metric.Desc() - metricFamily, ok := metricFamiliesByName[desc.fqName] - if !ok { - metricFamily = r.getMetricFamily() - defer r.giveMetricFamily(metricFamily) - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - metricFamiliesByName[desc.fqName] = metricFamily - } - dtoMetric := r.getMetric() - defer r.giveMetric(dtoMetric) - if err := metric.Write(dtoMetric); err != nil { - // TODO: Consider different means of error reporting so - // that a single erroneous metric could be skipped - // instead of blowing up the whole collection. - return 0, fmt.Errorf("error collecting metric %v: %s", desc, err) - } - switch { - case metricFamily.Type != nil: - // Type already set. We are good. - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return 0, fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if r.collectChecksEnabled { - if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil { - return 0, err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - } - - if r.metricFamilyInjectionHook != nil { - for _, mf := range r.metricFamilyInjectionHook() { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if !exists { - metricFamiliesByName[mf.GetName()] = mf - if r.collectChecksEnabled { - for _, m := range mf.Metric { - if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil { - return 0, err - } - } - } - continue - } - for _, m := range mf.Metric { - if r.collectChecksEnabled { - if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil { - return 0, err - } - } - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - - // Now that MetricFamilies are all set, sort their Metrics - // lexicographically by their label values. - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - - // Write out MetricFamilies sorted by their name. 
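The scatter/gather structure above is worth seeing in isolation. Below is a stripped-down, self-contained sketch using hypothetical stand-in types (collector, metric) rather than the library's real interfaces: collectors run concurrently into a shared channel, a watcher goroutine closes the channel once all are done, and the gather loop drains sequentially, which is why the family map needs no extra locking.

package main

import (
	"fmt"
	"sync"
)

type metric string

type collector func(chan<- metric)

func gather(collectors []collector) []metric {
	ch := make(chan metric, 64)
	var wg sync.WaitGroup
	wg.Add(len(collectors))
	// Scatter: run every collector concurrently, as writePB does.
	for _, c := range collectors {
		go func(c collector) {
			defer wg.Done()
			c(ch)
		}(c)
	}
	// Close the channel once all collectors have finished.
	go func() {
		wg.Wait()
		close(ch)
	}()
	// Gather: drain sequentially in a single goroutine.
	var out []metric
	for m := range ch {
		out = append(out, m)
	}
	return out
}

func main() {
	fast := collector(func(ch chan<- metric) { ch <- "fast_metric" })
	slow := collector(func(ch chan<- metric) { ch <- "slow_metric" })
	fmt.Println(gather([]collector{fast, slow}))
}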
- names := make([]string, 0, len(metricFamiliesByName)) - for name := range metricFamiliesByName { - names = append(names, name) - } - sort.Strings(names) - - var written int - for _, name := range names { - w, err := writeEncoded(w, metricFamiliesByName[name]) - written += w - if err != nil { - return written, err - } - } - return written, nil -} - -func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error { - - // Type consistency with metric family. - if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s is not a %s", - metricFamily.GetName(), dtoMetric, metricFamily.GetType(), - ) - } - - // Is the metric unique (i.e. no other metric with the same name and the same label values)? - h := fnv.New64a() - var buf bytes.Buffer - buf.WriteString(metricFamily.GetName()) - buf.WriteByte(model.SeparatorByte) - h.Write(buf.Bytes()) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. Label pairs must be sorted by contract. But the point of this - // method is to check for contract violations. So we better do the sort - // now. - sort.Sort(LabelPairSorter(dtoMetric.Label)) - for _, lp := range dtoMetric.Label { - buf.Reset() - buf.WriteString(lp.GetValue()) - buf.WriteByte(model.SeparatorByte) - h.Write(buf.Bytes()) - } - metricHash := h.Sum64() - if _, exists := metricHashes[metricHash]; exists { - return fmt.Errorf( - "collected metric %s %s was collected before with the same name and label values", - metricFamily.GetName(), dtoMetric, - ) - } - metricHashes[metricHash] = struct{}{} - - if desc == nil { - return nil // Nothing left to check if we have no desc. - } - - // Desc consistency with metric family. - if metricFamily.GetName() != desc.fqName { - return fmt.Errorf( - "collected metric %s %s has name %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName, - ) - } - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) - lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) - for _, l := range desc.variableLabels { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(LabelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - - r.mtx.RLock() // Remaining checks need the read lock. 
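The uniqueness test in checkConsistency above hashes the family name and the sorted label values with FNV-1a, joined by a separator byte. A simplified standalone sketch follows; it sorts the values directly, whereas the real code sorts whole label pairs by name, and the 0xff constant stands in for model.SeparatorByte.

package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

const separatorByte byte = 0xff // Stand-in for model.SeparatorByte.

func metricIdentity(family string, labelValues []string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(family))
	h.Write([]byte{separatorByte})
	// The values must be sorted for the hash to be reproducible,
	// which is exactly why checkConsistency sorts label pairs first.
	sorted := append([]string(nil), labelValues...)
	sort.Strings(sorted)
	for _, v := range sorted {
		h.Write([]byte(v))
		h.Write([]byte{separatorByte})
	}
	return h.Sum64()
}

func main() {
	a := metricIdentity("http_requests_total", []string{"GET", "200"})
	b := metricIdentity("http_requests_total", []string{"200", "GET"})
	fmt.Println(a == b) // true: value order does not matter after sorting
}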
- defer r.mtx.RUnlock() - - // Is the desc registered? - if _, exist := r.descIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - - return nil -} - -func (r *registry) getBuf() *bytes.Buffer { - select { - case buf := <-r.bufPool: - return buf - default: - return &bytes.Buffer{} - } -} - -func (r *registry) giveBuf(buf *bytes.Buffer) { - buf.Reset() - select { - case r.bufPool <- buf: - default: - } -} - -func (r *registry) getMetricFamily() *dto.MetricFamily { - select { - case mf := <-r.metricFamilyPool: - return mf - default: - return &dto.MetricFamily{} - } -} - -func (r *registry) giveMetricFamily(mf *dto.MetricFamily) { - mf.Reset() - select { - case r.metricFamilyPool <- mf: - default: - } -} - -func (r *registry) getMetric() *dto.Metric { - select { - case m := <-r.metricPool: - return m - default: - return &dto.Metric{} - } -} - -func (r *registry) giveMetric(m *dto.Metric) { - m.Reset() - select { - case r.metricPool <- m: - default: - } -} - -func newRegistry() *registry { - return ®istry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - bufPool: make(chan *bytes.Buffer, numBufs), - metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies), - metricPool: make(chan *dto.Metric, numMetrics), - } -} - -func newDefaultRegistry() *registry { - r := newRegistry() - r.Register(NewProcessCollector(os.Getpid(), "")) - r.Register(NewGoCollector()) - return r -} - -func chooseEncoder(req *http.Request) (encoder, string) { - accepts := goautoneg.ParseAccept(req.Header.Get(acceptHeader)) - for _, accept := range accepts { - switch { - case accept.Type == "application" && - accept.SubType == "vnd.google.protobuf" && - accept.Params["proto"] == "io.prometheus.client.MetricFamily": - switch accept.Params["encoding"] { - case "delimited": - return text.WriteProtoDelimited, DelimitedTelemetryContentType - case "text": - return text.WriteProtoText, ProtoTextTelemetryContentType - case "compact-text": - return text.WriteProtoCompactText, ProtoCompactTextTelemetryContentType - default: - continue - } - case accept.Type == "text" && - accept.SubType == "plain" && - (accept.Params["version"] == "0.0.4" || accept.Params["version"] == ""): - return text.MetricFamilyToText, TextTelemetryContentType - default: - continue - } - } - return text.MetricFamilyToText, TextTelemetryContentType -} - -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} - -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. 
So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - return true -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry_test.go deleted file mode 100644 index f30c90c06b..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry_test.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package prometheus - -import ( - "bytes" - "encoding/binary" - "net/http" - "testing" - - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" -) - -type fakeResponseWriter struct { - header http.Header - body bytes.Buffer -} - -func (r *fakeResponseWriter) Header() http.Header { - return r.header -} - -func (r *fakeResponseWriter) Write(d []byte) (l int, err error) { - return r.body.Write(d) -} - -func (r *fakeResponseWriter) WriteHeader(c int) { -} - -func testHandler(t testing.TB) { - - metricVec := NewCounterVec( - CounterOpts{ - Name: "name", - Help: "docstring", - ConstLabels: Labels{"constname": "constvalue"}, - }, - []string{"labelname"}, - ) - - metricVec.WithLabelValues("val1").Inc() - metricVec.WithLabelValues("val2").Inc() - - varintBuf := make([]byte, binary.MaxVarintLen32) - - externalMetricFamily := &dto.MetricFamily{ - Name: proto.String("externalname"), - Help: proto.String("externaldocstring"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - { - Label: []*dto.LabelPair{ - { - Name: proto.String("externalconstname"), - Value: proto.String("externalconstvalue"), - }, - { - Name: proto.String("externallabelname"), - Value: proto.String("externalval1"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(1), - }, - }, - }, - } - marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily) - if err != nil { - t.Fatal(err) - } - var externalBuf bytes.Buffer - l := binary.PutUvarint(varintBuf, uint64(len(marshaledExternalMetricFamily))) - _, err = externalBuf.Write(varintBuf[:l]) - if err != nil { - t.Fatal(err) - } - _, err = externalBuf.Write(marshaledExternalMetricFamily) - if err != nil { - t.Fatal(err) - } - externalMetricFamilyAsBytes := externalBuf.Bytes() - externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring -# TYPE externalname counter -externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1 -`) - externalMetricFamilyAsProtoText := []byte(`name: "externalname" -help: "externaldocstring" -type: COUNTER -metric: < - 
  label: <
-    name: "externalconstname"
-    value: "externalconstvalue"
-  >
-  label: <
-    name: "externallabelname"
-    value: "externalval1"
-  >
-  counter: <
-    value: 1
-  >
->
-
-`)
-	externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externalconstname" value:"externalconstvalue" > label:<name:"externallabelname" value:"externalval1" > counter:<value:1 > > 
-`)
-
-	expectedMetricFamily := &dto.MetricFamily{
-		Name: proto.String("name"),
-		Help: proto.String("docstring"),
-		Type: dto.MetricType_COUNTER.Enum(),
-		Metric: []*dto.Metric{
-			{
-				Label: []*dto.LabelPair{
-					{
-						Name:  proto.String("constname"),
-						Value: proto.String("constvalue"),
-					},
-					{
-						Name:  proto.String("labelname"),
-						Value: proto.String("val1"),
-					},
-				},
-				Counter: &dto.Counter{
-					Value: proto.Float64(1),
-				},
-			},
-			{
-				Label: []*dto.LabelPair{
-					{
-						Name:  proto.String("constname"),
-						Value: proto.String("constvalue"),
-					},
-					{
-						Name:  proto.String("labelname"),
-						Value: proto.String("val2"),
-					},
-				},
-				Counter: &dto.Counter{
-					Value: proto.Float64(1),
-				},
-			},
-		},
-	}
-	marshaledExpectedMetricFamily, err := proto.Marshal(expectedMetricFamily)
-	if err != nil {
-		t.Fatal(err)
-	}
-	var buf bytes.Buffer
-	l = binary.PutUvarint(varintBuf, uint64(len(marshaledExpectedMetricFamily)))
-	_, err = buf.Write(varintBuf[:l])
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = buf.Write(marshaledExpectedMetricFamily)
-	if err != nil {
-		t.Fatal(err)
-	}
-	expectedMetricFamilyAsBytes := buf.Bytes()
-	expectedMetricFamilyAsText := []byte(`# HELP name docstring
-# TYPE name counter
-name{constname="constvalue",labelname="val1"} 1
-name{constname="constvalue",labelname="val2"} 1
-`)
-	expectedMetricFamilyAsProtoText := []byte(`name: "name"
-help: "docstring"
-type: COUNTER
-metric: <
-  label: <
-    name: "constname"
-    value: "constvalue"
-  >
-  label: <
-    name: "labelname"
-    value: "val1"
-  >
-  counter: <
-    value: 1
-  >
->
-metric: <
-  label: <
-    name: "constname"
-    value: "constvalue"
-  >
-  label: <
-    name: "labelname"
-    value: "val2"
-  >
-  counter: <
-    value: 1
-  >
->
-
-`)
-	expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > > 
-`)
-
-	externalMetricFamilyWithSameName := &dto.MetricFamily{
-		Name: proto.String("name"),
-		Help: proto.String("inconsistent help string does not matter here"),
-		Type: dto.MetricType_COUNTER.Enum(),
-		Metric: []*dto.Metric{
-			{
-				Label: []*dto.LabelPair{
-					{
-						Name:  proto.String("constname"),
-						Value: proto.String("constvalue"),
-					},
-					{
-						Name:  proto.String("labelname"),
-						Value: proto.String("different_val"),
-					},
-				},
-				Counter: &dto.Counter{
-					Value: proto.Float64(42),
-				},
-			},
-		},
-	}
-
-	expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > > 
-`)
-
-	type output struct {
-		headers map[string]string
-		body    []byte
-	}
-
-	var scenarios = []struct {
-		headers    map[string]string
-		out        output
-		collector  Collector
-		externalMF []*dto.MetricFamily
-	}{
-		{ // 0
-			headers: map[string]string{
-				"Accept": "foo/bar;q=0.2, dings/bums;q=0.8",
-			},
-			out: output{
-				headers: map[string]string{
-					"Content-Type": `text/plain; version=0.0.4`,
-				},
-				body: []byte{},
-			},
-		},
-		{ // 1
-			headers: map[string]string{
-				"Accept": "foo/bar;q=0.2, application/quark;q=0.8",
-			},
-			out: output{
-				headers: map[string]string{
-					"Content-Type": `text/plain; version=0.0.4`,
-				},
-				body: []byte{},
-			},
-		},
-		{ // 2
-			headers: map[string]string{
-				"Accept":
"foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `text/plain; version=0.0.4`, - }, - body: []byte{}, - }, - }, - { // 3 - headers: map[string]string{ - "Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, - }, - body: []byte{}, - }, - }, - { // 4 - headers: map[string]string{ - "Accept": "application/json", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `text/plain; version=0.0.4`, - }, - body: expectedMetricFamilyAsText, - }, - collector: metricVec, - }, - { // 5 - headers: map[string]string{ - "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, - }, - body: expectedMetricFamilyAsBytes, - }, - collector: metricVec, - }, - { // 6 - headers: map[string]string{ - "Accept": "application/json", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `text/plain; version=0.0.4`, - }, - body: externalMetricFamilyAsText, - }, - externalMF: []*dto.MetricFamily{externalMetricFamily}, - }, - { // 7 - headers: map[string]string{ - "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, - }, - body: externalMetricFamilyAsBytes, - }, - externalMF: []*dto.MetricFamily{externalMetricFamily}, - }, - { // 8 - headers: map[string]string{ - "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, - }, - body: bytes.Join( - [][]byte{ - externalMetricFamilyAsBytes, - expectedMetricFamilyAsBytes, - }, - []byte{}, - ), - }, - collector: metricVec, - externalMF: []*dto.MetricFamily{externalMetricFamily}, - }, - { // 9 - headers: map[string]string{ - "Accept": "text/plain", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `text/plain; version=0.0.4`, - }, - body: []byte{}, - }, - }, - { // 10 - headers: map[string]string{ - "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `text/plain; version=0.0.4`, - }, - body: expectedMetricFamilyAsText, - }, - collector: metricVec, - }, - { // 11 - headers: map[string]string{ - "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `text/plain; version=0.0.4`, - }, - body: bytes.Join( - [][]byte{ - externalMetricFamilyAsText, - expectedMetricFamilyAsText, - }, - []byte{}, - ), - }, - collector: metricVec, - externalMF: []*dto.MetricFamily{externalMetricFamily}, - }, - { // 12 - headers: map[string]string{ - "Accept": 
"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, - }, - body: bytes.Join( - [][]byte{ - externalMetricFamilyAsBytes, - expectedMetricFamilyAsBytes, - }, - []byte{}, - ), - }, - collector: metricVec, - externalMF: []*dto.MetricFamily{externalMetricFamily}, - }, - { // 13 - headers: map[string]string{ - "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, - }, - body: bytes.Join( - [][]byte{ - externalMetricFamilyAsProtoText, - expectedMetricFamilyAsProtoText, - }, - []byte{}, - ), - }, - collector: metricVec, - externalMF: []*dto.MetricFamily{externalMetricFamily}, - }, - { // 14 - headers: map[string]string{ - "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, - }, - body: bytes.Join( - [][]byte{ - externalMetricFamilyAsProtoCompactText, - expectedMetricFamilyAsProtoCompactText, - }, - []byte{}, - ), - }, - collector: metricVec, - externalMF: []*dto.MetricFamily{externalMetricFamily}, - }, - { // 15 - headers: map[string]string{ - "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", - }, - out: output{ - headers: map[string]string{ - "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, - }, - body: bytes.Join( - [][]byte{ - externalMetricFamilyAsProtoCompactText, - expectedMetricFamilyMergedWithExternalAsProtoCompactText, - }, - []byte{}, - ), - }, - collector: metricVec, - externalMF: []*dto.MetricFamily{ - externalMetricFamily, - externalMetricFamilyWithSameName, - }, - }, - } - for i, scenario := range scenarios { - registry := newRegistry() - registry.collectChecksEnabled = true - - if scenario.collector != nil { - registry.Register(scenario.collector) - } - if scenario.externalMF != nil { - registry.metricFamilyInjectionHook = func() []*dto.MetricFamily { - return scenario.externalMF - } - } - writer := &fakeResponseWriter{ - header: http.Header{}, - } - handler := InstrumentHandler("prometheus", registry) - request, _ := http.NewRequest("GET", "/", nil) - for key, value := range scenario.headers { - request.Header.Add(key, value) - } - handler(writer, request) - - for key, value := range scenario.out.headers { - if writer.Header().Get(key) != value { - t.Errorf( - "%d. expected %q for header %q, got %q", - i, value, key, writer.Header().Get(key), - ) - } - } - - if !bytes.Equal(scenario.out.body, writer.body.Bytes()) { - t.Errorf( - "%d. 
expected %q for body, got %q", - i, scenario.out.body, writer.body.Bytes(), - ) - } - } -} - -func TestHandler(t *testing.T) { - testHandler(t) -} - -func BenchmarkHandler(b *testing.B) { - for i := 0; i < b.N; i++ { - testHandler(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index 67fe43cd74..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,536 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "hash/fnv" - "math" - "sort" - "sync" - "time" - - "github.com/beorn7/perks/quantile" - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/model" -) - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. - Observe(float64) -} - -var ( - // DefObjectives are the default Summary quantile values. - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", model.QuantileLabel, - ) -) - -// Default values for SummaryOpts. -const ( - // DefMaxAge is the default duration for which observations stay - // relevant. - DefMaxAge time.Duration = 10 * time.Minute - // DefAgeBuckets is the default number of buckets used to calculate the - // age of observations. - DefAgeBuckets = 5 - // DefBufCap is the standard buffer size for collecting Summary observations. - DefBufCap = 500 -) - -// SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. -type SummaryOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Summary (created by joining these components with - // "_"). 
Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Summary must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Summary. Mandatory! - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this - // Summary. Summaries with the same fully-qualified name must have the - // same label names in their ConstLabels. - // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // SummaryVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Summaries with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). - ConstLabels Labels - - // Objectives defines the quantile rank estimates with their respective - // absolute error. The default value is DefObjectives. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Must be positive. The default value is DefMaxAge. - MaxAge time.Duration - - // AgeBuckets is the number of buckets used to exclude observations that - // are older than MaxAge from the summary. A higher number has a - // resource penalty, so only increase it if the higher resolution is - // really required. For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). - BufCap uint32 -} - -// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge -// method of perk/quantile is actually not working as advertised - and it might -// be unfixable, as the underlying algorithm is apparently not capable of -// merging summaries in the first place. To avoid using Merge, we are currently -// adding observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. 
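Putting the options above together, a minimal usage sketch; the namespace, metric name, and observed value are illustrative.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Namespace: "api",
		Name:      "request_duration_seconds",
		Help:      "Request latency distribution.",
		// Zero values fall back to the defaults documented above:
		// DefObjectives (0.5, 0.9, 0.99), DefMaxAge (10m),
		// DefAgeBuckets (5), DefBufCap (500).
	})
	prometheus.MustRegister(latency)

	start := time.Now()
	// ... handle a request ...
	latency.Observe(time.Since(start).Seconds())
}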
- -// NewSummary creates a new Summary based on the provided SummaryOpts. -func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) - } - - for _, n := range desc.variableLabels { - if n == model.QuantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == model.QuantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if len(opts.Objectives) == 0 { - opts.Objectives = DefObjectives - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = DefBufCap - } - - s := &summary{ - desc: desc, - - objectives: opts.Objectives, - sortedObjectives: make([]float64, 0, len(opts.Objectives)), - - labelPairs: makeLabelPairs(desc, labelValues), - - hotBuf: make([]float64, 0, opts.BufCap), - coldBuf: make([]float64, 0, opts.BufCap), - streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), - } - s.headStreamExpTime = time.Now().Add(s.streamDuration) - s.hotBufExpTime = s.headStreamExpTime - - for i := uint32(0); i < opts.AgeBuckets; i++ { - s.streams = append(s.streams, s.newStream()) - } - s.headStream = s.streams[0] - - for qu := range s.objectives { - s.sortedObjectives = append(s.sortedObjectives, qu) - } - sort.Float64s(s.sortedObjectives) - - s.Init(s) // Init self-collection. - return s -} - -type summary struct { - SelfCollector - - bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. - mtx sync.Mutex // Protects every other moving part. - // Lock bufMtx before mtx if both are needed. - - desc *Desc - - objectives map[float64]float64 - sortedObjectives []float64 - - labelPairs []*dto.LabelPair - - sum float64 - cnt uint64 - - hotBuf, coldBuf []float64 - - streams []*quantile.Stream - streamDuration time.Duration - headStream *quantile.Stream - headStreamIdx int - headStreamExpTime, hotBufExpTime time.Time -} - -func (s *summary) Desc() *Desc { - return s.desc -} - -func (s *summary) Observe(v float64) { - s.bufMtx.Lock() - defer s.bufMtx.Unlock() - - now := time.Now() - if now.After(s.hotBufExpTime) { - s.asyncFlush(now) - } - s.hotBuf = append(s.hotBuf, v) - if len(s.hotBuf) == cap(s.hotBuf) { - s.asyncFlush(now) - } -} - -func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.objectives)) - - s.bufMtx.Lock() - s.mtx.Lock() - // Swap bufs even if hotBuf is empty to set new hotBufExpTime. 
- s.swapBufs(time.Now()) - s.bufMtx.Unlock() - - s.flushColdBuf() - sum.SampleCount = proto.Uint64(s.cnt) - sum.SampleSum = proto.Float64(s.sum) - - for _, rank := range s.sortedObjectives { - var q float64 - if s.headStream.Count() == 0 { - q = math.NaN() - } else { - q = s.headStream.Query(rank) - } - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - s.mtx.Unlock() - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - return nil -} - -func (s *summary) newStream() *quantile.Stream { - return quantile.NewTargeted(s.objectives) -} - -// asyncFlush needs bufMtx locked. -func (s *summary) asyncFlush(now time.Time) { - s.mtx.Lock() - s.swapBufs(now) - - // Unblock the original goroutine that was responsible for the mutation - // that triggered the compaction. But hold onto the global non-buffer - // state mutex until the operation finishes. - go func() { - s.flushColdBuf() - s.mtx.Unlock() - }() -} - -// rotateStreams needs mtx AND bufMtx locked. -func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. -func (s *summary) flushColdBuf() { - for _, v := range s.coldBuf { - for _, stream := range s.streams { - stream.Insert(v) - } - s.cnt++ - s.sum += v - } - s.coldBuf = s.coldBuf[0:0] - s.maybeRotateStreams() -} - -// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. -func (s *summary) swapBufs(now time.Time) { - if len(s.coldBuf) != 0 { - panic("coldBuf is not empty") - } - s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf - // hotBuf is now empty and gets new expiration set. - for now.After(s.hotBufExpTime) { - s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) - } -} - -type quantSort []*dto.Quantile - -func (s quantSort) Len() int { - return len(s) -} - -func (s quantSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s quantSort) Less(i, j int) bool { - return s[i].GetQuantile() < s[j].GetQuantile() -} - -// SummaryVec is a Collector that bundles a set of Summaries that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewSummaryVec. -type SummaryVec struct { - MetricVec -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &SummaryVec{ - MetricVec: MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - hash: fnv.New64a(), - newMetric: func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) - }, - }, - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Summary and not a -// Metric so that no type conversion is required. 
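A short sketch of how the SummaryVec accessors described here are typically used; metric and label names are illustrative.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "http_request_duration_seconds",
			Help: "Request latency, partitioned by method and status code.",
		},
		[]string{"method", "code"},
	)
	prometheus.MustRegister(latency)

	// Panicking shortcut for label values known to be valid:
	latency.WithLabelValues("GET", "200").Observe(0.021)

	// Error-returning variant, useful when label values come from
	// untrusted input:
	if s, err := latency.GetMetricWith(prometheus.Labels{
		"method": "POST", "code": "500",
	}); err == nil {
		s.Observe(0.342)
	}
}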
-func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Summary), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Summary and not a Metric so that no -// type conversion is required. -func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Summary), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { - return m.MetricVec.WithLabelValues(lvs...).(Summary) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *SummaryVec) With(labels Labels) Summary { - return m.MetricVec.With(labels).(Summary) -} - -type constSummary struct { - desc *Desc - count uint64 - sum float64 - quantiles map[float64]float64 - labelPairs []*dto.LabelPair -} - -func (s *constSummary) Desc() *Desc { - return s.desc -} - -func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.quantiles)) - - sum.SampleCount = proto.Uint64(s.count) - sum.SampleSum = proto.Float64(s.sum) - - for rank, q := range s.quantiles { - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - - return nil -} - -// NewConstSummary returns a metric representing a Prometheus summary with fixed -// values for the count, sum, and quantiles. As those parameters cannot be -// changed, the returned value does not implement the Summary interface (but -// only the Metric interface). Users of this package will not have much use for -// it in regular operations. However, when implementing custom Collectors, it is -// useful as a throw-away metric that is generated on the fly to send it to -// Prometheus in the Collect method. -// -// quantiles maps ranks to quantile values. For example, a median latency of -// 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} -// -// NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. -func NewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstSummary is a version of NewConstSummary that panics where -// NewConstMetric would have returned an error. 
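A sketch of the throw-away usage described above: a custom Collector emitting a const summary from externally maintained statistics. The fetchStats helper is hypothetical, standing in for whatever system the caller scrapes.

package main

import "github.com/prometheus/client_golang/prometheus"

type statsCollector struct {
	desc *prometheus.Desc
}

func newStatsCollector() *statsCollector {
	return &statsCollector{
		desc: prometheus.NewDesc(
			"app_request_duration_seconds",
			"Latency summary scraped from an external system.",
			nil, nil,
		),
	}
}

func (c *statsCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c *statsCollector) Collect(ch chan<- prometheus.Metric) {
	count, sum, quantiles := fetchStats()
	// Generated on the fly and sent straight to Prometheus.
	ch <- prometheus.MustNewConstSummary(c.desc, count, sum, quantiles)
}

// fetchStats is a hypothetical external data source.
func fetchStats() (uint64, float64, map[float64]float64) {
	return 1000, 42.5, map[float64]float64{0.5: 0.023, 0.99: 0.56}
}

func main() {
	prometheus.MustRegister(newStatsCollector())
}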
-func MustNewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) Metric { - m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) - if err != nil { - panic(err) - } - return m -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary_test.go deleted file mode 100644 index 0790cdfe72..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary_test.go +++ /dev/null @@ -1,347 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "math/rand" - "sort" - "sync" - "testing" - "testing/quick" - "time" - - dto "github.com/prometheus/client_model/go" -) - -func benchmarkSummaryObserve(w int, b *testing.B) { - b.StopTimer() - - wg := new(sync.WaitGroup) - wg.Add(w) - - g := new(sync.WaitGroup) - g.Add(1) - - s := NewSummary(SummaryOpts{}) - - for i := 0; i < w; i++ { - go func() { - g.Wait() - - for i := 0; i < b.N; i++ { - s.Observe(float64(i)) - } - - wg.Done() - }() - } - - b.StartTimer() - g.Done() - wg.Wait() -} - -func BenchmarkSummaryObserve1(b *testing.B) { - benchmarkSummaryObserve(1, b) -} - -func BenchmarkSummaryObserve2(b *testing.B) { - benchmarkSummaryObserve(2, b) -} - -func BenchmarkSummaryObserve4(b *testing.B) { - benchmarkSummaryObserve(4, b) -} - -func BenchmarkSummaryObserve8(b *testing.B) { - benchmarkSummaryObserve(8, b) -} - -func benchmarkSummaryWrite(w int, b *testing.B) { - b.StopTimer() - - wg := new(sync.WaitGroup) - wg.Add(w) - - g := new(sync.WaitGroup) - g.Add(1) - - s := NewSummary(SummaryOpts{}) - - for i := 0; i < 1000000; i++ { - s.Observe(float64(i)) - } - - for j := 0; j < w; j++ { - outs := make([]dto.Metric, b.N) - - go func(o []dto.Metric) { - g.Wait() - - for i := 0; i < b.N; i++ { - s.Write(&o[i]) - } - - wg.Done() - }(outs) - } - - b.StartTimer() - g.Done() - wg.Wait() -} - -func BenchmarkSummaryWrite1(b *testing.B) { - benchmarkSummaryWrite(1, b) -} - -func BenchmarkSummaryWrite2(b *testing.B) { - benchmarkSummaryWrite(2, b) -} - -func BenchmarkSummaryWrite4(b *testing.B) { - benchmarkSummaryWrite(4, b) -} - -func BenchmarkSummaryWrite8(b *testing.B) { - benchmarkSummaryWrite(8, b) -} - -func TestSummaryConcurrency(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test in short mode.") - } - - rand.Seed(42) - - it := func(n uint32) bool { - mutations := int(n%1e4 + 1e4) - concLevel := int(n%5 + 1) - total := mutations * concLevel - - var start, end sync.WaitGroup - start.Add(1) - end.Add(concLevel) - - sum := NewSummary(SummaryOpts{ - Name: "test_summary", - Help: "helpless", - }) - - allVars := make([]float64, total) - var sampleSum float64 - for i := 0; i < concLevel; i++ { - vals := make([]float64, mutations) - for j := 0; j < mutations; j++ { - v := rand.NormFloat64() - vals[j] = v - allVars[i*mutations+j] = v - 
sampleSum += v - } - - go func(vals []float64) { - start.Wait() - for _, v := range vals { - sum.Observe(v) - } - end.Done() - }(vals) - } - sort.Float64s(allVars) - start.Done() - end.Wait() - - m := &dto.Metric{} - sum.Write(m) - if got, want := int(*m.Summary.SampleCount), total; got != want { - t.Errorf("got sample count %d, want %d", got, want) - } - if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { - t.Errorf("got sample sum %f, want %f", got, want) - } - - objectives := make([]float64, 0, len(DefObjectives)) - for qu := range DefObjectives { - objectives = append(objectives, qu) - } - sort.Float64s(objectives) - - for i, wantQ := range objectives { - ε := DefObjectives[wantQ] - gotQ := *m.Summary.Quantile[i].Quantile - gotV := *m.Summary.Quantile[i].Value - min, max := getBounds(allVars, wantQ, ε) - if gotQ != wantQ { - t.Errorf("got quantile %f, want %f", gotQ, wantQ) - } - if gotV < min || gotV > max { - t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max) - } - } - return true - } - - if err := quick.Check(it, nil); err != nil { - t.Error(err) - } -} - -func TestSummaryVecConcurrency(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test in short mode.") - } - - rand.Seed(42) - - objectives := make([]float64, 0, len(DefObjectives)) - for qu := range DefObjectives { - - objectives = append(objectives, qu) - } - sort.Float64s(objectives) - - it := func(n uint32) bool { - mutations := int(n%1e4 + 1e4) - concLevel := int(n%7 + 1) - vecLength := int(n%3 + 1) - - var start, end sync.WaitGroup - start.Add(1) - end.Add(concLevel) - - sum := NewSummaryVec( - SummaryOpts{ - Name: "test_summary", - Help: "helpless", - }, - []string{"label"}, - ) - - allVars := make([][]float64, vecLength) - sampleSums := make([]float64, vecLength) - for i := 0; i < concLevel; i++ { - vals := make([]float64, mutations) - picks := make([]int, mutations) - for j := 0; j < mutations; j++ { - v := rand.NormFloat64() - vals[j] = v - pick := rand.Intn(vecLength) - picks[j] = pick - allVars[pick] = append(allVars[pick], v) - sampleSums[pick] += v - } - - go func(vals []float64) { - start.Wait() - for i, v := range vals { - sum.WithLabelValues(string('A' + picks[i])).Observe(v) - } - end.Done() - }(vals) - } - for _, vars := range allVars { - sort.Float64s(vars) - } - start.Done() - end.Wait() - - for i := 0; i < vecLength; i++ { - m := &dto.Metric{} - s := sum.WithLabelValues(string('A' + i)) - s.Write(m) - if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { - t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) - } - if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { - t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want) - } - for j, wantQ := range objectives { - ε := DefObjectives[wantQ] - gotQ := *m.Summary.Quantile[j].Quantile - gotV := *m.Summary.Quantile[j].Value - min, max := getBounds(allVars[i], wantQ, ε) - if gotQ != wantQ { - t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ) - } - if gotV < min || gotV > max { - t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max) - } - } - } - return true - } - - if err := quick.Check(it, nil); err != nil { - t.Error(err) - } -} - -func TestSummaryDecay(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test in short mode.") - // More because it depends on timing than because it is particularly long... 
-	}
-
-	sum := NewSummary(SummaryOpts{
-		Name:       "test_summary",
-		Help:       "helpless",
-		MaxAge:     100 * time.Millisecond,
-		Objectives: map[float64]float64{0.1: 0.001},
-		AgeBuckets: 10,
-	})
-
-	m := &dto.Metric{}
-	i := 0
-	tick := time.NewTicker(time.Millisecond)
-	for _ = range tick.C {
-		i++
-		sum.Observe(float64(i))
-		if i%10 == 0 {
-			sum.Write(m)
-			if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {
-				t.Errorf("%d. got %f, want %f", i, got, want)
-			}
-			m.Reset()
-		}
-		if i >= 1000 {
-			break
-		}
-	}
-	tick.Stop()
-	// Wait for MaxAge without observations and make sure quantiles are NaN.
-	time.Sleep(100 * time.Millisecond)
-	sum.Write(m)
-	if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
-		t.Errorf("got %f, want NaN after expiration", got)
-	}
-}
-
-func getBounds(vars []float64, q, ε float64) (min, max float64) {
-	// TODO: This currently tolerates an error of up to 2*ε. The error must
-	// be at most ε, but for some reason, it's sometimes slightly
-	// higher. That's a bug.
-	n := float64(len(vars))
-	lower := int((q - 2*ε) * n)
-	upper := int(math.Ceil((q + 2*ε) * n))
-	min = vars[0]
-	if lower > 1 {
-		min = vars[lower-1]
-	}
-	max = vars[len(vars)-1]
-	if upper < len(vars) {
-		max = vars[upper-1]
-	}
-	return
-}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go
deleted file mode 100644
index c65ab1c531..0000000000
--- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import "hash/fnv"
-
-// Untyped is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// An Untyped metric works the same as a Gauge. The only difference is that
-// no type information is implied.
-//
-// To create Untyped instances, use NewUntyped.
-type Untyped interface {
-	Metric
-	Collector
-
-	// Set sets the Untyped metric to an arbitrary value.
-	Set(float64)
-	// Inc increments the Untyped metric by 1.
-	Inc()
-	// Dec decrements the Untyped metric by 1.
-	Dec()
-	// Add adds the given value to the Untyped metric. (The value can be
-	// negative, resulting in a decrease.)
-	Add(float64)
-	// Sub subtracts the given value from the Untyped metric. (The value can
-	// be negative, resulting in an increase.)
-	Sub(float64)
-}
-
-// UntypedOpts is an alias for Opts. See there for doc comments.
-type UntypedOpts Opts
-
-// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
-func NewUntyped(opts UntypedOpts) Untyped { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, 0) -} - -// UntypedVec is a Collector that bundles a set of Untyped metrics that all -// share the same Desc, but have different values for their variable -// labels. This is used if you want to count the same thing partitioned by -// various dimensions. Create instances with NewUntypedVec. -type UntypedVec struct { - MetricVec -} - -// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &UntypedVec{ - MetricVec: MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - hash: fnv.New64a(), - newMetric: func(lvs ...string) Metric { - return newValue(desc, UntypedValue, 0, lvs...) - }, - }, - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns an Untyped and not a -// Metric so that no type conversion is required. -func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns an Untyped and not a Metric so that no -// type conversion is required. -func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { - return m.MetricVec.WithLabelValues(lvs...).(Untyped) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *UntypedVec) With(labels Labels) Untyped { - return m.MetricVec.With(labels).(Untyped) -} - -// UntypedFunc is an Untyped whose value is determined at collect time by -// calling a provided function. -// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. 
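A brief sketch covering both constructors described above; the metric names are illustrative, and runtime.NumGoroutine is safe to call concurrently, as the doc comment requires of the provided function.

package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	u := prometheus.NewUntyped(prometheus.UntypedOpts{
		Name: "processing_backlog",
		Help: "Items waiting to be processed (no type semantics implied).",
	})
	prometheus.MustRegister(u)
	u.Set(17)
	u.Inc()

	// The value is pulled at collect time by calling the closure.
	prometheus.MustRegister(prometheus.NewUntypedFunc(
		prometheus.UntypedOpts{
			Name: "goroutines",
			Help: "Number of goroutines currently running.",
		},
		func() float64 { return float64(runtime.NumGoroutine()) },
	))
}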
-func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go deleted file mode 100644 index b54ac11e88..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "math" - "sort" - "sync/atomic" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" -) - -// ValueType is an enumeration of metric types that represent a simple value. -type ValueType int - -// Possible values for the ValueType enum. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -// value is a generic metric for simple values. It implements Metric, Collector, -// Counter, Gauge, and Untyped. Its effective type is determined by -// ValueType. This is a low-level building block used by the library to back the -// implementations of Counter, Gauge, and Untyped. -type value struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - SelfCollector - - desc *Desc - valType ValueType - labelPairs []*dto.LabelPair -} - -// newValue returns a newly allocated value with the given Desc, ValueType, -// sample value and label values. It panics if the number of label -// values is different from the number of variable labels in Desc.
-func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { - if len(labelValues) != len(desc.variableLabels) { - panic(errInconsistentCardinality) - } - result := &value{ - desc: desc, - valType: valueType, - valBits: math.Float64bits(val), - labelPairs: makeLabelPairs(desc, labelValues), - } - result.Init(result) - return result -} - -func (v *value) Desc() *Desc { - return v.desc -} - -func (v *value) Set(val float64) { - atomic.StoreUint64(&v.valBits, math.Float64bits(val)) -} - -func (v *value) Inc() { - v.Add(1) -} - -func (v *value) Dec() { - v.Add(-1) -} - -func (v *value) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&v.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { - return - } - } -} - -func (v *value) Sub(val float64) { - v.Add(val * -1) -} - -func (v *value) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) - return populateMetric(v.valType, val, v.labelPairs, out) -} - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - SelfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: makeLabelPairs(desc, nil), - } - result.Init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, out) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc. -func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) 
- if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, out) -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - m *dto.Metric, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v)} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels) == 0 { - // Moderately fast path. - return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(labelValues[i]), - }) - } - for _, lp := range desc.constLabelPairs { - labelPairs = append(labelPairs, lp) - } - sort.Sort(LabelPairSorter(labelPairs)) - return labelPairs -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index a1f3bdf37d..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bytes" - "fmt" - "hash" - "sync" -) - -// MetricVec is a Collector to bundle metrics of the same name that -// differ in their label values. MetricVec is usually not used directly but as a -// building block for implementations of vectors of a given metric -// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already -// provided in this package. -type MetricVec struct { - mtx sync.RWMutex // Protects not only children, but also hash and buf. - children map[uint64]Metric - desc *Desc - - // hash is our own hash instance to avoid repeated allocations. - hash hash.Hash64 - // buf is used to copy string contents into it for hashing, - // again to avoid allocations. - buf bytes.Buffer - - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. The length of the returned slice -// is always one. -func (m *MetricVec) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. 
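Tying value.go to the Collector interface: a sketch of a hypothetical custom Collector that mints a fresh, throw-away metric per scrape with MustNewConstMetric, as the NewConstMetric comment above suggests; the name disk_usage_bytes and the 42e9 reading stand in for real data:

package main

import "github.com/prometheus/client_golang/prometheus"

// diskUsageCollector is a hypothetical custom Collector.
type diskUsageCollector struct {
	desc *prometheus.Desc
}

func newDiskUsageCollector() *diskUsageCollector {
	return &diskUsageCollector{
		desc: prometheus.NewDesc(
			"disk_usage_bytes", // hypothetical metric name
			"Bytes used per mount point.",
			[]string{"mount"}, // one variable label
			nil,               // no constant labels
		),
	}
}

func (c *diskUsageCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c *diskUsageCollector) Collect(ch chan<- prometheus.Metric) {
	// 42e9 stands in for a real statfs call.
	ch <- prometheus.MustNewConstMetric(
		c.desc, prometheus.GaugeValue, 42e9, "/data",
	)
}

func main() {
	prometheus.MustRegister(newDiskUsageCollector())
}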
-func (m *MetricVec) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metric := range m.children { - ch <- metric - } -} - -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created. -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it at its start value (e.g. a Summary or -// Histogram without any observations). See also the SummaryVec example. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - return m.getOrCreateMetric(h, lvs...), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - lvs := make([]string, len(labels)) - for i, label := range m.desc.variableLabels { - lvs[i] = labels[label] - } - return m.getOrCreateMetric(h, lvs...), nil -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics if an error -// occurs. The method allows neat syntax like: -// httpReqs.WithLabelValues("404", "POST").Inc() -func (m *MetricVec) WithLabelValues(lvs ...string) Metric { - metric, err := m.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return metric -} - -// With works as GetMetricWith, but panics if an error occurs. 
The method allows -// neat syntax like: -// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() -func (m *MetricVec) With(labels Labels) Metric { - metric, err := m.GetMetricWith(labels) - if err != nil { - panic(err) - } - return metric -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual Metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - if _, has := m.children[h]; !has { - return false - } - delete(m.children, h) - return true -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in the Desc of the MetricVec. However, such -// inconsistent Labels can never match an actual Metric, so the method will -// always return false in that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *MetricVec) Delete(labels Labels) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabels(labels) - if err != nil { - return false - } - if _, has := m.children[h]; !has { - return false - } - delete(m.children, h) - return true -} - -// Reset deletes all metrics in this vector. -func (m *MetricVec) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.children { - delete(m.children, h) - } -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if len(vals) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - m.hash.Reset() - for _, val := range vals { - m.buf.Reset() - m.buf.WriteString(val) - m.hash.Write(m.buf.Bytes()) - } - return m.hash.Sum64(), nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if len(labels) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - m.hash.Reset() - for _, label := range m.desc.variableLabels { - val, ok := labels[label] - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - m.buf.Reset() - m.buf.WriteString(val) - m.hash.Write(m.buf.Bytes()) - } - return m.hash.Sum64(), nil -} - -func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric { - metric, ok := m.children[hash] - if !ok { - // Copy labelValues. Otherwise, they would be allocated even if we don't go - // down this code path. - copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...) - metric = m.newMetric(copiedLabelValues...) 
- m.children[hash] = metric - } - return metric -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec_test.go deleted file mode 100644 index 0e9431e656..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "hash/fnv" - "testing" -) - -func TestDelete(t *testing.T) { - desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil) - vec := MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - hash: fnv.New64a(), - newMetric: func(lvs ...string) Metric { - return newValue(desc, UntypedValue, 0, lvs...) - }, - } - - if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { - t.Errorf("got %v, want %v", got, want) - } - - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) - if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { - t.Errorf("got %v, want %v", got, want) - } - if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { - t.Errorf("got %v, want %v", got, want) - } - - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) - if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { - t.Errorf("got %v, want %v", got, want) - } - if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want { - t.Errorf("got %v, want %v", got, want) - } - - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) - if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { - t.Errorf("got %v, want %v", got, want) - } - if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want { - t.Errorf("got %v, want %v", got, want) - } -} - -func TestDeleteLabelValues(t *testing.T) { - desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil) - vec := MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - hash: fnv.New64a(), - newMetric: func(lvs ...string) Metric { - return newValue(desc, UntypedValue, 0, lvs...) 
- }, - } - - if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { - t.Errorf("got %v, want %v", got, want) - } - - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) - if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { - t.Errorf("got %v, want %v", got, want) - } - if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { - t.Errorf("got %v, want %v", got, want) - } - - vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) - if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { - t.Errorf("got %v, want %v", got, want) - } - if got, want := vec.DeleteLabelValues("v1"), false; got != want { - t.Errorf("got %v, want %v", got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go deleted file mode 100644 index c315eec36f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package text - -import ( - "bytes" - "compress/gzip" - "io" - "io/ioutil" - "testing" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - - dto "github.com/prometheus/client_model/go" -) - -// Benchmarks to show how much penalty text format parsing actually inflicts. -// -// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4. -// -// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op -// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op -// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op -// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op -// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op -// -// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations. -// Without compression, it needs ~7x longer, but with compression (the more relevant scenario), -// the difference becomes less relevant, only ~4x. -// -// The test data contains 248 samples. -// -// BenchmarkProcessor002ParseOnly in the extraction package is not quite -// comparable to the benchmarks here, but it gives an idea: JSON parsing is even -// slower than text parsing and needs a comparable amount of allocs. - -// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric -// family DTOs. -func BenchmarkParseText(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("testdata/text") - if err != nil { - b.Fatal(err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil { - b.Fatal(err) - } - } -} - -// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape -// into metric family DTOs. 
-func BenchmarkParseTextGzip(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("testdata/text.gz") - if err != nil { - b.Fatal(err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - in, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - b.Fatal(err) - } - if _, err := parser.TextToMetricFamilies(in); err != nil { - b.Fatal(err) - } - } -} - -// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into -// metric family DTOs. Note that this does not build a map of metric families -// (as the text version does), because it is not required for Prometheus -// ingestion either. (However, it is required for the text-format parsing, as -// the metric family might be sprinkled all over the text, while the -// protobuf-format guarantees bundling at one place.) -func BenchmarkParseProto(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("testdata/protobuf") - if err != nil { - b.Fatal(err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - family := &dto.MetricFamily{} - in := bytes.NewReader(data) - for { - family.Reset() - if _, err := pbutil.ReadDelimited(in, family); err != nil { - if err == io.EOF { - break - } - b.Fatal(err) - } - } - } -} - -// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped -// protobuf format. -func BenchmarkParseProtoGzip(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("testdata/protobuf.gz") - if err != nil { - b.Fatal(err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - family := &dto.MetricFamily{} - in, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - b.Fatal(err) - } - for { - family.Reset() - if _, err := pbutil.ReadDelimited(in, family); err != nil { - if err == io.EOF { - break - } - b.Fatal(err) - } - } - } -} - -// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed -// metric family DTOs into a map. This is not happening during Prometheus -// ingestion. It is just here to measure the overhead of that map creation and -// separate it from the overhead of the text format parsing. -func BenchmarkParseProtoMap(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("testdata/protobuf") - if err != nil { - b.Fatal(err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - families := map[string]*dto.MetricFamily{} - in := bytes.NewReader(data) - for { - family := &dto.MetricFamily{} - if _, err := pbutil.ReadDelimited(in, family); err != nil { - if err == io.EOF { - break - } - b.Fatal(err) - } - families[family.GetName()] = family - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create.go deleted file mode 100644 index 57def3219a..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package text contains helper functions to parse and create text-based -// exchange formats. The package currently supports (only) version 0.0.4 of the -// exchange format. Should other versions be supported in the future, some -// versioning scheme has to be applied. Possibilities include separate packages -// or separate functions. The best way depends on the nature of future changes, -// which is the reason why no versioning scheme has been applied prematurely -// here. -package text - -import ( - "bytes" - "fmt" - "io" - "math" - "strings" - - "github.com/prometheus/client_golang/model" - dto "github.com/prometheus/client_model/go" -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. This function does not perform checks on the -// content of the metric and label names, i.e. invalid metric or label names -// will result in invalid text format output. -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { - var written int - - // Fail-fast checks. - if len(in.Metric) == 0 { - return written, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return written, fmt.Errorf("MetricFamily has no name: %s", in) - } - if in.Type == nil { - return written, fmt.Errorf("MetricFamily has no type: %s", in) - } - - // Comments, first HELP, then TYPE. - if in.Help != nil { - n, err := fmt.Fprintf( - out, "# HELP %s %s\n", - name, escapeString(*in.Help, false), - ) - written += n - if err != nil { - return written, err - } - } - metricType := in.GetType() - n, err := fmt.Fprintf( - out, "# TYPE %s %s\n", - name, strings.ToLower(metricType.String()), - ) - written += n - if err != nil { - return written, err - } - - // Finally the samples, one line for each. 
- for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Counter.GetValue(), - out, - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Gauge.GetValue(), - out, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Untyped.GetValue(), - out, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - name, metric, - model.QuantileLabel, fmt.Sprint(q.GetQuantile()), - q.GetValue(), - out, - ) - written += n - if err != nil { - return written, err - } - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Summary.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Summary.GetSampleCount()), - out, - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, q := range metric.Histogram.Bucket { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, fmt.Sprint(q.GetUpperBound()), - float64(q.GetCumulativeCount()), - out, - ) - written += n - if err != nil { - return written, err - } - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, "+Inf", - float64(metric.Histogram.GetSampleCount()), - out, - ) - if err != nil { - return written, err - } - written += n - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Histogram.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Histogram.GetSampleCount()), - out, - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeSample writes a single sample in text format to out, given the metric -// name, the metric proto message itself, optionally an additional label name -// and value (use empty strings if not required), and the value. The function -// returns the number of bytes written and any error encountered. 
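To make the writing path above concrete, a sketch of calling MetricFamilyToText on a hand-built counter family; the family name and label are invented, the import paths assume the vendored layout, and the expected output appears in the comment:

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	"github.com/prometheus/client_golang/text"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"), // hypothetical name
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			&dto.Metric{
				Label: []*dto.LabelPair{
					&dto.LabelPair{
						Name:  proto.String("code"),
						Value: proto.String("200"),
					},
				},
				Counter: &dto.Counter{Value: proto.Float64(1027)},
			},
		},
	}
	// Writes:
	// # HELP http_requests_total Total HTTP requests.
	// # TYPE http_requests_total counter
	// http_requests_total{code="200"} 1027
	text.MetricFamilyToText(os.Stdout, mf)
}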
-func writeSample( - name string, - metric *dto.Metric, - additionalLabelName, additionalLabelValue string, - value float64, - out io.Writer, -) (int, error) { - var written int - n, err := fmt.Fprint(out, name) - written += n - if err != nil { - return written, err - } - n, err = labelPairsToText( - metric.Label, - additionalLabelName, additionalLabelValue, - out, - ) - written += n - if err != nil { - return written, err - } - n, err = fmt.Fprintf(out, " %v", value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - n, err = out.Write([]byte{'\n'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -// labelPairsToText converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'out'. An empty slice in combination with an -// empty string 'additionalLabelName' results in nothing being -// written. Otherwise, the label pairs are written, escaped as required by the -// text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. -func labelPairsToText( - in []*dto.LabelPair, - additionalLabelName, additionalLabelValue string, - out io.Writer, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var written int - separator := '{' - for _, lp := range in { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, lp.GetName(), escapeString(lp.GetValue(), true), - ) - written += n - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, additionalLabelName, - escapeString(additionalLabelValue, true), - ) - written += n - if err != nil { - return written, err - } - } - n, err := out.Write([]byte{'}'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -// escapeString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -func escapeString(v string, includeDoubleQuote bool) string { - result := bytes.NewBuffer(make([]byte, 0, len(v))) - for _, c := range v { - switch { - case c == '\\': - result.WriteString(`\\`) - case includeDoubleQuote && c == '"': - result.WriteString(`\"`) - case c == '\n': - result.WriteString(`\n`) - default: - result.WriteRune(c) - } - } - return result.String() -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create_test.go deleted file mode 100644 index fe938de80c..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create_test.go +++ /dev/null @@ -1,439 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package text - -import ( - "bytes" - "math" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" -) - -func testCreate(t testing.TB) { - var scenarios = []struct { - in *dto.MetricFamily - out string - }{ - // 0: Counter, NaN as value, timestamp given. - { - in: &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("two-line\n doc str\\ing"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val1"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("basevalue"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(math.NaN()), - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("basevalue"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(.23), - }, - TimestampMs: proto.Int64(1234567890), - }, - }, - }, - out: `# HELP name two-line\n doc str\\ing -# TYPE name counter -name{labelname="val1",basename="basevalue"} NaN -name{labelname="val2",basename="basevalue"} 0.23 1234567890 -`, - }, - // 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values. - { - in: &dto.MetricFamily{ - Name: proto.String("gauge_name"), - Help: proto.String("gauge\ndoc\nstr\"ing"), - Type: dto.MetricType_GAUGE.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("name_1"), - Value: proto.String("val with\nnew line"), - }, - &dto.LabelPair{ - Name: proto.String("name_2"), - Value: proto.String("val with \\backslash and \"quotes\""), - }, - }, - Gauge: &dto.Gauge{ - Value: proto.Float64(math.Inf(+1)), - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("name_1"), - Value: proto.String("Björn"), - }, - &dto.LabelPair{ - Name: proto.String("name_2"), - Value: proto.String("佖佥"), - }, - }, - Gauge: &dto.Gauge{ - Value: proto.Float64(3.14E42), - }, - }, - }, - }, - out: `# HELP gauge_name gauge\ndoc\nstr"ing -# TYPE gauge_name gauge -gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf -gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42 -`, - }, - // 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label. - { - in: &dto.MetricFamily{ - Name: proto.String("untyped_name"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("name_1"), - Value: proto.String("value 1"), - }, - }, - Untyped: &dto.Untyped{ - Value: proto.Float64(-1.23e-45), - }, - }, - }, - }, - out: `# TYPE untyped_name untyped -untyped_name -Inf -untyped_name{name_1="value 1"} -1.23e-45 -`, - }, - // 3: Summary. 
- { - in: &dto.MetricFamily{ - Name: proto.String("summary_name"), - Help: proto.String("summary docstring"), - Type: dto.MetricType_SUMMARY.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Summary: &dto.Summary{ - SampleCount: proto.Uint64(42), - SampleSum: proto.Float64(-3.4567), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.5), - Value: proto.Float64(-1.23), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.9), - Value: proto.Float64(.2342354), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.99), - Value: proto.Float64(0), - }, - }, - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("name_1"), - Value: proto.String("value 1"), - }, - &dto.LabelPair{ - Name: proto.String("name_2"), - Value: proto.String("value 2"), - }, - }, - Summary: &dto.Summary{ - SampleCount: proto.Uint64(4711), - SampleSum: proto.Float64(2010.1971), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.5), - Value: proto.Float64(1), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.9), - Value: proto.Float64(2), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.99), - Value: proto.Float64(3), - }, - }, - }, - }, - }, - }, - out: `# HELP summary_name summary docstring -# TYPE summary_name summary -summary_name{quantile="0.5"} -1.23 -summary_name{quantile="0.9"} 0.2342354 -summary_name{quantile="0.99"} 0 -summary_name_sum -3.4567 -summary_name_count 42 -summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1 -summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2 -summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3 -summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971 -summary_name_count{name_1="value 1",name_2="value 2"} 4711 -`, - }, - // 4: Histogram - { - in: &dto.MetricFamily{ - Name: proto.String("request_duration_microseconds"), - Help: proto.String("The response latency."), - Type: dto.MetricType_HISTOGRAM.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Histogram: &dto.Histogram{ - SampleCount: proto.Uint64(2693), - SampleSum: proto.Float64(1756047.3), - Bucket: []*dto.Bucket{ - &dto.Bucket{ - UpperBound: proto.Float64(100), - CumulativeCount: proto.Uint64(123), - }, - &dto.Bucket{ - UpperBound: proto.Float64(120), - CumulativeCount: proto.Uint64(412), - }, - &dto.Bucket{ - UpperBound: proto.Float64(144), - CumulativeCount: proto.Uint64(592), - }, - &dto.Bucket{ - UpperBound: proto.Float64(172.8), - CumulativeCount: proto.Uint64(1524), - }, - &dto.Bucket{ - UpperBound: proto.Float64(math.Inf(+1)), - CumulativeCount: proto.Uint64(2693), - }, - }, - }, - }, - }, - }, - out: `# HELP request_duration_microseconds The response latency. -# TYPE request_duration_microseconds histogram -request_duration_microseconds_bucket{le="100"} 123 -request_duration_microseconds_bucket{le="120"} 412 -request_duration_microseconds_bucket{le="144"} 592 -request_duration_microseconds_bucket{le="172.8"} 1524 -request_duration_microseconds_bucket{le="+Inf"} 2693 -request_duration_microseconds_sum 1.7560473e+06 -request_duration_microseconds_count 2693 -`, - }, - // 5: Histogram with missing +Inf bucket. 
- { - in: &dto.MetricFamily{ - Name: proto.String("request_duration_microseconds"), - Help: proto.String("The response latency."), - Type: dto.MetricType_HISTOGRAM.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Histogram: &dto.Histogram{ - SampleCount: proto.Uint64(2693), - SampleSum: proto.Float64(1756047.3), - Bucket: []*dto.Bucket{ - &dto.Bucket{ - UpperBound: proto.Float64(100), - CumulativeCount: proto.Uint64(123), - }, - &dto.Bucket{ - UpperBound: proto.Float64(120), - CumulativeCount: proto.Uint64(412), - }, - &dto.Bucket{ - UpperBound: proto.Float64(144), - CumulativeCount: proto.Uint64(592), - }, - &dto.Bucket{ - UpperBound: proto.Float64(172.8), - CumulativeCount: proto.Uint64(1524), - }, - }, - }, - }, - }, - }, - out: `# HELP request_duration_microseconds The response latency. -# TYPE request_duration_microseconds histogram -request_duration_microseconds_bucket{le="100"} 123 -request_duration_microseconds_bucket{le="120"} 412 -request_duration_microseconds_bucket{le="144"} 592 -request_duration_microseconds_bucket{le="172.8"} 1524 -request_duration_microseconds_bucket{le="+Inf"} 2693 -request_duration_microseconds_sum 1.7560473e+06 -request_duration_microseconds_count 2693 -`, - }, - } - - for i, scenario := range scenarios { - out := bytes.NewBuffer(make([]byte, 0, len(scenario.out))) - n, err := MetricFamilyToText(out, scenario.in) - if err != nil { - t.Errorf("%d. error: %s", i, err) - continue - } - if expected, got := len(scenario.out), n; expected != got { - t.Errorf( - "%d. expected %d bytes written, got %d", - i, expected, got, - ) - } - if expected, got := scenario.out, out.String(); expected != got { - t.Errorf( - "%d. expected out=%q, got %q", - i, expected, got, - ) - } - } - -} - -func TestCreate(t *testing.T) { - testCreate(t) -} - -func BenchmarkCreate(b *testing.B) { - for i := 0; i < b.N; i++ { - testCreate(b) - } -} - -func testCreateError(t testing.TB) { - var scenarios = []struct { - in *dto.MetricFamily - err string - }{ - // 0: No metric. - { - in: &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("doc string"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{}, - }, - err: "MetricFamily has no metrics", - }, - // 1: No metric name. - { - in: &dto.MetricFamily{ - Help: proto.String("doc string"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - }, - }, - err: "MetricFamily has no name", - }, - // 2: No metric type. - { - in: &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("doc string"), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - }, - }, - err: "MetricFamily has no type", - }, - // 3: Wrong type. - { - in: &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("doc string"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - }, - }, - err: "expected counter in metric", - }, - } - - for i, scenario := range scenarios { - var out bytes.Buffer - _, err := MetricFamilyToText(&out, scenario.in) - if err == nil { - t.Errorf("%d. expected error, got nil", i) - continue - } - if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { - t.Errorf( - "%d. 
expected error starting with %q, got %q", - i, expected, got, - ) - } - } - -} - -func TestCreateError(t *testing.T) { - testCreateError(t) -} - -func BenchmarkCreateError(b *testing.B) { - for i := 0; i < b.N; i++ { - testCreateError(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse.go deleted file mode 100644 index 2f337a8094..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse.go +++ /dev/null @@ -1,746 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package text - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/client_golang/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// Parser is used to parse the simple and flat text-based exchange format. Its -// nil value is ready to use. -type Parser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. - lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. - currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. 
It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. -// -// This method must not be called concurrently. If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *Parser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - return p.metricFamiliesByName, p.err -} - -func (p *Parser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *Parser) startOfLine() stateFn { - p.lineCount++ - if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *Parser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. 
- for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *Parser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *Parser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *Parser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *Parser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). 
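Stepping back from the individual states: a sketch of driving the parser end to end via TextToMetricFamilies, with an invented two-sample input; error handling is kept minimal on purpose:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/text"
)

func main() {
	input := `# TYPE requests_total counter
requests_total{code="200"} 1027
requests_total{code="404"} 3
`
	var parser text.Parser // the zero value is ready to use
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for name, mf := range families {
		fmt.Println(name, "->", len(mf.GetMetric()), "metrics")
	}
}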
-func (p *Parser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if p.currentMetric.Histogram == nil { - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *Parser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. 
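-		// (The text format only allows an integer timestamp here, interpreted
-		// as milliseconds since the Unix epoch; it ends up in TimestampMs below.)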
-		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
-		return nil
-	}
-	p.currentMetric.TimestampMs = proto.Int64(timestamp)
-	if p.readTokenUntilNewline(false); p.err != nil {
-		return nil // Unexpected end of input.
-	}
-	if p.currentToken.Len() > 0 {
-		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
-		return nil
-	}
-	return p.startOfLine
-}
-
-// readingHelp represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the docstring after 'HELP'.
-func (p *Parser) readingHelp() stateFn {
-	if p.currentMF.Help != nil {
-		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
-		return nil
-	}
-	// Rest of line is the docstring.
-	if p.readTokenUntilNewline(true); p.err != nil {
-		return nil // Unexpected end of input.
-	}
-	p.currentMF.Help = proto.String(p.currentToken.String())
-	return p.startOfLine
-}
-
-// readingType represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the type hint after 'TYPE'.
-func (p *Parser) readingType() stateFn {
-	if p.currentMF.Type != nil {
-		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
-		return nil
-	}
-	// Rest of line is the type.
-	if p.readTokenUntilNewline(false); p.err != nil {
-		return nil // Unexpected end of input.
-	}
-	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
-	if !ok {
-		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
-		return nil
-	}
-	p.currentMF.Type = dto.MetricType(metricType).Enum()
-	return p.startOfLine
-}
-
-// parseError sets p.err to a ParseError at the current line with the given
-// message.
-func (p *Parser) parseError(msg string) {
-	p.err = ParseError{
-		Line: p.lineCount,
-		Msg:  msg,
-	}
-}
-
-// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
-// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
-func (p *Parser) skipBlankTab() {
-	for {
-		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
-			return
-		}
-	}
-}
-
-// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
-// anything if p.currentByte is neither ' ' nor '\t'.
-func (p *Parser) skipBlankTabIfCurrentBlankTab() {
-	if isBlankOrTab(p.currentByte) {
-		p.skipBlankTab()
-	}
-}
-
-// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
-// first byte considered is the byte already read (now in p.currentByte). The
-// first whitespace byte encountered is still copied into p.currentByte, but not
-// into p.currentToken.
-func (p *Parser) readTokenUntilWhitespace() {
-	p.currentToken.Reset()
-	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
-		p.currentToken.WriteByte(p.currentByte)
-		p.currentByte, p.err = p.buf.ReadByte()
-	}
-}
-
-// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
-// byte considered is the byte already read (now in p.currentByte). The first
-// newline byte encountered is still copied into p.currentByte, but not into
-// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
-// other escape sequences are invalid and cause an error.
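-// (Label values recognize the same two escape sequences, plus '\"', in
-// readTokenAsLabelValue below.)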
-func (p *Parser) readTokenUntilNewline(recognizeEscapeSequence bool) {
-	p.currentToken.Reset()
-	escaped := false
-	for p.err == nil {
-		if recognizeEscapeSequence && escaped {
-			switch p.currentByte {
-			case '\\':
-				p.currentToken.WriteByte(p.currentByte)
-			case 'n':
-				p.currentToken.WriteByte('\n')
-			default:
-				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
-				return
-			}
-			escaped = false
-		} else {
-			switch p.currentByte {
-			case '\n':
-				return
-			case '\\':
-				escaped = true
-			default:
-				p.currentToken.WriteByte(p.currentByte)
-			}
-		}
-		p.currentByte, p.err = p.buf.ReadByte()
-	}
-}
-
-// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a metric name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *Parser) readTokenAsMetricName() {
-	p.currentToken.Reset()
-	if !isValidMetricNameStart(p.currentByte) {
-		return
-	}
-	for {
-		p.currentToken.WriteByte(p.currentByte)
-		p.currentByte, p.err = p.buf.ReadByte()
-		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
-			return
-		}
-	}
-}
-
-// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a label name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *Parser) readTokenAsLabelName() {
-	p.currentToken.Reset()
-	if !isValidLabelNameStart(p.currentByte) {
-		return
-	}
-	for {
-		p.currentToken.WriteByte(p.currentByte)
-		p.currentByte, p.err = p.buf.ReadByte()
-		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
-			return
-		}
-	}
-}
-
-// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
-// In contrast to the other 'readTokenAs...' functions, which start with the
-// last read byte in p.currentByte, this method ignores p.currentByte and starts
-// by reading a new byte from p.buf. The first byte not part of a label value
-// is still copied into p.currentByte, but not into p.currentToken.
-func (p *Parser) readTokenAsLabelValue() {
-	p.currentToken.Reset()
-	escaped := false
-	for {
-		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
-			return
-		}
-		if escaped {
-			switch p.currentByte {
-			case '"', '\\':
-				p.currentToken.WriteByte(p.currentByte)
-			case 'n':
-				p.currentToken.WriteByte('\n')
-			default:
-				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
-				return
-			}
-			escaped = false
-			continue
-		}
-		switch p.currentByte {
-		case '"':
-			return
-		case '\n':
-			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
-			return
-		case '\\':
-			escaped = true
-		default:
-			p.currentToken.WriteByte(p.currentByte)
-		}
-	}
-}
-
-func (p *Parser) setOrCreateCurrentMF() {
-	p.currentIsSummaryCount = false
-	p.currentIsSummarySum = false
-	p.currentIsHistogramCount = false
-	p.currentIsHistogramSum = false
-	name := p.currentToken.String()
-	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
-		return
-	}
-	// Check whether this is the _sum or _count of a summary/histogram.
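-	// A sample named 'x_count' or 'x_sum' may belong to a summary or
-	// histogram family named 'x', so strip the suffix and look up the base
-	// name before creating a brand-new family below.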
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' -} - -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse_test.go deleted file mode 100644 index cc3e6470f0..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse_test.go +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package text - -import ( - "math" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" -) - -var parser Parser - -func testParse(t testing.TB) { - var scenarios = []struct { - in string - out []*dto.MetricFamily - }{ - // 0: Empty lines as input. - { - in: ` - -`, - out: []*dto.MetricFamily{}, - }, - // 1: Minimal case. - { - in: ` -minimal_metric 1.234 -another_metric -3e3 103948 -# Even that: -no_labels{} 3 -# HELP line for non-existing metric will be ignored. 
-`, - out: []*dto.MetricFamily{ - &dto.MetricFamily{ - Name: proto.String("minimal_metric"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(1.234), - }, - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("another_metric"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(-3e3), - }, - TimestampMs: proto.Int64(103948), - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("no_labels"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(3), - }, - }, - }, - }, - }, - }, - // 2: Counters & gauges, docstrings, various whitespace, escape sequences. - { - in: ` -# A normal comment. -# -# TYPE name counter -name{labelname="val1",basename="basevalue"} NaN -name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890 -# HELP name two-line\n doc str\\ing - - # HELP name2 doc str"ing 2 - # TYPE name2 gauge -name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321 -name2{ labelname = "val1" , }-Inf -`, - out: []*dto.MetricFamily{ - &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("two-line\n doc str\\ing"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val1"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("basevalue"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(math.NaN()), - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("base\"v\\al\nue"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(.23), - }, - TimestampMs: proto.Int64(1234567890), - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("name2"), - Help: proto.String("doc str\"ing 2"), - Type: dto.MetricType_GAUGE.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("basevalue2"), - }, - }, - Gauge: &dto.Gauge{ - Value: proto.Float64(math.Inf(+1)), - }, - TimestampMs: proto.Int64(54321), - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val1"), - }, - }, - Gauge: &dto.Gauge{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - }, - }, - }, - }, - // 3: The evil summary, mixed with other types and funny comments. - { - in: ` -# TYPE my_summary summary -my_summary{n1="val1",quantile="0.5"} 110 -decoy -1 -2 -my_summary{n1="val1",quantile="0.9"} 140 1 -my_summary_count{n1="val1"} 42 -# Latest timestamp wins in case of a summary. 
-my_summary_sum{n1="val1"} 4711 2 -fake_sum{n1="val1"} 2001 -# TYPE another_summary summary -another_summary_count{n2="val2",n1="val1"} 20 -my_summary_count{n2="val2",n1="val1"} 5 5 -another_summary{n1="val1",n2="val2",quantile=".3"} -1.2 -my_summary_sum{n1="val2"} 08 15 -my_summary{n1="val3", quantile="0.2"} 4711 - my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN -# some -# funny comments -# HELP -# HELP -# HELP my_summary -# HELP my_summary -`, - out: []*dto.MetricFamily{ - &dto.MetricFamily{ - Name: proto.String("fake_sum"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val1"), - }, - }, - Untyped: &dto.Untyped{ - Value: proto.Float64(2001), - }, - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("decoy"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(-1), - }, - TimestampMs: proto.Int64(-2), - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("my_summary"), - Type: dto.MetricType_SUMMARY.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val1"), - }, - }, - Summary: &dto.Summary{ - SampleCount: proto.Uint64(42), - SampleSum: proto.Float64(4711), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.5), - Value: proto.Float64(110), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.9), - Value: proto.Float64(140), - }, - }, - }, - TimestampMs: proto.Int64(2), - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n2"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val1"), - }, - }, - Summary: &dto.Summary{ - SampleCount: proto.Uint64(5), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(-12.34), - Value: proto.Float64(math.NaN()), - }, - }, - }, - TimestampMs: proto.Int64(5), - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val2"), - }, - }, - Summary: &dto.Summary{ - SampleSum: proto.Float64(8), - }, - TimestampMs: proto.Int64(15), - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val3"), - }, - }, - Summary: &dto.Summary{ - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.2), - Value: proto.Float64(4711), - }, - }, - }, - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("another_summary"), - Type: dto.MetricType_SUMMARY.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n2"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val1"), - }, - }, - Summary: &dto.Summary{ - SampleCount: proto.Uint64(20), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.3), - Value: proto.Float64(-1.2), - }, - }, - }, - }, - }, - }, - }, - }, - // 4: The histogram. - { - in: ` -# HELP request_duration_microseconds The response latency. 
-# TYPE request_duration_microseconds histogram -request_duration_microseconds_bucket{le="100"} 123 -request_duration_microseconds_bucket{le="120"} 412 -request_duration_microseconds_bucket{le="144"} 592 -request_duration_microseconds_bucket{le="172.8"} 1524 -request_duration_microseconds_bucket{le="+Inf"} 2693 -request_duration_microseconds_sum 1.7560473e+06 -request_duration_microseconds_count 2693 -`, - out: []*dto.MetricFamily{ - { - Name: proto.String("request_duration_microseconds"), - Help: proto.String("The response latency."), - Type: dto.MetricType_HISTOGRAM.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Histogram: &dto.Histogram{ - SampleCount: proto.Uint64(2693), - SampleSum: proto.Float64(1756047.3), - Bucket: []*dto.Bucket{ - &dto.Bucket{ - UpperBound: proto.Float64(100), - CumulativeCount: proto.Uint64(123), - }, - &dto.Bucket{ - UpperBound: proto.Float64(120), - CumulativeCount: proto.Uint64(412), - }, - &dto.Bucket{ - UpperBound: proto.Float64(144), - CumulativeCount: proto.Uint64(592), - }, - &dto.Bucket{ - UpperBound: proto.Float64(172.8), - CumulativeCount: proto.Uint64(1524), - }, - &dto.Bucket{ - UpperBound: proto.Float64(math.Inf(+1)), - CumulativeCount: proto.Uint64(2693), - }, - }, - }, - }, - }, - }, - }, - }, - } - - for i, scenario := range scenarios { - out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) - if err != nil { - t.Errorf("%d. error: %s", i, err) - continue - } - if expected, got := len(scenario.out), len(out); expected != got { - t.Errorf( - "%d. expected %d MetricFamilies, got %d", - i, expected, got, - ) - } - for _, expected := range scenario.out { - got, ok := out[expected.GetName()] - if !ok { - t.Errorf( - "%d. expected MetricFamily %q, found none", - i, expected.GetName(), - ) - continue - } - if expected.String() != got.String() { - t.Errorf( - "%d. expected MetricFamily %s, got %s", - i, expected, got, - ) - } - } - } -} - -func TestParse(t *testing.T) { - testParse(t) -} - -func BenchmarkParse(b *testing.B) { - for i := 0; i < b.N; i++ { - testParse(b) - } -} - -func testParseError(t testing.TB) { - var scenarios = []struct { - in string - err string - }{ - // 0: No new-line at end of input. - { - in: `bla 3.14`, - err: "EOF", - }, - // 1: Invalid escape sequence in label value. - { - in: `metric{label="\t"} 3.14`, - err: "text format parsing error in line 1: invalid escape sequence", - }, - // 2: Newline in label value. 
-		{
-			in: `
-metric{label="new
-line"} 3.14
-`,
-			err: `text format parsing error in line 2: label value "new" contains unescaped new-line`,
-		},
-		// 3:
-		{
-			in:  `metric{@="bla"} 3.14`,
-			err: "text format parsing error in line 1: invalid label name for metric",
-		},
-		// 4:
-		{
-			in:  `metric{__name__="bla"} 3.14`,
-			err: `text format parsing error in line 1: label name "__name__" is reserved`,
-		},
-		// 5:
-		{
-			in:  `metric{label+="bla"} 3.14`,
-			err: "text format parsing error in line 1: expected '=' after label name",
-		},
-		// 6:
-		{
-			in:  `metric{label=bla} 3.14`,
-			err: "text format parsing error in line 1: expected '\"' at start of label value",
-		},
-		// 7:
-		{
-			in: `
-# TYPE metric summary
-metric{quantile="bla"} 3.14
-`,
-			err: "text format parsing error in line 3: expected float as value for 'quantile' label",
-		},
-		// 8:
-		{
-			in:  `metric{label="bla"+} 3.14`,
-			err: "text format parsing error in line 1: unexpected end of label value",
-		},
-		// 9:
-		{
-			in: `metric{label="bla"} 3.14 2.72
-`,
-			err: "text format parsing error in line 1: expected integer as timestamp",
-		},
-		// 10:
-		{
-			in: `metric{label="bla"} 3.14 2 3
-`,
-			err: "text format parsing error in line 1: spurious string after timestamp",
-		},
-		// 11:
-		{
-			in: `metric{label="bla"} blubb
-`,
-			err: "text format parsing error in line 1: expected float as value",
-		},
-		// 12:
-		{
-			in: `
-# HELP metric one
-# HELP metric two
-`,
-			err: "text format parsing error in line 3: second HELP line for metric name",
-		},
-		// 13:
-		{
-			in: `
-# TYPE metric counter
-# TYPE metric untyped
-`,
-			err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
-		},
-		// 14:
-		{
-			in: `
-metric 4.12
-# TYPE metric counter
-`,
-			err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
-		},
-		// 15:
-		{
-			in: `
-# TYPE metric bla
-`,
-			err: "text format parsing error in line 2: unknown metric type",
-		},
-		// 16:
-		{
-			in: `
-# TYPE met-ric
-`,
-			err: "text format parsing error in line 2: invalid metric name in comment",
-		},
-		// 17:
-		{
-			in:  `@invalidmetric{label="bla"} 3.14 2`,
-			err: "text format parsing error in line 1: invalid metric name",
-		},
-		// 18:
-		{
-			in:  `{label="bla"} 3.14 2`,
-			err: "text format parsing error in line 1: invalid metric name",
-		},
-		// 19:
-		{
-			in: `
-# TYPE metric histogram
-metric_bucket{le="bla"} 3.14
-`,
-			err: "text format parsing error in line 3: expected float as value for 'le' label",
-		},
-	}
-
-	for i, scenario := range scenarios {
-		_, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
-		if err == nil {
-			t.Errorf("%d. expected error, got nil", i)
-			continue
-		}
-		if expected, got := scenario.err, err.Error(); !strings.HasPrefix(got, expected) {
-			t.Errorf(
-				"%d. expected error starting with %q, got %q",
-				i, expected, got,
-			)
-		}
-	}
-
-}
-
-func TestParseError(t *testing.T) {
-	testParseError(t)
-}
-
-func BenchmarkParseError(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		testParseError(b)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go
deleted file mode 100644
index e82bbb3b40..0000000000
--- a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package text
-
-import (
-	"fmt"
-	"io"
-
-	"github.com/golang/protobuf/proto"
-	"github.com/matttproud/golang_protobuf_extensions/pbutil"
-
-	dto "github.com/prometheus/client_model/go"
-)
-
-// WriteProtoDelimited writes the MetricFamily to the writer in delimited
-// protobuf format and returns the number of bytes written and any error
-// encountered.
-func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) {
-	return pbutil.WriteDelimited(w, p)
-}
-
-// WriteProtoText writes the MetricFamily to the writer in text format and
-// returns the number of bytes written and any error encountered.
-func WriteProtoText(w io.Writer, p *dto.MetricFamily) (int, error) {
-	return fmt.Fprintf(w, "%s\n", proto.MarshalTextString(p))
-}
-
-// WriteProtoCompactText writes the MetricFamily to the writer in compact text
-// format and returns the number of bytes written and any error encountered.
-func WriteProtoCompactText(w io.Writer, p *dto.MetricFamily) (int, error) {
-	return fmt.Fprintf(w, "%s\n", p)
-}
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf
deleted file mode 100644
index df48256390..0000000000
Binary files a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf and /dev/null differ
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf.gz b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf.gz
deleted file mode 100644
index 2c8704d69e..0000000000
Binary files a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf.gz and /dev/null differ
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text
deleted file mode 100644
index f3d8c37844..0000000000
--- a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text
+++ /dev/null
@@ -1,322 +0,0 @@
-# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
-# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="/",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/"} 0 -http_request_duration_microseconds_count{handler="/"} 0 -http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/alerts"} 0 -http_request_duration_microseconds_count{handler="/alerts"} 0 -http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/api/metrics"} 0 -http_request_duration_microseconds_count{handler="/api/metrics"} 0 -http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/api/query"} 0 -http_request_duration_microseconds_count{handler="/api/query"} 0 -http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/api/query_range"} 0 -http_request_duration_microseconds_count{handler="/api/query_range"} 0 -http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/api/targets"} 0 -http_request_duration_microseconds_count{handler="/api/targets"} 0 -http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/consoles/"} 0 -http_request_duration_microseconds_count{handler="/consoles/"} 0 -http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/graph"} 0 -http_request_duration_microseconds_count{handler="/graph"} 0 -http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/heap"} 0 -http_request_duration_microseconds_count{handler="/heap"} 0 -http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/static/"} 0 -http_request_duration_microseconds_count{handler="/static/"} 0 
-http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384 -http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001 -http_request_duration_microseconds_count{handler="prometheus"} 119 -# HELP http_request_size_bytes The HTTP request sizes in bytes. -# TYPE http_request_size_bytes summary -http_request_size_bytes{handler="/",quantile="0.5"} 0 -http_request_size_bytes{handler="/",quantile="0.9"} 0 -http_request_size_bytes{handler="/",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/"} 0 -http_request_size_bytes_count{handler="/"} 0 -http_request_size_bytes{handler="/alerts",quantile="0.5"} 0 -http_request_size_bytes{handler="/alerts",quantile="0.9"} 0 -http_request_size_bytes{handler="/alerts",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/alerts"} 0 -http_request_size_bytes_count{handler="/alerts"} 0 -http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0 -http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0 -http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/api/metrics"} 0 -http_request_size_bytes_count{handler="/api/metrics"} 0 -http_request_size_bytes{handler="/api/query",quantile="0.5"} 0 -http_request_size_bytes{handler="/api/query",quantile="0.9"} 0 -http_request_size_bytes{handler="/api/query",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/api/query"} 0 -http_request_size_bytes_count{handler="/api/query"} 0 -http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0 -http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0 -http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/api/query_range"} 0 -http_request_size_bytes_count{handler="/api/query_range"} 0 -http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0 -http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0 -http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/api/targets"} 0 -http_request_size_bytes_count{handler="/api/targets"} 0 -http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0 -http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0 -http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/consoles/"} 0 -http_request_size_bytes_count{handler="/consoles/"} 0 -http_request_size_bytes{handler="/graph",quantile="0.5"} 0 -http_request_size_bytes{handler="/graph",quantile="0.9"} 0 -http_request_size_bytes{handler="/graph",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/graph"} 0 -http_request_size_bytes_count{handler="/graph"} 0 -http_request_size_bytes{handler="/heap",quantile="0.5"} 0 -http_request_size_bytes{handler="/heap",quantile="0.9"} 0 -http_request_size_bytes{handler="/heap",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/heap"} 0 -http_request_size_bytes_count{handler="/heap"} 0 -http_request_size_bytes{handler="/static/",quantile="0.5"} 0 -http_request_size_bytes{handler="/static/",quantile="0.9"} 0 -http_request_size_bytes{handler="/static/",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/static/"} 0 -http_request_size_bytes_count{handler="/static/"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.5"} 291 
-http_request_size_bytes{handler="prometheus",quantile="0.9"} 291 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 291 -http_request_size_bytes_sum{handler="prometheus"} 34488 -http_request_size_bytes_count{handler="prometheus"} 119 -# HELP http_requests_total Total number of HTTP requests made. -# TYPE http_requests_total counter -http_requests_total{code="200",handler="prometheus",method="get"} 119 -# HELP http_response_size_bytes The HTTP response sizes in bytes. -# TYPE http_response_size_bytes summary -http_response_size_bytes{handler="/",quantile="0.5"} 0 -http_response_size_bytes{handler="/",quantile="0.9"} 0 -http_response_size_bytes{handler="/",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/"} 0 -http_response_size_bytes_count{handler="/"} 0 -http_response_size_bytes{handler="/alerts",quantile="0.5"} 0 -http_response_size_bytes{handler="/alerts",quantile="0.9"} 0 -http_response_size_bytes{handler="/alerts",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/alerts"} 0 -http_response_size_bytes_count{handler="/alerts"} 0 -http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0 -http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0 -http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/api/metrics"} 0 -http_response_size_bytes_count{handler="/api/metrics"} 0 -http_response_size_bytes{handler="/api/query",quantile="0.5"} 0 -http_response_size_bytes{handler="/api/query",quantile="0.9"} 0 -http_response_size_bytes{handler="/api/query",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/api/query"} 0 -http_response_size_bytes_count{handler="/api/query"} 0 -http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0 -http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0 -http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/api/query_range"} 0 -http_response_size_bytes_count{handler="/api/query_range"} 0 -http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0 -http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0 -http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/api/targets"} 0 -http_response_size_bytes_count{handler="/api/targets"} 0 -http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0 -http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0 -http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/consoles/"} 0 -http_response_size_bytes_count{handler="/consoles/"} 0 -http_response_size_bytes{handler="/graph",quantile="0.5"} 0 -http_response_size_bytes{handler="/graph",quantile="0.9"} 0 -http_response_size_bytes{handler="/graph",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/graph"} 0 -http_response_size_bytes_count{handler="/graph"} 0 -http_response_size_bytes{handler="/heap",quantile="0.5"} 0 -http_response_size_bytes{handler="/heap",quantile="0.9"} 0 -http_response_size_bytes{handler="/heap",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/heap"} 0 -http_response_size_bytes_count{handler="/heap"} 0 -http_response_size_bytes{handler="/static/",quantile="0.5"} 0 -http_response_size_bytes{handler="/static/",quantile="0.9"} 0 -http_response_size_bytes{handler="/static/",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/static/"} 0 -http_response_size_bytes_count{handler="/static/"} 0 
-http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064 -http_response_size_bytes_sum{handler="prometheus"} 247001 -http_response_size_bytes_count{handler="prometheus"} 119 -# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. -# TYPE process_cpu_seconds_total counter -process_cpu_seconds_total 0.55 -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 70 -# HELP process_max_fds Maximum number of open file descriptors. -# TYPE process_max_fds gauge -process_max_fds 8192 -# HELP process_open_fds Number of open file descriptors. -# TYPE process_open_fds gauge -process_open_fds 29 -# HELP process_resident_memory_bytes Resident memory size in bytes. -# TYPE process_resident_memory_bytes gauge -process_resident_memory_bytes 5.3870592e+07 -# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. -# TYPE process_start_time_seconds gauge -process_start_time_seconds 1.42236894836e+09 -# HELP process_virtual_memory_bytes Virtual memory size in bytes. -# TYPE process_virtual_memory_bytes gauge -process_virtual_memory_bytes 5.41478912e+08 -# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures. -# TYPE prometheus_dns_sd_lookup_failures_total counter -prometheus_dns_sd_lookup_failures_total 0 -# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups. -# TYPE prometheus_dns_sd_lookups_total counter -prometheus_dns_sd_lookups_total 7 -# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute. -# TYPE prometheus_evaluator_duration_milliseconds summary -prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0 -prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0 -prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0 -prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1 -prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1 -prometheus_evaluator_duration_milliseconds_sum 12 -prometheus_evaluator_duration_milliseconds_count 23 -# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks. -# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge -prometheus_local_storage_checkpoint_duration_milliseconds 0 -# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type. -# TYPE prometheus_local_storage_chunk_ops_total counter -prometheus_local_storage_chunk_ops_total{type="create"} 598 -prometheus_local_storage_chunk_ops_total{type="persist"} 174 -prometheus_local_storage_chunk_ops_total{type="pin"} 920 -prometheus_local_storage_chunk_ops_total{type="transcode"} 415 -prometheus_local_storage_chunk_ops_total{type="unpin"} 920 -# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds. 
-# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary -prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0 -prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0 -prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0 -prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0 -prometheus_local_storage_indexing_batch_latency_milliseconds_count 1 -# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch). -# TYPE prometheus_local_storage_indexing_batch_sizes summary -prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2 -prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2 -prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2 -prometheus_local_storage_indexing_batch_sizes_sum 2 -prometheus_local_storage_indexing_batch_sizes_count 1 -# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue. -# TYPE prometheus_local_storage_indexing_queue_capacity gauge -prometheus_local_storage_indexing_queue_capacity 16384 -# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed. -# TYPE prometheus_local_storage_indexing_queue_length gauge -prometheus_local_storage_indexing_queue_length 0 -# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested. -# TYPE prometheus_local_storage_ingested_samples_total counter -prometheus_local_storage_ingested_samples_total 30473 -# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes. -# TYPE prometheus_local_storage_invalid_preload_requests_total counter -prometheus_local_storage_invalid_preload_requests_total 0 -# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory. -# TYPE prometheus_local_storage_memory_chunkdescs gauge -prometheus_local_storage_memory_chunkdescs 1059 -# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor). -# TYPE prometheus_local_storage_memory_chunks gauge -prometheus_local_storage_memory_chunks 1020 -# HELP prometheus_local_storage_memory_series The current number of series in memory. -# TYPE prometheus_local_storage_memory_series gauge -prometheus_local_storage_memory_series 424 -# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk. -# TYPE prometheus_local_storage_persist_latency_microseconds summary -prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377 -prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539 -prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463 -prometheus_local_storage_persist_latency_microseconds_sum 20424.415 -prometheus_local_storage_persist_latency_microseconds_count 174 -# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue. -# TYPE prometheus_local_storage_persist_queue_capacity gauge -prometheus_local_storage_persist_queue_capacity 1024 -# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue. 
-# TYPE prometheus_local_storage_persist_queue_length gauge -prometheus_local_storage_persist_queue_length 0 -# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type. -# TYPE prometheus_local_storage_series_ops_total counter -prometheus_local_storage_series_ops_total{type="create"} 2 -prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11 -# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications). -# TYPE prometheus_notifications_latency_milliseconds summary -prometheus_notifications_latency_milliseconds{quantile="0.5"} 0 -prometheus_notifications_latency_milliseconds{quantile="0.9"} 0 -prometheus_notifications_latency_milliseconds{quantile="0.99"} 0 -prometheus_notifications_latency_milliseconds_sum 0 -prometheus_notifications_latency_milliseconds_count 0 -# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. -# TYPE prometheus_notifications_queue_capacity gauge -prometheus_notifications_queue_capacity 100 -# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. -# TYPE prometheus_notifications_queue_length gauge -prometheus_notifications_queue_length 0 -# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute. -# TYPE prometheus_rule_evaluation_duration_milliseconds summary -prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0 -prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0 -prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2 -prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12 -prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115 -prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0 -prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0 -prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3 -prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15 -prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115 -# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. -# TYPE prometheus_rule_evaluation_failures_total counter -prometheus_rule_evaluation_failures_total 0 -# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples. -# TYPE prometheus_samples_queue_capacity gauge -prometheus_samples_queue_capacity 4096 -# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name). -# TYPE prometheus_samples_queue_length gauge -prometheus_samples_queue_length 0 -# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. 
-# TYPE prometheus_target_interval_length_seconds summary -prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14 -prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14 -prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15 -prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15 -prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15 -prometheus_target_interval_length_seconds_sum{interval="15s"} 175 -prometheus_target_interval_length_seconds_count{interval="15s"} 12 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1 -prometheus_target_interval_length_seconds_sum{interval="1s"} 55 -prometheus_target_interval_length_seconds_count{interval="1s"} 117 diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text.gz b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text.gz deleted file mode 100644 index 46de5995ad..0000000000 Binary files a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text.gz and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/prometheus/client_model/go/metrics.pb.go b/Godeps/_workspace/src/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index b065f8683f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,364 +0,0 @@ -// Code generated by protoc-gen-go. -// source: metrics.proto -// DO NOT EDIT! - -/* -Package io_prometheus_client is a generated protocol buffer package. - -It is generated from these files: - metrics.proto - -It has these top-level messages: - LabelPair - Gauge - Counter - Quantile - Summary - Untyped - Histogram - Bucket - Metric - MetricFamily -*/ -package io_prometheus_client - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = math.Inf - -type MetricType int32 - -const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 - MetricType_HISTOGRAM MetricType = 4 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} - -type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile - } - return 0 -} - -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - 
} - return 0 -} - -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket - } - return nil -} - -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount - } - return 0 -} - -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound - } - return 0 -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary - } - return nil -} - -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - 
return m.Histogram - } - return nil -} - -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs - } - return 0 -} - -type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/bench_test.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/bench_test.go deleted file mode 100644 index 92b16a028a..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/bench_test.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bytes" - "compress/gzip" - "io" - "io/ioutil" - "testing" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - - dto "github.com/prometheus/client_model/go" -) - -var parser TextParser - -// Benchmarks to show how much penalty text format parsing actually inflicts. -// -// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4. -// -// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op -// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op -// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op -// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op -// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op -// -// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations. -// Without compression, it needs ~7x longer, but with compression (the more relevant scenario), -// the difference becomes less relevant, only ~4x. -// -// The test data contains 248 samples. -// -// BenchmarkProcessor002ParseOnly in the extraction package is not quite -// comparable to the benchmarks here, but it gives an idea: JSON parsing is even -// slower than text parsing and needs a comparable amount of allocs. 
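For orientation, here is a minimal sketch of the parsing path that the benchmarks below exercise, assuming only the expfmt API visible in the deleted files (TextParser.TextToMetricFamilies); the input literal and the program itself are invented for illustration and are not part of the diff:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Invented sample input in the 0.0.4 text exposition format.
	in := "# TYPE http_requests_total counter\nhttp_requests_total{code=\"200\"} 59\n"

	// TextToMetricFamilies parses one whole scrape into a map keyed by
	// metric family name; building the DTOs and this map is the
	// allocation cost that the benchmark results above quantify.
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(in))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Printf("%s: %d metric(s), type %s\n", name, len(mf.GetMetric()), mf.GetType())
	}
}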
- -// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric -// family DTOs. -func BenchmarkParseText(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("testdata/text") - if err != nil { - b.Fatal(err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil { - b.Fatal(err) - } - } -} - -// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape -// into metric family DTOs. -func BenchmarkParseTextGzip(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("testdata/text.gz") - if err != nil { - b.Fatal(err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - in, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - b.Fatal(err) - } - if _, err := parser.TextToMetricFamilies(in); err != nil { - b.Fatal(err) - } - } -} - -// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into -// metric family DTOs. Note that this does not build a map of metric families -// (as the text version does), because it is not required for Prometheus -// ingestion either. (However, it is required for the text-format parsing, as -// the metric family might be sprinkled all over the text, while the -// protobuf-format guarantees bundling at one place.) -func BenchmarkParseProto(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("testdata/protobuf") - if err != nil { - b.Fatal(err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - family := &dto.MetricFamily{} - in := bytes.NewReader(data) - for { - family.Reset() - if _, err := pbutil.ReadDelimited(in, family); err != nil { - if err == io.EOF { - break - } - b.Fatal(err) - } - } - } -} - -// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped -// protobuf format. -func BenchmarkParseProtoGzip(b *testing.B) { - b.StopTimer() - data, err := ioutil.ReadFile("testdata/protobuf.gz") - if err != nil { - b.Fatal(err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - family := &dto.MetricFamily{} - in, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - b.Fatal(err) - } - for { - family.Reset() - if _, err := pbutil.ReadDelimited(in, family); err != nil { - if err == io.EOF { - break - } - b.Fatal(err) - } - } - } -} - -// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed -// metric family DTOs into a map. This is not happening during Prometheus -// ingestion. It is just here to measure the overhead of that map creation and -// separate it from the overhead of the text format parsing. 
-func BenchmarkParseProtoMap(b *testing.B) {
-	b.StopTimer()
-	data, err := ioutil.ReadFile("testdata/protobuf")
-	if err != nil {
-		b.Fatal(err)
-	}
-	b.StartTimer()
-
-	for i := 0; i < b.N; i++ {
-		families := map[string]*dto.MetricFamily{}
-		in := bytes.NewReader(data)
-		for {
-			family := &dto.MetricFamily{}
-			if _, err := pbutil.ReadDelimited(in, family); err != nil {
-				if err == io.EOF {
-					break
-				}
-				b.Fatal(err)
-			}
-			families[family.GetName()] = family
-		}
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/decode.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/decode.go
deleted file mode 100644
index 68a894445c..0000000000
--- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/decode.go
+++ /dev/null
@@ -1,388 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
-	"fmt"
-	"io"
-	"math"
-	"mime"
-	"net/http"
-
-	dto "github.com/prometheus/client_model/go"
-
-	"github.com/matttproud/golang_protobuf_extensions/pbutil"
-	"github.com/prometheus/common/model"
-)
-
-// Decoder types decode an input stream into metric families.
-type Decoder interface {
-	Decode(*dto.MetricFamily) error
-}
-
-type DecodeOptions struct {
-	// Timestamp is added to each value from the stream that has no explicit timestamp set.
-	Timestamp model.Time
-}
-
-// NewDecoder returns a new decoder based on the HTTP header.
-func NewDecoder(r io.Reader, h http.Header) (Decoder, error) {
-	ct := h.Get(hdrContentType)
-
-	mediatype, params, err := mime.ParseMediaType(ct)
-	if err != nil {
-		return nil, fmt.Errorf("invalid Content-Type header %q: %s", ct, err)
-	}
-
-	const (
-		protoType = ProtoType + "/" + ProtoSubType
-		textType  = "text/plain"
-	)
-
-	switch mediatype {
-	case protoType:
-		if p := params["proto"]; p != ProtoProtocol {
-			return nil, fmt.Errorf("unrecognized protocol message %s", p)
-		}
-		if e := params["encoding"]; e != "delimited" {
-			return nil, fmt.Errorf("unsupported encoding %s", e)
-		}
-		return &protoDecoder{r: r}, nil
-
-	case textType:
-		if v, ok := params["version"]; ok && v != "0.0.4" {
-			return nil, fmt.Errorf("unrecognized protocol version %s", v)
-		}
-		return &textDecoder{r: r}, nil
-
-	default:
-		return nil, fmt.Errorf("unsupported media type %q, expected %q or %q", mediatype, protoType, textType)
-	}
-}
-
-// protoDecoder implements the Decoder interface for protocol buffers.
-type protoDecoder struct {
-	r io.Reader
-}
-
-// Decode implements the Decoder interface.
-func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
-	_, err := pbutil.ReadDelimited(d.r, v)
-	return err
-}
-
-// textDecoder implements the Decoder interface for the text protocol.
-type textDecoder struct {
-	r    io.Reader
-	p    TextParser
-	fams []*dto.MetricFamily
-}
-
-// Decode implements the Decoder interface.
-func (d *textDecoder) Decode(v *dto.MetricFamily) error {
-	// TODO(fabxc): Wrap this as a line reader to make streaming safer.
-	if len(d.fams) == 0 {
-		// No cached metric families, read everything and parse metrics.
-		fams, err := d.p.TextToMetricFamilies(d.r)
-		if err != nil {
-			return err
-		}
-		if len(fams) == 0 {
-			return io.EOF
-		}
-		for _, f := range fams {
-			d.fams = append(d.fams, f)
-		}
-	}
-	*v = *d.fams[len(d.fams)-1]
-	d.fams = d.fams[:len(d.fams)-1]
-	return nil
-}
-
-type SampleDecoder struct {
-	Dec  Decoder
-	Opts *DecodeOptions
-
-	f dto.MetricFamily
-}
-
-func (sd *SampleDecoder) Decode(s *model.Vector) error {
-	if err := sd.Dec.Decode(&sd.f); err != nil {
-		return err
-	}
-	*s = extractSamples(&sd.f, sd.Opts)
-	return nil
-}
-
-// ExtractSamples builds a slice of samples from the provided metric families.
-func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
-	var all model.Vector
-	for _, f := range fams {
-		all = append(all, extractSamples(f, o)...)
-	}
-	return all
-}
-
-func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
-	switch *f.Type {
-	case dto.MetricType_COUNTER:
-		return extractCounter(o, f)
-	case dto.MetricType_GAUGE:
-		return extractGauge(o, f)
-	case dto.MetricType_SUMMARY:
-		return extractSummary(o, f)
-	case dto.MetricType_UNTYPED:
-		return extractUntyped(o, f)
-	case dto.MetricType_HISTOGRAM:
-		return extractHistogram(o, f)
-	}
-	panic("expfmt.extractSamples: unknown metric family type")
-}
-
-func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
-	samples := make(model.Vector, 0, len(f.Metric))
-
-	for _, m := range f.Metric {
-		if m.Counter == nil {
-			continue
-		}
-
-		lset := make(model.LabelSet, len(m.Label)+1)
-		for _, p := range m.Label {
-			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-		}
-		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
-		smpl := &model.Sample{
-			Metric: model.Metric(lset),
-			Value:  model.SampleValue(m.Counter.GetValue()),
-		}
-
-		if m.TimestampMs != nil {
-			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
-		} else {
-			smpl.Timestamp = o.Timestamp
-		}
-
-		samples = append(samples, smpl)
-	}
-
-	return samples
-}
-
-func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
-	samples := make(model.Vector, 0, len(f.Metric))
-
-	for _, m := range f.Metric {
-		if m.Gauge == nil {
-			continue
-		}
-
-		lset := make(model.LabelSet, len(m.Label)+1)
-		for _, p := range m.Label {
-			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-		}
-		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
-		smpl := &model.Sample{
-			Metric: model.Metric(lset),
-			Value:  model.SampleValue(m.Gauge.GetValue()),
-		}
-
-		if m.TimestampMs != nil {
-			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
-		} else {
-			smpl.Timestamp = o.Timestamp
-		}
-
-		samples = append(samples, smpl)
-	}
-
-	return samples
-}
-
-func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
-	samples := make(model.Vector, 0, len(f.Metric))
-
-	for _, m := range f.Metric {
-		if m.Untyped == nil {
-			continue
-		}
-
-		lset := make(model.LabelSet, len(m.Label)+1)
-		for _, p := range m.Label {
-			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-		}
-		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
-		smpl := &model.Sample{
-			Metric: model.Metric(lset),
-			Value:  model.SampleValue(m.Untyped.GetValue()),
-		}
-
-		if m.TimestampMs != nil {
-			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
-		} else {
-			smpl.Timestamp = o.Timestamp
-		}
-
-		samples = append(samples, smpl)
-	}
-
-	return samples
-}
-
-func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
-	samples := make(model.Vector, 0, len(f.Metric))
-
-	for _, m := range f.Metric {
-		if m.Summary == nil {
-			continue
-		}
-
-		timestamp := o.Timestamp
-		if m.TimestampMs != nil {
-			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
-		}
-
-		for _, q := range m.Summary.Quantile {
-			lset := make(model.LabelSet, len(m.Label)+2)
-			for _, p := range m.Label {
-				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-			}
-			// BUG(matt): Update other names to "quantile".
-			lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
-			lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
-			samples = append(samples, &model.Sample{
-				Metric:    model.Metric(lset),
-				Value:     model.SampleValue(q.GetValue()),
-				Timestamp: timestamp,
-			})
-		}
-
-		if m.Summary.SampleSum != nil {
-			lset := make(model.LabelSet, len(m.Label)+1)
-			for _, p := range m.Label {
-				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-			}
-			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
-			samples = append(samples, &model.Sample{
-				Metric:    model.Metric(lset),
-				Value:     model.SampleValue(m.Summary.GetSampleSum()),
-				Timestamp: timestamp,
-			})
-		}
-
-		if m.Summary.SampleCount != nil {
-			lset := make(model.LabelSet, len(m.Label)+1)
-			for _, p := range m.Label {
-				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-			}
-			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
-			samples = append(samples, &model.Sample{
-				Metric:    model.Metric(lset),
-				Value:     model.SampleValue(m.Summary.GetSampleCount()),
-				Timestamp: timestamp,
-			})
-		}
-	}
-
-	return samples
-}
-
-func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
-	samples := make(model.Vector, 0, len(f.Metric))
-
-	for _, m := range f.Metric {
-		if m.Histogram == nil {
-			continue
-		}
-
-		timestamp := o.Timestamp
-		if m.TimestampMs != nil {
-			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
-		}
-
-		infSeen := false
-
-		for _, q := range m.Histogram.Bucket {
-			lset := make(model.LabelSet, len(m.Label)+2)
-			for _, p := range m.Label {
-				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-			}
-			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
-			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
-			if math.IsInf(q.GetUpperBound(), +1) {
-				infSeen = true
-			}
-
-			samples = append(samples, &model.Sample{
-				Metric:    model.Metric(lset),
-				Value:     model.SampleValue(q.GetCumulativeCount()),
-				Timestamp: timestamp,
-			})
-		}
-
-		if m.Histogram.SampleSum != nil {
-			lset := make(model.LabelSet, len(m.Label)+1)
-			for _, p := range m.Label {
-				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-			}
-			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
-			samples = append(samples, &model.Sample{
-				Metric:    model.Metric(lset),
-				Value:     model.SampleValue(m.Histogram.GetSampleSum()),
-				Timestamp: timestamp,
-			})
-		}
-
-		if m.Histogram.SampleCount != nil {
-			lset := make(model.LabelSet, len(m.Label)+1)
-			for _, p := range m.Label {
-				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-			}
-			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
-			count := &model.Sample{
-				Metric:    model.Metric(lset),
-				Value:     model.SampleValue(m.Histogram.GetSampleCount()),
-				Timestamp: timestamp,
-			}
-			samples = append(samples, count)
-
-			if !infSeen {
-				// Append an infinity bucket sample.
-				lset := make(model.LabelSet, len(m.Label)+2)
-				for _, p := range m.Label {
-					lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
-				}
-				lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
-				lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
-				samples = append(samples, &model.Sample{
-					Metric:    model.Metric(lset),
-					Value:     count.Value,
-					Timestamp: timestamp,
-				})
-			}
-		}
-	}
-
-	return samples
-}
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/decode_test.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/decode_test.go
deleted file mode 100644
index ec2638b42c..0000000000
--- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/decode_test.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
-	"errors"
-	"io"
-	"net/http"
-	"reflect"
-	"sort"
-	"strings"
-	"testing"
-
-	"github.com/prometheus/common/model"
-)
-
-func TestTextDecoder(t *testing.T) {
-	var (
-		ts = model.Now()
-		in = `
-# Only a quite simple scenario with two metric families.
-# More complicated tests of the parser itself can be found in the text package.
-# TYPE mf2 counter
-mf2 3
-mf1{label="value1"} -3.14 123456
-mf1{label="value2"} 42
-mf2 4
-`
-		out = model.Vector{
-			&model.Sample{
-				Metric: model.Metric{
-					model.MetricNameLabel: "mf1",
-					"label": "value1",
-				},
-				Value:     -3.14,
-				Timestamp: 123456,
-			},
-			&model.Sample{
-				Metric: model.Metric{
-					model.MetricNameLabel: "mf1",
-					"label": "value2",
-				},
-				Value:     42,
-				Timestamp: ts,
-			},
-			&model.Sample{
-				Metric: model.Metric{
-					model.MetricNameLabel: "mf2",
-				},
-				Value:     3,
-				Timestamp: ts,
-			},
-			&model.Sample{
-				Metric: model.Metric{
-					model.MetricNameLabel: "mf2",
-				},
-				Value:     4,
-				Timestamp: ts,
-			},
-		}
-	)
-
-	dec := &SampleDecoder{
-		Dec: &textDecoder{r: strings.NewReader(in)},
-		Opts: &DecodeOptions{
-			Timestamp: ts,
-		},
-	}
-	var all model.Vector
-	for {
-		var smpls model.Vector
-		err := dec.Decode(&smpls)
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			t.Fatal(err)
-		}
-		all = append(all, smpls...)
- } - sort.Sort(all) - sort.Sort(out) - if !reflect.DeepEqual(all, out) { - t.Fatalf("output does not match") - } -} - -func TestProtoDecoder(t *testing.T) { - - var testTime = model.Now() - - scenarios := []struct { - in string - expected model.Vector - }{ - { - in: "", - }, - { - in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", - expected: model.Vector{ - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_count", - "some_label_name": "some_label_value", - }, - Value: -42, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_count", - "another_label_name": "another_label_value", - }, - Value: 84, - Timestamp: testTime, - }, - }, - }, - { - in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@", - expected: model.Vector{ - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_count", - "some_label_name": "some_label_value", - "quantile": "0.99", - }, - Value: -42, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_count", - "some_label_name": "some_label_value", - "quantile": "0.999", - }, - Value: -84, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_count", - "another_label_name": "another_label_value", - "quantile": "0.5", - }, - Value: 10, - Timestamp: testTime, - }, - }, - }, - { - in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f", - expected: model.Vector{ - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "100", - }, - Value: 123, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "120", - }, - Value: 412, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "144", - }, - Value: 592, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "172.8", - }, - Value: 1524, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "+Inf", - }, - Value: 2693, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_duration_microseconds_sum", - }, - Value: 1756047.3, - Timestamp: testTime, - }, - &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: "request_duration_microseconds_count", - }, - Value: 2693, - Timestamp: testTime, - }, - }, - }, - } - - for _, scenario := range scenarios { - dec := &SampleDecoder{ - 
Dec: &protoDecoder{r: strings.NewReader(scenario.in)}, - Opts: &DecodeOptions{ - Timestamp: testTime, - }, - } - - var all model.Vector - for { - var smpls model.Vector - err := dec.Decode(&smpls) - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - all = append(all, smpls...) - } - sort.Sort(all) - sort.Sort(scenario.expected) - if !reflect.DeepEqual(all, scenario.expected) { - t.Fatalf("output does not match") - } - } -} - -func testDiscriminatorHTTPHeader(t testing.TB) { - var scenarios = []struct { - input map[string]string - output Decoder - err error - }{ - { - input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`}, - output: &protoDecoder{}, - err: nil, - }, - { - input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`}, - output: nil, - err: errors.New("unrecognized protocol message illegal"), - }, - { - input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`}, - output: nil, - err: errors.New("unsupported encoding illegal"), - }, - { - input: map[string]string{"Content-Type": `text/plain; version=0.0.4`}, - output: &textDecoder{}, - err: nil, - }, - { - input: map[string]string{"Content-Type": `text/plain`}, - output: &textDecoder{}, - err: nil, - }, - { - input: map[string]string{"Content-Type": `text/plain; version=0.0.3`}, - output: nil, - err: errors.New("unrecognized protocol version 0.0.3"), - }, - } - - for i, scenario := range scenarios { - var header http.Header - - if len(scenario.input) > 0 { - header = http.Header{} - } - - for key, value := range scenario.input { - header.Add(key, value) - } - - actual, err := NewDecoder(nil, header) - - if scenario.err != err { - if scenario.err != nil && err != nil { - if scenario.err.Error() != err.Error() { - t.Errorf("%d. expected %s, got %s", i, scenario.err, err) - } - } else if scenario.err != nil || err != nil { - t.Errorf("%d. expected %s, got %s", i, scenario.err, err) - } - } - - if !reflect.DeepEqual(scenario.output, actual) { - t.Errorf("%d. expected %s, got %s", i, scenario.output, actual) - } - } -} - -func TestDiscriminatorHTTPHeader(t *testing.T) { - testDiscriminatorHTTPHeader(t) -} - -func BenchmarkDiscriminatorHTTPHeader(b *testing.B) { - for i := 0; i < b.N; i++ { - testDiscriminatorHTTPHeader(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/encode.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index fd3c186e1f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
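The deleted encode.go below is the writing half of the same API. As a minimal round-trip sketch, assuming only the Negotiate, NewEncoder, and NewDecoder signatures from these deleted files (the counter family literal is invented for demonstration and is not part of the diff):

package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Invented counter family; Name, Type, and Value are the only fields
	// needed for a minimal round trip.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(59)}},
		},
	}

	// Negotiate picks a Format from the Accept header and falls back to
	// FmtText; a plain-text Accept header selects the text format directly.
	format := expfmt.Negotiate(http.Header{"Accept": []string{"text/plain"}})

	var buf bytes.Buffer
	if err := expfmt.NewEncoder(&buf, format).Encode(mf); err != nil {
		panic(err)
	}

	// Feed the wire form back through NewDecoder, which dispatches on the
	// Content-Type header exactly as the deleted decode.go does.
	dec, err := expfmt.NewDecoder(&buf, http.Header{"Content-Type": []string{string(format)}})
	if err != nil {
		panic(err)
	}
	var out dto.MetricFamily
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName(), out.GetMetric()[0].GetCounter().GetValue())
}

Negotiation on the Accept and Content-Type headers is what lets a single scrape endpoint serve both the text and protobuf exposition formats to different scrapers.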
- -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "bitbucket.org/ww/goautoneg" - "github.com/golang/protobuf/proto" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - - dto "github.com/prometheus/client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -type encoder func(*dto.MetricFamily) error - -func (e encoder) Encode(v *dto.MetricFamily) error { - return e(v) -} - -// Negotiate returns the Content-Type based on the given Accept header. -// If no appropriate accepted type is found, FmtText is returned. -func Negotiate(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - // Check for protocol buffer - if ac.Type == ProtoType && ac.SubType == ProtoSubType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - // Check for text format. - ver := ac.Params["version"] - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - } - return FmtText -} - -// NewEncoder returns a new encoder based on content type negotiation. -func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: - return encoder(func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }) - case FmtProtoCompact: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }) - case FmtProtoText: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }) - case FmtText: - return encoder(func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }) - } - panic("expfmt.NewEncoder: unknown format") -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/expfmt.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index 7a8114ce4b..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// A package for reading and writing Prometheus metrics. -package expfmt - -type Format string - -const ( - TextVersion = "0.0.4" - - ProtoType = `application` - ProtoSubType = `vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "/" + ProtoSubType + "; proto=" + ProtoProtocol + ";" - - // The Content-Type values for the different wire protocols. 
- FmtText Format = `text/plain; version=` + TextVersion - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/protobuf b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/protobuf deleted file mode 100644 index d5aae50915..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/protobuf +++ /dev/null @@ -1,516 +0,0 @@ -fc08 0a22 6874 7470 5f72 6571 7565 7374 -5f64 7572 6174 696f 6e5f 6d69 6372 6f73 -6563 6f6e 6473 122b 5468 6520 4854 5450 -2072 6571 7565 7374 206c 6174 656e 6369 -6573 2069 6e20 6d69 6372 6f73 6563 6f6e -6473 2e18 0222 570a 0c0a 0768 616e 646c -6572 1201 2f22 4708 0011 0000 0000 0000 -0000 1a12 0900 0000 0000 00e0 3f11 0000 -0000 0000 0000 1a12 09cd cccc cccc ccec -3f11 0000 0000 0000 0000 1a12 09ae 47e1 -7a14 aeef 3f11 0000 0000 0000 0000 225d -0a12 0a07 6861 6e64 6c65 7212 072f 616c -6572 7473 2247 0800 1100 0000 0000 0000 -001a 1209 0000 0000 0000 e03f 1100 0000 -0000 0000 001a 1209 cdcc cccc cccc ec3f -1100 0000 0000 0000 001a 1209 ae47 e17a -14ae ef3f 1100 0000 0000 0000 0022 620a -170a 0768 616e 646c 6572 120c 2f61 7069 -2f6d 6574 7269 6373 2247 0800 1100 0000 -0000 0000 001a 1209 0000 0000 0000 e03f -1100 0000 0000 0000 001a 1209 cdcc cccc -cccc ec3f 1100 0000 0000 0000 001a 1209 -ae47 e17a 14ae ef3f 1100 0000 0000 0000 -0022 600a 150a 0768 616e 646c 6572 120a -2f61 7069 2f71 7565 7279 2247 0800 1100 -0000 0000 0000 001a 1209 0000 0000 0000 -e03f 1100 0000 0000 0000 001a 1209 cdcc -cccc cccc ec3f 1100 0000 0000 0000 001a -1209 ae47 e17a 14ae ef3f 1100 0000 0000 -0000 0022 660a 1b0a 0768 616e 646c 6572 -1210 2f61 7069 2f71 7565 7279 5f72 616e -6765 2247 0800 1100 0000 0000 0000 001a -1209 0000 0000 0000 e03f 1100 0000 0000 -0000 001a 1209 cdcc cccc cccc ec3f 1100 -0000 0000 0000 001a 1209 ae47 e17a 14ae -ef3f 1100 0000 0000 0000 0022 620a 170a -0768 616e 646c 6572 120c 2f61 7069 2f74 -6172 6765 7473 2247 0800 1100 0000 0000 -0000 001a 1209 0000 0000 0000 e03f 1100 -0000 0000 0000 001a 1209 cdcc cccc cccc -ec3f 1100 0000 0000 0000 001a 1209 ae47 -e17a 14ae ef3f 1100 0000 0000 0000 0022 -600a 150a 0768 616e 646c 6572 120a 2f63 -6f6e 736f 6c65 732f 2247 0800 1100 0000 -0000 0000 001a 1209 0000 0000 0000 e03f -1100 0000 0000 0000 001a 1209 cdcc cccc -cccc ec3f 1100 0000 0000 0000 001a 1209 -ae47 e17a 14ae ef3f 1100 0000 0000 0000 -0022 5c0a 110a 0768 616e 646c 6572 1206 -2f67 7261 7068 2247 0800 1100 0000 0000 -0000 001a 1209 0000 0000 0000 e03f 1100 -0000 0000 0000 001a 1209 cdcc cccc cccc -ec3f 1100 0000 0000 0000 001a 1209 ae47 -e17a 14ae ef3f 1100 0000 0000 0000 0022 -5b0a 100a 0768 616e 646c 6572 1205 2f68 -6561 7022 4708 0011 0000 0000 0000 0000 -1a12 0900 0000 0000 00e0 3f11 0000 0000 -0000 0000 1a12 09cd cccc cccc ccec 3f11 -0000 0000 0000 0000 1a12 09ae 47e1 7a14 -aeef 3f11 0000 0000 0000 0000 225e 0a13 -0a07 6861 6e64 6c65 7212 082f 7374 6174 -6963 2f22 4708 0011 0000 0000 0000 0000 -1a12 0900 0000 0000 00e0 3f11 0000 0000 -0000 0000 1a12 09cd cccc cccc ccec 3f11 -0000 0000 0000 0000 1a12 09ae 47e1 7a14 -aeef 3f11 0000 0000 0000 0000 2260 0a15 -0a07 6861 6e64 6c65 7212 0a70 726f 6d65 -7468 6575 7322 4708 3b11 5b8f c2f5 083f -f440 1a12 0900 0000 0000 00e0 3f11 e17a -14ae c7af 9340 1a12 09cd cccc cccc ccec -3f11 2fdd 2406 81f0 9640 1a12 09ae 47e1 -7a14 
aeef 3f11 3d0a d7a3 b095 a740 e608 -0a17 6874 7470 5f72 6571 7565 7374 5f73 -697a 655f 6279 7465 7312 2054 6865 2048 -5454 5020 7265 7175 6573 7420 7369 7a65 -7320 696e 2062 7974 6573 2e18 0222 570a -0c0a 0768 616e 646c 6572 1201 2f22 4708 -0011 0000 0000 0000 0000 1a12 0900 0000 -0000 00e0 3f11 0000 0000 0000 0000 1a12 -09cd cccc cccc ccec 3f11 0000 0000 0000 -0000 1a12 09ae 47e1 7a14 aeef 3f11 0000 -0000 0000 0000 225d 0a12 0a07 6861 6e64 -6c65 7212 072f 616c 6572 7473 2247 0800 -1100 0000 0000 0000 001a 1209 0000 0000 -0000 e03f 1100 0000 0000 0000 001a 1209 -cdcc cccc cccc ec3f 1100 0000 0000 0000 -001a 1209 ae47 e17a 14ae ef3f 1100 0000 -0000 0000 0022 620a 170a 0768 616e 646c -6572 120c 2f61 7069 2f6d 6574 7269 6373 -2247 0800 1100 0000 0000 0000 001a 1209 -0000 0000 0000 e03f 1100 0000 0000 0000 -001a 1209 cdcc cccc cccc ec3f 1100 0000 -0000 0000 001a 1209 ae47 e17a 14ae ef3f -1100 0000 0000 0000 0022 600a 150a 0768 -616e 646c 6572 120a 2f61 7069 2f71 7565 -7279 2247 0800 1100 0000 0000 0000 001a -1209 0000 0000 0000 e03f 1100 0000 0000 -0000 001a 1209 cdcc cccc cccc ec3f 1100 -0000 0000 0000 001a 1209 ae47 e17a 14ae -ef3f 1100 0000 0000 0000 0022 660a 1b0a -0768 616e 646c 6572 1210 2f61 7069 2f71 -7565 7279 5f72 616e 6765 2247 0800 1100 -0000 0000 0000 001a 1209 0000 0000 0000 -e03f 1100 0000 0000 0000 001a 1209 cdcc -cccc cccc ec3f 1100 0000 0000 0000 001a -1209 ae47 e17a 14ae ef3f 1100 0000 0000 -0000 0022 620a 170a 0768 616e 646c 6572 -120c 2f61 7069 2f74 6172 6765 7473 2247 -0800 1100 0000 0000 0000 001a 1209 0000 -0000 0000 e03f 1100 0000 0000 0000 001a -1209 cdcc cccc cccc ec3f 1100 0000 0000 -0000 001a 1209 ae47 e17a 14ae ef3f 1100 -0000 0000 0000 0022 600a 150a 0768 616e -646c 6572 120a 2f63 6f6e 736f 6c65 732f -2247 0800 1100 0000 0000 0000 001a 1209 -0000 0000 0000 e03f 1100 0000 0000 0000 -001a 1209 cdcc cccc cccc ec3f 1100 0000 -0000 0000 001a 1209 ae47 e17a 14ae ef3f -1100 0000 0000 0000 0022 5c0a 110a 0768 -616e 646c 6572 1206 2f67 7261 7068 2247 -0800 1100 0000 0000 0000 001a 1209 0000 -0000 0000 e03f 1100 0000 0000 0000 001a -1209 cdcc cccc cccc ec3f 1100 0000 0000 -0000 001a 1209 ae47 e17a 14ae ef3f 1100 -0000 0000 0000 0022 5b0a 100a 0768 616e -646c 6572 1205 2f68 6561 7022 4708 0011 -0000 0000 0000 0000 1a12 0900 0000 0000 -00e0 3f11 0000 0000 0000 0000 1a12 09cd -cccc cccc ccec 3f11 0000 0000 0000 0000 -1a12 09ae 47e1 7a14 aeef 3f11 0000 0000 -0000 0000 225e 0a13 0a07 6861 6e64 6c65 -7212 082f 7374 6174 6963 2f22 4708 0011 -0000 0000 0000 0000 1a12 0900 0000 0000 -00e0 3f11 0000 0000 0000 0000 1a12 09cd -cccc cccc ccec 3f11 0000 0000 0000 0000 -1a12 09ae 47e1 7a14 aeef 3f11 0000 0000 -0000 0000 2260 0a15 0a07 6861 6e64 6c65 -7212 0a70 726f 6d65 7468 6575 7322 4708 -3b11 0000 0000 40c4 d040 1a12 0900 0000 -0000 00e0 3f11 0000 0000 0030 7240 1a12 -09cd cccc cccc ccec 3f11 0000 0000 0030 -7240 1a12 09ae 47e1 7a14 aeef 3f11 0000 -0000 0030 7240 7c0a 1368 7474 705f 7265 -7175 6573 7473 5f74 6f74 616c 1223 546f -7461 6c20 6e75 6d62 6572 206f 6620 4854 -5450 2072 6571 7565 7374 7320 6d61 6465 -2e18 0022 3e0a 0b0a 0463 6f64 6512 0332 -3030 0a15 0a07 6861 6e64 6c65 7212 0a70 -726f 6d65 7468 6575 730a 0d0a 066d 6574 -686f 6412 0367 6574 1a09 0900 0000 0000 -804d 40e8 080a 1868 7474 705f 7265 7370 -6f6e 7365 5f73 697a 655f 6279 7465 7312 -2154 6865 2048 5454 5020 7265 7370 6f6e -7365 2073 697a 6573 2069 6e20 6279 7465 -732e 1802 2257 0a0c 0a07 6861 6e64 6c65 -7212 012f 2247 0800 1100 0000 0000 0000 -001a 1209 0000 0000 0000 e03f 1100 0000 -0000 0000 001a 1209 cdcc cccc 
cccc ec3f -1100 0000 0000 0000 001a 1209 ae47 e17a -14ae ef3f 1100 0000 0000 0000 0022 5d0a -120a 0768 616e 646c 6572 1207 2f61 6c65 -7274 7322 4708 0011 0000 0000 0000 0000 -1a12 0900 0000 0000 00e0 3f11 0000 0000 -0000 0000 1a12 09cd cccc cccc ccec 3f11 -0000 0000 0000 0000 1a12 09ae 47e1 7a14 -aeef 3f11 0000 0000 0000 0000 2262 0a17 -0a07 6861 6e64 6c65 7212 0c2f 6170 692f -6d65 7472 6963 7322 4708 0011 0000 0000 -0000 0000 1a12 0900 0000 0000 00e0 3f11 -0000 0000 0000 0000 1a12 09cd cccc cccc -ccec 3f11 0000 0000 0000 0000 1a12 09ae -47e1 7a14 aeef 3f11 0000 0000 0000 0000 -2260 0a15 0a07 6861 6e64 6c65 7212 0a2f -6170 692f 7175 6572 7922 4708 0011 0000 -0000 0000 0000 1a12 0900 0000 0000 00e0 -3f11 0000 0000 0000 0000 1a12 09cd cccc -cccc ccec 3f11 0000 0000 0000 0000 1a12 -09ae 47e1 7a14 aeef 3f11 0000 0000 0000 -0000 2266 0a1b 0a07 6861 6e64 6c65 7212 -102f 6170 692f 7175 6572 795f 7261 6e67 -6522 4708 0011 0000 0000 0000 0000 1a12 -0900 0000 0000 00e0 3f11 0000 0000 0000 -0000 1a12 09cd cccc cccc ccec 3f11 0000 -0000 0000 0000 1a12 09ae 47e1 7a14 aeef -3f11 0000 0000 0000 0000 2262 0a17 0a07 -6861 6e64 6c65 7212 0c2f 6170 692f 7461 -7267 6574 7322 4708 0011 0000 0000 0000 -0000 1a12 0900 0000 0000 00e0 3f11 0000 -0000 0000 0000 1a12 09cd cccc cccc ccec -3f11 0000 0000 0000 0000 1a12 09ae 47e1 -7a14 aeef 3f11 0000 0000 0000 0000 2260 -0a15 0a07 6861 6e64 6c65 7212 0a2f 636f -6e73 6f6c 6573 2f22 4708 0011 0000 0000 -0000 0000 1a12 0900 0000 0000 00e0 3f11 -0000 0000 0000 0000 1a12 09cd cccc cccc -ccec 3f11 0000 0000 0000 0000 1a12 09ae -47e1 7a14 aeef 3f11 0000 0000 0000 0000 -225c 0a11 0a07 6861 6e64 6c65 7212 062f -6772 6170 6822 4708 0011 0000 0000 0000 -0000 1a12 0900 0000 0000 00e0 3f11 0000 -0000 0000 0000 1a12 09cd cccc cccc ccec -3f11 0000 0000 0000 0000 1a12 09ae 47e1 -7a14 aeef 3f11 0000 0000 0000 0000 225b -0a10 0a07 6861 6e64 6c65 7212 052f 6865 -6170 2247 0800 1100 0000 0000 0000 001a -1209 0000 0000 0000 e03f 1100 0000 0000 -0000 001a 1209 cdcc cccc cccc ec3f 1100 -0000 0000 0000 001a 1209 ae47 e17a 14ae -ef3f 1100 0000 0000 0000 0022 5e0a 130a -0768 616e 646c 6572 1208 2f73 7461 7469 -632f 2247 0800 1100 0000 0000 0000 001a -1209 0000 0000 0000 e03f 1100 0000 0000 -0000 001a 1209 cdcc cccc cccc ec3f 1100 -0000 0000 0000 001a 1209 ae47 e17a 14ae -ef3f 1100 0000 0000 0000 0022 600a 150a -0768 616e 646c 6572 120a 7072 6f6d 6574 -6865 7573 2247 083b 1100 0000 00e0 b4fc -401a 1209 0000 0000 0000 e03f 1100 0000 -0000 349f 401a 1209 cdcc cccc cccc ec3f -1100 0000 0000 08a0 401a 1209 ae47 e17a -14ae ef3f 1100 0000 0000 0aa0 405c 0a19 -7072 6f63 6573 735f 6370 755f 7365 636f -6e64 735f 746f 7461 6c12 3054 6f74 616c -2075 7365 7220 616e 6420 7379 7374 656d -2043 5055 2074 696d 6520 7370 656e 7420 -696e 2073 6563 6f6e 6473 2e18 0022 0b1a -0909 a470 3d0a d7a3 d03f 4f0a 1270 726f -6365 7373 5f67 6f72 6f75 7469 6e65 7312 -2a4e 756d 6265 7220 6f66 2067 6f72 6f75 -7469 6e65 7320 7468 6174 2063 7572 7265 -6e74 6c79 2065 7869 7374 2e18 0122 0b12 -0909 0000 0000 0000 5140 4a0a 0f70 726f -6365 7373 5f6d 6178 5f66 6473 1228 4d61 -7869 6d75 6d20 6e75 6d62 6572 206f 6620 -6f70 656e 2066 696c 6520 6465 7363 7269 -7074 6f72 732e 1801 220b 1209 0900 0000 -0000 00c0 4043 0a10 7072 6f63 6573 735f -6f70 656e 5f66 6473 1220 4e75 6d62 6572 -206f 6620 6f70 656e 2066 696c 6520 6465 -7363 7269 7074 6f72 732e 1801 220b 1209 -0900 0000 0000 003d 404e 0a1d 7072 6f63 -6573 735f 7265 7369 6465 6e74 5f6d 656d -6f72 795f 6279 7465 7312 1e52 6573 6964 -656e 7420 6d65 6d6f 7279 2073 697a 6520 -696e 2062 7974 
6573 2e18 0122 0b12 0909 -0000 0000 004b 8841 630a 1a70 726f 6365 -7373 5f73 7461 7274 5f74 696d 655f 7365 -636f 6e64 7312 3653 7461 7274 2074 696d -6520 6f66 2074 6865 2070 726f 6365 7373 -2073 696e 6365 2075 6e69 7820 6570 6f63 -6820 696e 2073 6563 6f6e 6473 2e18 0122 -0b12 0909 3d0a 172d e831 d541 4c0a 1c70 -726f 6365 7373 5f76 6972 7475 616c 5f6d -656d 6f72 795f 6279 7465 7312 1d56 6972 -7475 616c 206d 656d 6f72 7920 7369 7a65 -2069 6e20 6279 7465 732e 1801 220b 1209 -0900 0000 0020 12c0 415f 0a27 7072 6f6d -6574 6865 7573 5f64 6e73 5f73 645f 6c6f -6f6b 7570 5f66 6169 6c75 7265 735f 746f -7461 6c12 2554 6865 206e 756d 6265 7220 -6f66 2044 4e53 2d53 4420 6c6f 6f6b 7570 -2066 6169 6c75 7265 732e 1800 220b 1a09 -0900 0000 0000 0000 004f 0a1f 7072 6f6d -6574 6865 7573 5f64 6e73 5f73 645f 6c6f -6f6b 7570 735f 746f 7461 6c12 1d54 6865 -206e 756d 6265 7220 6f66 2044 4e53 2d53 -4420 6c6f 6f6b 7570 732e 1800 220b 1a09 -0900 0000 0000 0008 40cf 010a 2a70 726f -6d65 7468 6575 735f 6576 616c 7561 746f -725f 6475 7261 7469 6f6e 5f6d 696c 6c69 -7365 636f 6e64 7312 2c54 6865 2064 7572 -6174 696f 6e20 666f 7220 616c 6c20 6576 -616c 7561 7469 6f6e 7320 746f 2065 7865 -6375 7465 2e18 0222 7122 6f08 0b11 0000 -0000 0000 2240 1a12 097b 14ae 47e1 7a84 -3f11 0000 0000 0000 0000 1a12 099a 9999 -9999 99a9 3f11 0000 0000 0000 0000 1a12 -0900 0000 0000 00e0 3f11 0000 0000 0000 -0000 1a12 09cd cccc cccc ccec 3f11 0000 -0000 0000 f03f 1a12 09ae 47e1 7a14 aeef -3f11 0000 0000 0000 f03f a301 0a39 7072 -6f6d 6574 6865 7573 5f6c 6f63 616c 5f73 -746f 7261 6765 5f63 6865 636b 706f 696e -745f 6475 7261 7469 6f6e 5f6d 696c 6c69 -7365 636f 6e64 7312 5754 6865 2064 7572 -6174 696f 6e20 2869 6e20 6d69 6c6c 6973 -6563 6f6e 6473 2920 6974 2074 6f6f 6b20 -746f 2063 6865 636b 706f 696e 7420 696e -2d6d 656d 6f72 7920 6d65 7472 6963 7320 -616e 6420 6865 6164 2063 6875 6e6b 732e -1801 220b 1209 0900 0000 0000 0000 00f2 -010a 2870 726f 6d65 7468 6575 735f 6c6f -6361 6c5f 7374 6f72 6167 655f 6368 756e -6b5f 6f70 735f 746f 7461 6c12 3354 6865 -2074 6f74 616c 206e 756d 6265 7220 6f66 -2063 6875 6e6b 206f 7065 7261 7469 6f6e -7320 6279 2074 6865 6972 2074 7970 652e -1800 221b 0a0e 0a04 7479 7065 1206 6372 -6561 7465 1a09 0900 0000 0000 b880 4022 -1c0a 0f0a 0474 7970 6512 0770 6572 7369 -7374 1a09 0900 0000 0000 c05b 4022 180a -0b0a 0474 7970 6512 0370 696e 1a09 0900 -0000 0000 807b 4022 1e0a 110a 0474 7970 -6512 0974 7261 6e73 636f 6465 1a09 0900 -0000 0000 a06b 4022 1a0a 0d0a 0474 7970 -6512 0575 6e70 696e 1a09 0900 0000 0000 -807b 40c4 010a 3c70 726f 6d65 7468 6575 -735f 6c6f 6361 6c5f 7374 6f72 6167 655f -696e 6465 7869 6e67 5f62 6174 6368 5f6c -6174 656e 6379 5f6d 696c 6c69 7365 636f -6e64 7312 3751 7561 6e74 696c 6573 2066 -6f72 2062 6174 6368 2069 6e64 6578 696e -6720 6c61 7465 6e63 6965 7320 696e 206d -696c 6c69 7365 636f 6e64 732e 1802 2249 -2247 0801 1100 0000 0000 0000 001a 1209 -0000 0000 0000 e03f 1100 0000 0000 0000 -001a 1209 cdcc cccc cccc ec3f 1100 0000 -0000 0000 001a 1209 ae47 e17a 14ae ef3f -1100 0000 0000 0000 00bf 010a 2d70 726f -6d65 7468 6575 735f 6c6f 6361 6c5f 7374 -6f72 6167 655f 696e 6465 7869 6e67 5f62 -6174 6368 5f73 697a 6573 1241 5175 616e -7469 6c65 7320 666f 7220 696e 6465 7869 -6e67 2062 6174 6368 2073 697a 6573 2028 -6e75 6d62 6572 206f 6620 6d65 7472 6963 -7320 7065 7220 6261 7463 6829 2e18 0222 -4922 4708 0111 0000 0000 0000 0040 1a12 -0900 0000 0000 00e0 3f11 0000 0000 0000 -0040 1a12 09cd cccc cccc ccec 3f11 0000 -0000 0000 0040 1a12 09ae 47e1 7a14 aeef -3f11 0000 0000 0000 0040 660a 3070 726f 
-6d65 7468 6575 735f 6c6f 6361 6c5f 7374 -6f72 6167 655f 696e 6465 7869 6e67 5f71 -7565 7565 5f63 6170 6163 6974 7912 2354 -6865 2063 6170 6163 6974 7920 6f66 2074 -6865 2069 6e64 6578 696e 6720 7175 6575 -652e 1801 220b 1209 0900 0000 0000 00d0 -406d 0a2e 7072 6f6d 6574 6865 7573 5f6c -6f63 616c 5f73 746f 7261 6765 5f69 6e64 -6578 696e 675f 7175 6575 655f 6c65 6e67 -7468 122c 5468 6520 6e75 6d62 6572 206f -6620 6d65 7472 6963 7320 7761 6974 696e -6720 746f 2062 6520 696e 6465 7865 642e -1801 220b 1209 0900 0000 0000 0000 0067 -0a2f 7072 6f6d 6574 6865 7573 5f6c 6f63 -616c 5f73 746f 7261 6765 5f69 6e67 6573 -7465 645f 7361 6d70 6c65 735f 746f 7461 -6c12 2554 6865 2074 6f74 616c 206e 756d -6265 7220 6f66 2073 616d 706c 6573 2069 -6e67 6573 7465 642e 1800 220b 1a09 0900 -0000 0080 27cd 40c3 010a 3770 726f 6d65 -7468 6575 735f 6c6f 6361 6c5f 7374 6f72 -6167 655f 696e 7661 6c69 645f 7072 656c -6f61 645f 7265 7175 6573 7473 5f74 6f74 -616c 1279 5468 6520 746f 7461 6c20 6e75 -6d62 6572 206f 6620 7072 656c 6f61 6420 -7265 7175 6573 7473 2072 6566 6572 7269 -6e67 2074 6f20 6120 6e6f 6e2d 6578 6973 -7465 6e74 2073 6572 6965 732e 2054 6869 -7320 6973 2061 6e20 696e 6469 6361 7469 -6f6e 206f 6620 6f75 7464 6174 6564 206c -6162 656c 2069 6e64 6578 6573 2e18 0022 -0b1a 0909 0000 0000 0000 0000 6f0a 2a70 -726f 6d65 7468 6575 735f 6c6f 6361 6c5f -7374 6f72 6167 655f 6d65 6d6f 7279 5f63 -6875 6e6b 6465 7363 7312 3254 6865 2063 -7572 7265 6e74 206e 756d 6265 7220 6f66 -2063 6875 6e6b 2064 6573 6372 6970 746f -7273 2069 6e20 6d65 6d6f 7279 2e18 0122 -0b12 0909 0000 0000 0020 8f40 9c01 0a26 -7072 6f6d 6574 6865 7573 5f6c 6f63 616c -5f73 746f 7261 6765 5f6d 656d 6f72 795f -6368 756e 6b73 1263 5468 6520 6375 7272 -656e 7420 6e75 6d62 6572 206f 6620 6368 -756e 6b73 2069 6e20 6d65 6d6f 7279 2c20 -6578 636c 7564 696e 6720 636c 6f6e 6564 -2063 6875 6e6b 7320 2869 2e65 2e20 6368 -756e 6b73 2077 6974 686f 7574 2061 2064 -6573 6372 6970 746f 7229 2e18 0122 0b12 -0909 0000 0000 00e8 8d40 600a 2670 726f -6d65 7468 6575 735f 6c6f 6361 6c5f 7374 -6f72 6167 655f 6d65 6d6f 7279 5f73 6572 -6965 7312 2754 6865 2063 7572 7265 6e74 -206e 756d 6265 7220 6f66 2073 6572 6965 -7320 696e 206d 656d 6f72 792e 1801 220b -1209 0900 0000 0000 807a 40b7 010a 3570 -726f 6d65 7468 6575 735f 6c6f 6361 6c5f -7374 6f72 6167 655f 7065 7273 6973 745f -6c61 7465 6e63 795f 6d69 6372 6f73 6563 -6f6e 6473 1231 4120 7375 6d6d 6172 7920 -6f66 206c 6174 656e 6369 6573 2066 6f72 -2070 6572 7369 7374 696e 6720 6561 6368 -2063 6875 6e6b 2e18 0222 4922 4708 6f11 -1c2f dd24 e68c cc40 1a12 0900 0000 0000 -00e0 3f11 8d97 6e12 8360 3e40 1a12 09cd -cccc cccc ccec 3f11 0ad7 a370 3d62 6b40 -1a12 09ae 47e1 7a14 aeef 3f11 7b14 ae47 -e1b6 7240 6a0a 2f70 726f 6d65 7468 6575 -735f 6c6f 6361 6c5f 7374 6f72 6167 655f -7065 7273 6973 745f 7175 6575 655f 6361 -7061 6369 7479 1228 5468 6520 746f 7461 -6c20 6361 7061 6369 7479 206f 6620 7468 -6520 7065 7273 6973 7420 7175 6575 652e -1801 220b 1209 0900 0000 0000 0090 407a -0a2d 7072 6f6d 6574 6865 7573 5f6c 6f63 -616c 5f73 746f 7261 6765 5f70 6572 7369 -7374 5f71 7565 7565 5f6c 656e 6774 6812 -3a54 6865 2063 7572 7265 6e74 206e 756d -6265 7220 6f66 2063 6875 6e6b 7320 7761 -6974 696e 6720 696e 2074 6865 2070 6572 -7369 7374 2071 7565 7565 2e18 0122 0b12 -0909 0000 0000 0000 0000 ac01 0a29 7072 -6f6d 6574 6865 7573 5f6c 6f63 616c 5f73 -746f 7261 6765 5f73 6572 6965 735f 6f70 -735f 746f 7461 6c12 3454 6865 2074 6f74 -616c 206e 756d 6265 7220 6f66 2073 6572 -6965 7320 6f70 6572 6174 696f 6e73 2062 -7920 7468 6569 7220 7479 
7065 2e18 0022 -1b0a 0e0a 0474 7970 6512 0663 7265 6174 -651a 0909 0000 0000 0000 0040 222a 0a1d -0a04 7479 7065 1215 6d61 696e 7465 6e61 -6e63 655f 696e 5f6d 656d 6f72 791a 0909 -0000 0000 0000 1440 d601 0a2d 7072 6f6d -6574 6865 7573 5f6e 6f74 6966 6963 6174 -696f 6e73 5f6c 6174 656e 6379 5f6d 696c -6c69 7365 636f 6e64 7312 584c 6174 656e -6379 2071 7561 6e74 696c 6573 2066 6f72 -2073 656e 6469 6e67 2061 6c65 7274 206e -6f74 6966 6963 6174 696f 6e73 2028 6e6f -7420 696e 636c 7564 696e 6720 6472 6f70 -7065 6420 6e6f 7469 6669 6361 7469 6f6e -7329 2e18 0222 4922 4708 0011 0000 0000 -0000 0000 1a12 0900 0000 0000 00e0 3f11 -0000 0000 0000 0000 1a12 09cd cccc cccc -ccec 3f11 0000 0000 0000 0000 1a12 09ae -47e1 7a14 aeef 3f11 0000 0000 0000 0000 -680a 2770 726f 6d65 7468 6575 735f 6e6f -7469 6669 6361 7469 6f6e 735f 7175 6575 -655f 6361 7061 6369 7479 122e 5468 6520 -6361 7061 6369 7479 206f 6620 7468 6520 -616c 6572 7420 6e6f 7469 6669 6361 7469 -6f6e 7320 7175 6575 652e 1801 220b 1209 -0900 0000 0000 0059 4067 0a25 7072 6f6d -6574 6865 7573 5f6e 6f74 6966 6963 6174 -696f 6e73 5f71 7565 7565 5f6c 656e 6774 -6812 2f54 6865 206e 756d 6265 7220 6f66 -2061 6c65 7274 206e 6f74 6966 6963 6174 -696f 6e73 2069 6e20 7468 6520 7175 6575 -652e 1801 220b 1209 0900 0000 0000 0000 -009e 020a 3070 726f 6d65 7468 6575 735f -7275 6c65 5f65 7661 6c75 6174 696f 6e5f -6475 7261 7469 6f6e 5f6d 696c 6c69 7365 -636f 6e64 7312 2354 6865 2064 7572 6174 -696f 6e20 666f 7220 6120 7275 6c65 2074 -6f20 6578 6563 7574 652e 1802 2260 0a15 -0a09 7275 6c65 5f74 7970 6512 0861 6c65 -7274 696e 6722 4708 3711 0000 0000 0000 -2840 1a12 0900 0000 0000 00e0 3f11 0000 -0000 0000 0000 1a12 09cd cccc cccc ccec -3f11 0000 0000 0000 0000 1a12 09ae 47e1 -7a14 aeef 3f11 0000 0000 0000 0840 2261 -0a16 0a09 7275 6c65 5f74 7970 6512 0972 -6563 6f72 6469 6e67 2247 0837 1100 0000 -0000 002e 401a 1209 0000 0000 0000 e03f -1100 0000 0000 0000 001a 1209 cdcc cccc -cccc ec3f 1100 0000 0000 0000 001a 1209 -ae47 e17a 14ae ef3f 1100 0000 0000 0008 -4069 0a29 7072 6f6d 6574 6865 7573 5f72 -756c 655f 6576 616c 7561 7469 6f6e 5f66 -6169 6c75 7265 735f 746f 7461 6c12 2d54 -6865 2074 6f74 616c 206e 756d 6265 7220 -6f66 2072 756c 6520 6576 616c 7561 7469 -6f6e 2066 6169 6c75 7265 732e 1800 220b -1a09 0900 0000 0000 0000 0060 0a21 7072 -6f6d 6574 6865 7573 5f73 616d 706c 6573 -5f71 7565 7565 5f63 6170 6163 6974 7912 -2c43 6170 6163 6974 7920 6f66 2074 6865 -2071 7565 7565 2066 6f72 2075 6e77 7269 -7474 656e 2073 616d 706c 6573 2e18 0122 -0b12 0909 0000 0000 0000 b040 da01 0a1f -7072 6f6d 6574 6865 7573 5f73 616d 706c -6573 5f71 7565 7565 5f6c 656e 6774 6812 -a701 4375 7272 656e 7420 6e75 6d62 6572 -206f 6620 6974 656d 7320 696e 2074 6865 -2071 7565 7565 2066 6f72 2075 6e77 7269 -7474 656e 2073 616d 706c 6573 2e20 4561 -6368 2069 7465 6d20 636f 6d70 7269 7365 -7320 616c 6c20 7361 6d70 6c65 7320 6578 -706f 7365 6420 6279 206f 6e65 2074 6172 -6765 7420 6173 206f 6e65 206d 6574 7269 -6320 6661 6d69 6c79 2028 692e 652e 206d -6574 7269 6373 206f 6620 7468 6520 7361 -6d65 206e 616d 6529 2e18 0122 0b12 0909 -0000 0000 0000 0000 d902 0a29 7072 6f6d -6574 6865 7573 5f74 6172 6765 745f 696e -7465 7276 616c 5f6c 656e 6774 685f 7365 -636f 6e64 7312 2141 6374 7561 6c20 696e -7465 7276 616c 7320 6265 7477 6565 6e20 -7363 7261 7065 732e 1802 2282 010a 0f0a -0869 6e74 6572 7661 6c12 0331 3573 226f -0804 1100 0000 0000 804d 401a 1209 7b14 -ae47 e17a 843f 1100 0000 0000 002c 401a -1209 9a99 9999 9999 a93f 1100 0000 0000 -002c 401a 1209 0000 0000 0000 e03f 1100 -0000 0000 
002e 401a 1209 cdcc cccc cccc -ec3f 1100 0000 0000 002e 401a 1209 ae47 -e17a 14ae ef3f 1100 0000 0000 002e 4022 -8101 0a0e 0a08 696e 7465 7276 616c 1202 -3173 226f 083a 1100 0000 0000 003c 401a -1209 7b14 ae47 e17a 843f 1100 0000 0000 -0000 001a 1209 9a99 9999 9999 a93f 1100 -0000 0000 0000 001a 1209 0000 0000 0000 -e03f 1100 0000 0000 0000 001a 1209 cdcc -cccc cccc ec3f 1100 0000 0000 00f0 3f1a -1209 ae47 e17a 14ae ef3f 1100 0000 0000 -00f0 3f \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz deleted file mode 100644 index 62fccb6165..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz +++ /dev/null @@ -1,129 +0,0 @@ -1f8b 0808 efa0 c754 0003 7072 6f74 6f62 -7566 00ed 594d 8c1c c515 9eb1 8d3d 5b86 -6037 265e 8c4d ca03 c4bb ceee cc9a 9f58 -01cc f6ca 4424 041b 8837 21c8 24ed daee -9a99 cef6 1f55 d578 c7e4 b004 0e39 8088 -8448 048a 124b 4442 9110 e110 25b9 c54a -9072 01c5 9724 4a24 2472 413e 448a 8592 -1b87 bcea aeda eeea 99d9 3530 49a4 68e7 -b0bb 5355 fdde abf7 bef7 bdf7 7a3f 6ca0 -664f 88c4 61f4 8994 72e1 7829 23c2 8f23 -27f4 5d16 73ea c691 c7ad cf2d f628 fed2 -e2e2 c358 9dc3 0111 3472 7dca b11f e1f2 -d9d6 e496 e6a3 e86a b4a3 4722 2fa0 ccaa -b79b f737 6abb 6bea b3cf 9ac8 ff78 6fbe -bcf6 cedb f2f3 7763 ed8d fbff 766e cf1b -ff28 d69a df44 5621 7847 9bc0 2fc1 c727 -7e09 ed2d c45f dd26 89df 0ea9 60be 3b46 -1d67 d0f5 850e 94e9 008f b2fe f834 74d0 -8d85 865d 8506 8791 a84b ffa3 de12 8475 -e938 2352 f116 208c c701 e563 84d4 e368 -77a1 617b bbcb 48d2 1b9f f4d3 6857 21fd -aa76 8f92 647c c2bf 85ae 2b84 37da 5c40 -e6ba 6374 8de9 fc84 c590 0c3d 9aca f0de -bdfb f40b bffd 5763 fe9f 7659 8314 f0fb -9fbf 6897 35b4 dfbd 65fb d397 7f60 9735 -1c43 7f7e f5cd 975e b3df 6fa0 bd06 fb70 -ff1c 7596 fa82 720b 0f50 8edc cce8 263b -b0c9 339b 3cb3 c933 5afa ff2f cfc8 13f6 -5b17 ed01 0d73 cc1e d090 af99 1a60 ed3b -e8ba 32cd 7047 c482 04d6 cd8b f217 8ed2 -7089 321c 770c bae1 3824 1e6d 4dd6 9af7 -a29d 689b 1b7b d4da 7adb dcdc 085b d135 -68bb fc33 f6ac ad00 cd7d 13b9 b5ab 27ec -4b0d 34a9 b4f3 0470 45cb 2c77 b0c4 72f9 -ee26 cd7d 02ec 6cd2 dc26 cd7d 6ce1 ff73 -9a7b ef17 1f0e d2dc 1d3f 19a4 b9c6 f941 -9a43 e7ed c7d1 0d20 d5a5 9c3b 6e92 3a6a -2053 6437 9793 5dca 81ea c006 ccfb 5cd0 -101f 7ff8 6b58 f821 d04e 4223 2169 676d -8eab 3577 028d fd34 91dd dac5 f987 90a5 -8577 6316 a7c2 8f80 bf0e 9f5c 23cf 6215 -8b1e 11d8 4d19 0391 411f d315 9f8b d664 -bdb9 d352 b458 7bc4 7e00 5dab e585 64c5 -e9c0 9439 7582 acf8 611a 9618 3906 ab70 -c70f 28f6 2877 999f 8898 7153 d405 fb38 -daa5 45c9 f399 2c7c f2a3 c838 669f 4407 -b40c 6062 df03 cb9d 9086 31e4 79ce d437 -7d55 2de3 7c39 e3e9 124d 97c4 7de5 7b0b -2eda a7c5 018e 9870 a48f 7544 accf 9f92 -6bb9 dfc1 4040 0156 a741 6ae4 529c 46fe -0aa6 49ec f68c 88e4 3a8e a1bd b397 8efc -71e1 41b4 5feb 78d2 6722 2581 69f1 81af -e7ab 1b1a 8cad 0b0b 0e3a 5420 d2f1 22b0 -db73 8238 5e4e 13a7 43fc 2005 af28 24dd -2a6b 5611 a2fb 4e9e 9a3d 751f cecf 627d -56c3 47a3 ff21 f499 51f2 b5dc 03eb c8ad -c86b d87f a8a3 c325 81f4 4912 a404 025b -7e81 1104 bef6 f88c 94ad b770 2786 1c08 -02ac 9e82 25c0 6c0c 38a5 6e2a a82c b94f -34e3 c64e 95ba 4d99 6c4f ed91 e9f6 ac91 -e2af bc2c 3f3f 9bff 88f4 7079 7e90 1e2e -cfbf 5a47 5f28 5d28 885d 8827 871b 912e -75dc 1e75 9793 d88f c488 fb3d 6adc 6f2a -7b27 536c 4f63 1fd0 068e 94b7 2c64 0118 -6615 3654 5dce 9801 58d5 8353 69b4 5cc9 -925a ed83 3a9a 5ac7 4878 0432 50c7 f376 -6993 a8b4 58d9 2199 924c 
f97d a92f f1ef -332c fa49 d66e dd88 3e85 b6c9 2fd6 7697 -5122 a88e faaf 57ed e67e 74ad dadc 0122 -38f0 8ade bd70 da6e 4eca 4e2d dbdd 9af8 -d15a 0ff6 94dd bc09 ca52 be33 21a0 6e73 -d9ce e9fd f3cb 7673 1ff4 6ff9 fe55 6964 -3efb 561d dd33 f2ce 7ee4 01bb 455d 6789 -08b7 e7e4 6fc5 fa66 6c8e 3e92 9248 00ff -f00c 78d9 49ac 1fac be48 2b9e 9330 fc32 -d486 fa58 aacf 6fea 68f6 4a6f 9175 a0d6 -8269 f69a c1b9 fd79 973a 5504 5623 08c2 -921f 991e b8c0 6071 cbd7 aa17 182c 6eb0 -d641 731b db0f 8d59 0a40 2409 717d d187 -061f 10a8 bf69 a65d bb48 76d8 44f8 453b -44ad 2b55 13d0 a82b 7a39 b50c fae1 2cf1 -85d4 0219 b7a4 9452 af9a 4f5d d45e 475b -17c6 10ea 399c 8449 60b2 6f35 abd4 11ac -9f29 b3e5 eaa1 77ec dfd5 d1d1 7514 010d -fa9e 9330 1ac4 c4ab 4e49 fd61 0ad5 d962 -5862 b443 1953 1726 388a a3d9 acec cb82 -092d 07e0 bb85 177b 3e98 2849 46fa c377 -73b2 9215 3a15 1ea4 8107 c9b0 4403 e5ac -8112 121b 8c6f de41 15be 8c5d 6495 e7d6 -6d59 ecf3 1e64 807f 4a8d 4096 76d9 d346 -70f0 0bf6 8fea e8b3 57a4 905b ee3a ca4a -1a66 a0c4 b841 ea49 37b9 411c 51cd b3c0 -d82d dad2 5fce fa30 47a6 02dc 58d8 396d -5877 e979 fbcc c6c6 e57e b70e 0d37 2edf -1d71 fdd5 73f6 afea e8ce 911a 14f9 9608 -aff4 df82 230b 98a7 6148 5896 7305 c149 -1a51 0f4a 0f50 023c 925d 5933 45bc 7b7f -fbdd 5bde 7fee 6d83 299e ff61 643d 73e6 -5e83 29a0 254d 8e2d 2d1b 4c91 95e8 5f32 -fbdb eb24 95b6 bb42 1453 05c6 ab74 a19e -18c6 16df b7cf ad43 aaa6 2a45 1677 ad0b -14cd 1910 930d 54d7 6aaf d7d1 f448 dd79 -6c4b b5f8 8ea1 ac91 23e0 6315 6360 e4e6 -6174 406d 5e1f 12e8 2768 44a0 7905 3e51 -005c 3bbb c7fe 9359 7ea2 58f8 1d45 007c -78d5 fcc6 83f9 2adc be5c 8638 8db2 f4c9 -de55 6043 0e54 a358 f634 3ac3 3c16 2709 -a498 7168 ad2a 8d67 a8eb 196d b379 ad0a -c65a c38a d1b0 6b0c 09f7 6376 17dd ba81 -2285 b0b6 598e 8629 50f0 1a0a ab1f 6f31 -ea2c 4b03 ea14 6df2 88ee f3e6 c1ee 1acb -272b 4db5 1c80 2732 8919 681a 996d 1029 -88c6 51e5 d1a9 613d c215 46a3 6137 09fa -7459 c304 0303 9967 aa68 7d22 15be 9175 -55f7 5426 a5d9 6159 9739 a678 66e4 c474 -061d 2c69 d24d 4005 5433 c72b 80ca f6b3 -10a4 d159 e60b c821 dd1d 98a1 7ed3 fe6b -dd98 c94c 0d0a 4daf d58f 0f90 952f 6868 -8268 843e fc45 c9f0 f238 76e3 3061 8017 -9ecd 5dba 5da1 2b09 140d 4fd2 0e14 439c -bfee c284 67df f246 0adc 0350 ebab 02a9 -9b2b 7559 9003 5887 1fd3 5518 ff65 8b11 -a75c b223 398a 81e7 d5ed d6e6 f183 0b6e -3628 eb7d 2042 2ace 5279 1597 9124 7f0b -fbdd 3acc 1e0d 7dc4 da7a e44e 0e43 e2b6 -1c19 ab27 860c 8933 f6e0 9038 3304 7dad -214d 706b 4813 dcb2 9b4f d781 900b 23b6 -1c91 36dc a5f6 eff9 af0c aaff 06f1 48e5 -4433 2000 00 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/test.gz b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/test.gz deleted file mode 100644 index 3f8199dfb2..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/test.gz +++ /dev/null @@ -1,163 +0,0 @@ -1f8b 0808 2aa1 c754 0003 7465 7874 00b5 -5b5d 939b 3816 7def 5fa1 ea79 99a9 4d3c -601b db3c f4c3 5426 55f3 309b ca6e 7ab7 -6a9e 281a d436 150c 04c4 a4bd 5df3 dff7 -4a88 361f 025d 094f 1e92 34e8 1cae 8ea4 -ab7b 04fd 03f9 ede3 ef9f c989 b122 28e9 -b79a 562c 88eb 3264 499e 05e7 242a f38a -4679 1657 e4f1 44c9 6f8f 8f9f 896c 46d2 -90d1 2c4a 6845 928c 749b aeee 7e20 8f7f -7cfe 8861 adea f339 2c2f 77fa a6af a730 -8b53 5a3e dcff 7cff ee5b 1d66 2c49 e9c3 -bdb3 f2ee ff22 ce12 027f 3101 9621 80ee -7659 90a8 28af 3366 8eeb 2042 f887 558b -7553 d158 a8a7 a4b1 d450 7259 2a69 84ee -e28a e4e7 3365 6512 dd40 d429 2e1b 6527 -b96c e5ed 10da 6a6c 4c31 0043 cbf2 7213 -9915 
4c96 22ab 9816 48dc d02d 10d8 8440 -050d ca30 3bd2 db89 ace2 5b22 b592 6fa9 -e092 74a9 ec46 3403 0216 9647 7a8b cc3c -c565 29ba 9a6b 81e0 2de1 02b1 cd28 3a60 -f8b9 ca53 5a2d 2f1c 2698 2c44 9e62 b294 -f84a 6729 b029 4107 7a2c c3e2 b458 5a05 -8b85 ac2a 164b 491b 2a4b 394d c01d d889 -86c5 6225 c724 1642 2a48 2c75 144c 9632 -1a60 3ba8 8ac1 ed68 f96a 57f2 5868 a9e6 -b194 b325 b354 d40c 7e05 1665 0e45 dc89 -d68a bdca dd38 fbd5 7aef dd84 90cb e21e -bcc3 6ab7 59df 8690 336e 9cc3 7eb5 396c -8df5 eeb0 425c 7bff 70d8 ad3c 47fe 712d -46a0 4fe8 fa60 96c7 16bc 4afe 4783 a70b -a30a dfcd ef09 cf2d eeab cd76 07af 74d8 -d7fb 26b6 1a81 524c 6a0c 6a16 a675 cd9d -a67a abac 0c07 e98f d158 ac0c 5827 3c29 -c694 819d 9144 0fb1 34ba 6604 6889 4c2c -edb4 4e73 2674 4e2c 1cce cab1 9ac0 4dd4 -427a d359 ad26 fca4 4629 2d6a 81f5 3427 -31d6 0c6b 32f5 ca4d 5942 8c7e 7aac a587 -3423 3051 0fed 1667 959b f477 1ad5 1038 -2b33 6802 c7aa 6560 fb26 b59a b16a 334a -a150 c6ae 0e0b c5ea 83f4 6f93 da4c f8ae -195d b408 537b 8644 6215 c119 b149 41d4 -0e6a 460f 1dc0 c267 e1c1 5851 d08e 6a52 -9749 1f34 230d 0283 334c 6bdf b527 f017 -1368 1866 0cd0 66bb 3d1c b07a 619c 4e15 -b09c 8529 7914 7f67 f5f9 8996 247f ee39 -9e8a 9cc3 982a 8d4e 0b17 4fa6 e59d e2de -6b94 c7d0 edb5 e3dc bf53 4ac3 ff93 c70f -f7b0 8728 e3ac 0ac8 9c74 c292 3537 359e -6ccc 3030 65a3 0638 5786 87f9 96b0 79dc -8c31 1bb7 9d73 6673 1169 ad99 2918 ad85 -de9c e914 195b 2dbd 2e08 8cb1 3fb3 62c0 -eb84 7368 5ab1 d456 0ba1 1812 6868 d22c -f046 9269 6d1a 46b0 91e3 c2c9 a587 5939 -356b 1673 e1f4 5e0d 2ddf d870 1988 8800 -1bdb 352b 0623 0911 860d 239f c279 e1a4 -c300 0d3d 9b05 1e2d 19ca b5e9 0453 1a30 -bd5c 3898 8171 33c4 a245 d25a 379d 4023 -27a6 1747 0fc1 bb37 3328 5a16 9d7f d3a9 -32f4 637a 51b4 0823 0b67 8c46 2b83 3071 -3a71 148e 4caf 0f06 84f4 71ce d65f 4021 -7c98 e31d 9650 341c bb2d 52b1 9e27 5b6f -f79d 7758 5ae1 a6fc 1c5c 8f68 05cd 8b3a -685f 7a75 5d5d 5d81 a703 1252 5d2a 46cf -e4c3 e7ff 1096 9cc1 3515 3463 dc35 0d3f -1c9d 666c 8dde 740b 1819 6f18 d931 2ff3 -9a25 1938 af4f 6f16 b373 919d 4246 a2ba -2c21 9ef4 42e8 4b52 b151 309d f6c7 b03e -d23b c58d bd33 7cf4 397c 099e e38a fc33 -7c49 cef5 b963 7173 e83d 7986 7124 31ad -a232 2958 5e8e 2568 f1fd 47b6 570f aebf -1e3e 91f3 8a9b 9f0c 1ff5 06ec 3feb edf2 -7a34 e230 6992 1834 0bce f49c 432d d498 -db7f cbab a4b9 2acc f1d8 1bcf 73f4 4350 -b7f1 569b c3de f1fc 35fd 87b3 1f86 068b -bc64 019f 66ed fc20 5ff8 a566 e681 2630 -91db c610 6116 5152 67c9 0ba1 451e 9de6 -e6a4 82b8 1fac a281 bbda aed7 9bdd c1df -1e36 3b88 7624 e49f 49c9 ea30 edf7 efbf -cd45 9c8c 4a86 7e60 ca26 de6a eb6e f707 -dfe5 2a1e 3a71 c9a5 1ec4 1974 290e d23c -ff5a 17c1 7398 a435 0c47 bbc0 41c4 eb8c -fef5 d397 f75f 7e25 4d53 d236 ed86 8a22 -edac 7154 7b47 1735 225a 7d94 d8e8 da76 -7b45 54f4 cf30 ad43 587c dd4f 05d2 34e9 -7e63 dfde 21cf 3964 cd34 2512 0497 2051 -e590 9c68 5433 aa8a 5747 df9e 3ae1 21af -ddbd c671 c596 698b f696 a017 81c5 2725 -d660 5334 df70 89bb 3641 8839 45d6 1bc5 -9449 f308 966c 05d8 f048 83e8 44a3 af45 -9e64 0c33 837e 14bf 9871 bdfb 1349 20ff -c12c e5f3 e84a 0549 e5bd cc31 f218 45ec -d650 46c6 d0aa cebe 2a17 8761 606f a9c8 -12af 5ae4 430a 0815 76ab ee6a 6783 6365 -d186 6f87 a55c 504f 17be 1124 2561 9742 -b9a6 e69f a148 06b3 8057 fe98 87fb a8a4 -21e3 8706 9e7f 30c5 42ec 1594 27e2 6ba4 -ad31 38c9 00e8 af1d 5320 2bc3 ace2 27e9 -00df ba9e 29bc ceae 4fd6 8d63 92c5 5080 -65c7 e029 64d1 2968 7ecd e8d2 9f0d ff92 -0bb4 1259 5234 242d 6ef8 8b49 5798 7e7c -31cf 5664 5163 92f9 dcb6 8cce bf31 dd72 -3e91 1117 5234 29d2 359d 3dcd 8b99 fe74 -799b 28cd bc69 9afc 784d 126d 
1284 95d6 -34f9 c978 e234 9ca6 3345 a046 5363 bd00 -ef2f c55b 1088 d136 c518 0fef b79a d690 -6dc2 228c 1276 11c9 feed 0759 ddbf 8db3 -686b 3086 036e cdd6 3505 7377 fc7b 53c3 -0ea5 343b b2d3 a052 6d27 e4f7 3061 bc3f -b07b 3fc9 eed1 d8b8 5ff2 1166 bd92 204c -f63e 5270 f971 5085 e722 a573 9bb1 6c41 -5a08 a627 4a72 ed2e 3c81 db38 dbbd bee6 -4a32 a8de 9238 284a 9ae6 613c 7a73 ade8 -996c 7a7d 815d d267 5a96 72ec 4292 e5d9 -7b71 c8c0 5d72 454b d8ab 5640 9480 16bc -f6e2 439b 444d 0dc7 dd7b cd62 4889 316c -6c4f 3495 e38e dacc 6603 47a8 368b d7cf -0569 3445 49c0 0f1e 9af2 549e b38c aab2 -ced1 84d8 b805 58df cbf1 4334 337b 0c70 -1dcf 37ea cc6c 473a d1bf 03b7 16a5 75cc -073e 4af3 8cb6 0535 94e6 2bba 6a7f f89e -b013 0c32 4c8c ab06 883d a71f 9141 af79 -8f11 8598 8434 f373 a2c7 f2a6 f978 4920 -2e6a d978 bbd6 e753 591e 778a 88ce 6f9b -ffd2 6ec9 3cf4 6b99 c88b 0289 e323 4543 -a80a 8450 fade cc3e 4ebb ffcf a147 75c0 -c659 6df6 fb1b 9035 47c6 9b95 b7f1 6fc1 -26e8 76eb dd6a bbdb d8f1 3515 8303 c3bb -9af5 16b3 1cb2 82d8 e3a7 88a2 8490 9971 -5048 4800 b68e 98e0 d74c f509 14ac 54d3 -1e75 6a88 c914 d596 12b0 7017 f710 5750 -2831 fa24 d42c 7d8d ad97 f9c1 ded7 8f9e -a2dd 1c87 88a1 b39f 2980 27a0 e730 8147 -6661 16f1 ad57 a63e f1a6 4521 5296 b3e4 -59d6 0895 daa7 fede 5c24 df7a e6a7 a299 -d88e c467 46a4 4703 1e28 e787 41ed 8e15 -9779 51c0 96d5 6ba4 dc97 10d1 2872 a11e -356f 930d f123 1f6b 8ab7 2018 3b5f 04a6 -c964 aaa5 d107 232c 906a 9427 d7f8 2cfb -6875 cfb6 761d 6cf8 4ac3 a30a 5b66 2aa3 -e8a7 32d3 4c5b 55dc 659d d2e0 7a0c 8f3e -bc27 1ca8 39b3 c771 2b56 0f0a f82a 5a35 -f945 880a eb5a f5ae fff6 bca3 c572 2bde -d189 048a 58bc 0557 91ff 3538 aac7 b135 -6fc6 27f8 fa25 8c71 bf4b b854 c67f c340 -4d10 2f1f a929 62f1 8bb7 8b87 eaca 0eda -9a4b 3b1e ab1e a1eb 2116 bce2 ade7 b004 -114b fd0a 997d fba9 a157 d41e 1a84 2a69 -b547 1d83 ccfc 61b0 4388 db22 5dd5 d9f7 -3261 b01f b507 33aa d027 5847 1976 a2dd -d6f1 77da 5865 26fe 30aa 5d13 46cf fd8d -6022 70f2 915b 38de 1cc4 3c17 25cc 854a -bc4b 6d8f 9ce8 4b01 c621 e665 22b8 72d2 -7c8e 48c2 4afc d41c b7c1 08c2 34ba 48a7 -de1e c149 d580 07f6 2bf8 4b59 0e29 bba3 -9168 66fb 69a2 0b78 7558 c214 904d df3e -2ef8 2512 5f09 b4b7 a1f6 a5ec 3be5 6a44 -6558 a887 5143 a9d8 6ee6 11af edf5 877b -d71b 7ca2 245e 1bbb db1b 9179 3724 f346 -19c5 9ecb bf25 9729 9948 997d 42fe 7ad0 -84a1 c992 238e b55d 8f54 53c0 b90d d568 -1fb4 a6ba 1dd3 e813 017b 2643 aae1 c8f3 -41f3 168d 7bf3 71df feee ff2d f9e8 431a -5200 00 \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/text b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/text deleted file mode 100644 index f3d8c37844..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/testdata/text +++ /dev/null @@ -1,322 +0,0 @@ -# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
-# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="/",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/"} 0 -http_request_duration_microseconds_count{handler="/"} 0 -http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/alerts"} 0 -http_request_duration_microseconds_count{handler="/alerts"} 0 -http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/api/metrics"} 0 -http_request_duration_microseconds_count{handler="/api/metrics"} 0 -http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/api/query"} 0 -http_request_duration_microseconds_count{handler="/api/query"} 0 -http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/api/query_range"} 0 -http_request_duration_microseconds_count{handler="/api/query_range"} 0 -http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/api/targets"} 0 -http_request_duration_microseconds_count{handler="/api/targets"} 0 -http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/consoles/"} 0 -http_request_duration_microseconds_count{handler="/consoles/"} 0 -http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/graph"} 0 -http_request_duration_microseconds_count{handler="/graph"} 0 -http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/heap"} 0 -http_request_duration_microseconds_count{handler="/heap"} 0 -http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0 -http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0 -http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="/static/"} 0 -http_request_duration_microseconds_count{handler="/static/"} 0 
-http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384 -http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001 -http_request_duration_microseconds_count{handler="prometheus"} 119 -# HELP http_request_size_bytes The HTTP request sizes in bytes. -# TYPE http_request_size_bytes summary -http_request_size_bytes{handler="/",quantile="0.5"} 0 -http_request_size_bytes{handler="/",quantile="0.9"} 0 -http_request_size_bytes{handler="/",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/"} 0 -http_request_size_bytes_count{handler="/"} 0 -http_request_size_bytes{handler="/alerts",quantile="0.5"} 0 -http_request_size_bytes{handler="/alerts",quantile="0.9"} 0 -http_request_size_bytes{handler="/alerts",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/alerts"} 0 -http_request_size_bytes_count{handler="/alerts"} 0 -http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0 -http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0 -http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/api/metrics"} 0 -http_request_size_bytes_count{handler="/api/metrics"} 0 -http_request_size_bytes{handler="/api/query",quantile="0.5"} 0 -http_request_size_bytes{handler="/api/query",quantile="0.9"} 0 -http_request_size_bytes{handler="/api/query",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/api/query"} 0 -http_request_size_bytes_count{handler="/api/query"} 0 -http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0 -http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0 -http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/api/query_range"} 0 -http_request_size_bytes_count{handler="/api/query_range"} 0 -http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0 -http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0 -http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/api/targets"} 0 -http_request_size_bytes_count{handler="/api/targets"} 0 -http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0 -http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0 -http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/consoles/"} 0 -http_request_size_bytes_count{handler="/consoles/"} 0 -http_request_size_bytes{handler="/graph",quantile="0.5"} 0 -http_request_size_bytes{handler="/graph",quantile="0.9"} 0 -http_request_size_bytes{handler="/graph",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/graph"} 0 -http_request_size_bytes_count{handler="/graph"} 0 -http_request_size_bytes{handler="/heap",quantile="0.5"} 0 -http_request_size_bytes{handler="/heap",quantile="0.9"} 0 -http_request_size_bytes{handler="/heap",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/heap"} 0 -http_request_size_bytes_count{handler="/heap"} 0 -http_request_size_bytes{handler="/static/",quantile="0.5"} 0 -http_request_size_bytes{handler="/static/",quantile="0.9"} 0 -http_request_size_bytes{handler="/static/",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="/static/"} 0 -http_request_size_bytes_count{handler="/static/"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.5"} 291 
-http_request_size_bytes{handler="prometheus",quantile="0.9"} 291 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 291 -http_request_size_bytes_sum{handler="prometheus"} 34488 -http_request_size_bytes_count{handler="prometheus"} 119 -# HELP http_requests_total Total number of HTTP requests made. -# TYPE http_requests_total counter -http_requests_total{code="200",handler="prometheus",method="get"} 119 -# HELP http_response_size_bytes The HTTP response sizes in bytes. -# TYPE http_response_size_bytes summary -http_response_size_bytes{handler="/",quantile="0.5"} 0 -http_response_size_bytes{handler="/",quantile="0.9"} 0 -http_response_size_bytes{handler="/",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/"} 0 -http_response_size_bytes_count{handler="/"} 0 -http_response_size_bytes{handler="/alerts",quantile="0.5"} 0 -http_response_size_bytes{handler="/alerts",quantile="0.9"} 0 -http_response_size_bytes{handler="/alerts",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/alerts"} 0 -http_response_size_bytes_count{handler="/alerts"} 0 -http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0 -http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0 -http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/api/metrics"} 0 -http_response_size_bytes_count{handler="/api/metrics"} 0 -http_response_size_bytes{handler="/api/query",quantile="0.5"} 0 -http_response_size_bytes{handler="/api/query",quantile="0.9"} 0 -http_response_size_bytes{handler="/api/query",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/api/query"} 0 -http_response_size_bytes_count{handler="/api/query"} 0 -http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0 -http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0 -http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/api/query_range"} 0 -http_response_size_bytes_count{handler="/api/query_range"} 0 -http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0 -http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0 -http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/api/targets"} 0 -http_response_size_bytes_count{handler="/api/targets"} 0 -http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0 -http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0 -http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/consoles/"} 0 -http_response_size_bytes_count{handler="/consoles/"} 0 -http_response_size_bytes{handler="/graph",quantile="0.5"} 0 -http_response_size_bytes{handler="/graph",quantile="0.9"} 0 -http_response_size_bytes{handler="/graph",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/graph"} 0 -http_response_size_bytes_count{handler="/graph"} 0 -http_response_size_bytes{handler="/heap",quantile="0.5"} 0 -http_response_size_bytes{handler="/heap",quantile="0.9"} 0 -http_response_size_bytes{handler="/heap",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/heap"} 0 -http_response_size_bytes_count{handler="/heap"} 0 -http_response_size_bytes{handler="/static/",quantile="0.5"} 0 -http_response_size_bytes{handler="/static/",quantile="0.9"} 0 -http_response_size_bytes{handler="/static/",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="/static/"} 0 -http_response_size_bytes_count{handler="/static/"} 0 
-http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064 -http_response_size_bytes_sum{handler="prometheus"} 247001 -http_response_size_bytes_count{handler="prometheus"} 119 -# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. -# TYPE process_cpu_seconds_total counter -process_cpu_seconds_total 0.55 -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 70 -# HELP process_max_fds Maximum number of open file descriptors. -# TYPE process_max_fds gauge -process_max_fds 8192 -# HELP process_open_fds Number of open file descriptors. -# TYPE process_open_fds gauge -process_open_fds 29 -# HELP process_resident_memory_bytes Resident memory size in bytes. -# TYPE process_resident_memory_bytes gauge -process_resident_memory_bytes 5.3870592e+07 -# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. -# TYPE process_start_time_seconds gauge -process_start_time_seconds 1.42236894836e+09 -# HELP process_virtual_memory_bytes Virtual memory size in bytes. -# TYPE process_virtual_memory_bytes gauge -process_virtual_memory_bytes 5.41478912e+08 -# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures. -# TYPE prometheus_dns_sd_lookup_failures_total counter -prometheus_dns_sd_lookup_failures_total 0 -# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups. -# TYPE prometheus_dns_sd_lookups_total counter -prometheus_dns_sd_lookups_total 7 -# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute. -# TYPE prometheus_evaluator_duration_milliseconds summary -prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0 -prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0 -prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0 -prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1 -prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1 -prometheus_evaluator_duration_milliseconds_sum 12 -prometheus_evaluator_duration_milliseconds_count 23 -# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks. -# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge -prometheus_local_storage_checkpoint_duration_milliseconds 0 -# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type. -# TYPE prometheus_local_storage_chunk_ops_total counter -prometheus_local_storage_chunk_ops_total{type="create"} 598 -prometheus_local_storage_chunk_ops_total{type="persist"} 174 -prometheus_local_storage_chunk_ops_total{type="pin"} 920 -prometheus_local_storage_chunk_ops_total{type="transcode"} 415 -prometheus_local_storage_chunk_ops_total{type="unpin"} 920 -# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds. 
-# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary -prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0 -prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0 -prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0 -prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0 -prometheus_local_storage_indexing_batch_latency_milliseconds_count 1 -# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch). -# TYPE prometheus_local_storage_indexing_batch_sizes summary -prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2 -prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2 -prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2 -prometheus_local_storage_indexing_batch_sizes_sum 2 -prometheus_local_storage_indexing_batch_sizes_count 1 -# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue. -# TYPE prometheus_local_storage_indexing_queue_capacity gauge -prometheus_local_storage_indexing_queue_capacity 16384 -# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed. -# TYPE prometheus_local_storage_indexing_queue_length gauge -prometheus_local_storage_indexing_queue_length 0 -# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested. -# TYPE prometheus_local_storage_ingested_samples_total counter -prometheus_local_storage_ingested_samples_total 30473 -# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes. -# TYPE prometheus_local_storage_invalid_preload_requests_total counter -prometheus_local_storage_invalid_preload_requests_total 0 -# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory. -# TYPE prometheus_local_storage_memory_chunkdescs gauge -prometheus_local_storage_memory_chunkdescs 1059 -# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor). -# TYPE prometheus_local_storage_memory_chunks gauge -prometheus_local_storage_memory_chunks 1020 -# HELP prometheus_local_storage_memory_series The current number of series in memory. -# TYPE prometheus_local_storage_memory_series gauge -prometheus_local_storage_memory_series 424 -# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk. -# TYPE prometheus_local_storage_persist_latency_microseconds summary -prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377 -prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539 -prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463 -prometheus_local_storage_persist_latency_microseconds_sum 20424.415 -prometheus_local_storage_persist_latency_microseconds_count 174 -# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue. -# TYPE prometheus_local_storage_persist_queue_capacity gauge -prometheus_local_storage_persist_queue_capacity 1024 -# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue. 
-# TYPE prometheus_local_storage_persist_queue_length gauge -prometheus_local_storage_persist_queue_length 0 -# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type. -# TYPE prometheus_local_storage_series_ops_total counter -prometheus_local_storage_series_ops_total{type="create"} 2 -prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11 -# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications). -# TYPE prometheus_notifications_latency_milliseconds summary -prometheus_notifications_latency_milliseconds{quantile="0.5"} 0 -prometheus_notifications_latency_milliseconds{quantile="0.9"} 0 -prometheus_notifications_latency_milliseconds{quantile="0.99"} 0 -prometheus_notifications_latency_milliseconds_sum 0 -prometheus_notifications_latency_milliseconds_count 0 -# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. -# TYPE prometheus_notifications_queue_capacity gauge -prometheus_notifications_queue_capacity 100 -# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. -# TYPE prometheus_notifications_queue_length gauge -prometheus_notifications_queue_length 0 -# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute. -# TYPE prometheus_rule_evaluation_duration_milliseconds summary -prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0 -prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0 -prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2 -prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12 -prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115 -prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0 -prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0 -prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3 -prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15 -prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115 -# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. -# TYPE prometheus_rule_evaluation_failures_total counter -prometheus_rule_evaluation_failures_total 0 -# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples. -# TYPE prometheus_samples_queue_capacity gauge -prometheus_samples_queue_capacity 4096 -# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name). -# TYPE prometheus_samples_queue_length gauge -prometheus_samples_queue_length 0 -# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. 
-# TYPE prometheus_target_interval_length_seconds summary -prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14 -prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14 -prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15 -prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15 -prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15 -prometheus_target_interval_length_seconds_sum{interval="15s"} 175 -prometheus_target_interval_length_seconds_count{interval="15s"} 12 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1 -prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1 -prometheus_target_interval_length_seconds_sum{interval="1s"} 55 -prometheus_target_interval_length_seconds_count{interval="1s"} 117 diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_create.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index cc2a616acc..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bytes" - "fmt" - "io" - "math" - "strings" - - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. This function does not perform checks on the -// content of the metric and label names, i.e. invalid metric or label names -// will result in invalid text format output. -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { - var written int - - // Fail-fast checks. - if len(in.Metric) == 0 { - return written, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return written, fmt.Errorf("MetricFamily has no name: %s", in) - } - if in.Type == nil { - return written, fmt.Errorf("MetricFamily has no type: %s", in) - } - - // Comments, first HELP, then TYPE. - if in.Help != nil { - n, err := fmt.Fprintf( - out, "# HELP %s %s\n", - name, escapeString(*in.Help, false), - ) - written += n - if err != nil { - return written, err - } - } - metricType := in.GetType() - n, err := fmt.Fprintf( - out, "# TYPE %s %s\n", - name, strings.ToLower(metricType.String()), - ) - written += n - if err != nil { - return written, err - } - - // Finally the samples, one line for each. 
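// As a concrete illustration (using a family that appears in the testdata
// above), a counter with one labelled sample is rendered HELP and TYPE
// comments first, then one line per sample from the loop below:
//
//	# HELP http_requests_total Total number of HTTP requests made.
//	# TYPE http_requests_total counter
//	http_requests_total{code="200",handler="prometheus",method="get"} 119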
- for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Counter.GetValue(), - out, - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Gauge.GetValue(), - out, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Untyped.GetValue(), - out, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - name, metric, - model.QuantileLabel, fmt.Sprint(q.GetQuantile()), - q.GetValue(), - out, - ) - written += n - if err != nil { - return written, err - } - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Summary.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Summary.GetSampleCount()), - out, - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, q := range metric.Histogram.Bucket { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, fmt.Sprint(q.GetUpperBound()), - float64(q.GetCumulativeCount()), - out, - ) - written += n - if err != nil { - return written, err - } - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, "+Inf", - float64(metric.Histogram.GetSampleCount()), - out, - ) - if err != nil { - return written, err - } - written += n - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Histogram.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Histogram.GetSampleCount()), - out, - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeSample writes a single sample in text format to out, given the metric -// name, the metric proto message itself, optionally an additional label name -// and value (use empty strings if not required), and the value. The function -// returns the number of bytes written and any error encountered. 
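The encoder above is driven through the single MetricFamilyToText entry point. A minimal usage sketch, assuming the vendored import paths shown in this diff; the main package and the hand-built sample family are illustrative only, not part of the deleted code:

    package main

    import (
        "os"

        "github.com/golang/protobuf/proto"
        dto "github.com/prometheus/client_model/go"
        "github.com/prometheus/common/expfmt"
    )

    func main() {
        // Hand-build a one-sample counter family (hypothetical values),
        // mirroring the structures used in the test scenarios below.
        mf := &dto.MetricFamily{
            Name: proto.String("http_requests_total"),
            Help: proto.String("Total number of HTTP requests made."),
            Type: dto.MetricType_COUNTER.Enum(),
            Metric: []*dto.Metric{
                {
                    Label: []*dto.LabelPair{
                        {Name: proto.String("code"), Value: proto.String("200")},
                    },
                    Counter: &dto.Counter{Value: proto.Float64(119)},
                },
            },
        }
        // Writes the HELP/TYPE comments and the sample line to stdout.
        if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
            panic(err)
        }
    }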
-func writeSample( - name string, - metric *dto.Metric, - additionalLabelName, additionalLabelValue string, - value float64, - out io.Writer, -) (int, error) { - var written int - n, err := fmt.Fprint(out, name) - written += n - if err != nil { - return written, err - } - n, err = labelPairsToText( - metric.Label, - additionalLabelName, additionalLabelValue, - out, - ) - written += n - if err != nil { - return written, err - } - n, err = fmt.Fprintf(out, " %v", value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - n, err = out.Write([]byte{'\n'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -// labelPairsToText converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'out'. An empty slice in combination with an -// empty string 'additionalLabelName' results in nothing being -// written. Otherwise, the label pairs are written, escaped as required by the -// text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. -func labelPairsToText( - in []*dto.LabelPair, - additionalLabelName, additionalLabelValue string, - out io.Writer, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var written int - separator := '{' - for _, lp := range in { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, lp.GetName(), escapeString(lp.GetValue(), true), - ) - written += n - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, additionalLabelName, - escapeString(additionalLabelValue, true), - ) - written += n - if err != nil { - return written, err - } - } - n, err := out.Write([]byte{'}'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -// escapeString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -func escapeString(v string, includeDoubleQuote bool) string { - result := bytes.NewBuffer(make([]byte, 0, len(v))) - for _, c := range v { - switch { - case c == '\\': - result.WriteString(`\\`) - case includeDoubleQuote && c == '"': - result.WriteString(`\"`) - case c == '\n': - result.WriteString(`\n`) - default: - result.WriteRune(c) - } - } - return result.String() -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_create_test.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_create_test.go deleted file mode 100644 index 5e276c3928..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_create_test.go +++ /dev/null @@ -1,440 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package expfmt - -import ( - "bytes" - "math" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -func testCreate(t testing.TB) { - var scenarios = []struct { - in *dto.MetricFamily - out string - }{ - // 0: Counter, NaN as value, timestamp given. - { - in: &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("two-line\n doc str\\ing"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val1"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("basevalue"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(math.NaN()), - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("basevalue"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(.23), - }, - TimestampMs: proto.Int64(1234567890), - }, - }, - }, - out: `# HELP name two-line\n doc str\\ing -# TYPE name counter -name{labelname="val1",basename="basevalue"} NaN -name{labelname="val2",basename="basevalue"} 0.23 1234567890 -`, - }, - // 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values. - { - in: &dto.MetricFamily{ - Name: proto.String("gauge_name"), - Help: proto.String("gauge\ndoc\nstr\"ing"), - Type: dto.MetricType_GAUGE.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("name_1"), - Value: proto.String("val with\nnew line"), - }, - &dto.LabelPair{ - Name: proto.String("name_2"), - Value: proto.String("val with \\backslash and \"quotes\""), - }, - }, - Gauge: &dto.Gauge{ - Value: proto.Float64(math.Inf(+1)), - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("name_1"), - Value: proto.String("Björn"), - }, - &dto.LabelPair{ - Name: proto.String("name_2"), - Value: proto.String("佖佥"), - }, - }, - Gauge: &dto.Gauge{ - Value: proto.Float64(3.14E42), - }, - }, - }, - }, - out: `# HELP gauge_name gauge\ndoc\nstr"ing -# TYPE gauge_name gauge -gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf -gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42 -`, - }, - // 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label. - { - in: &dto.MetricFamily{ - Name: proto.String("untyped_name"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("name_1"), - Value: proto.String("value 1"), - }, - }, - Untyped: &dto.Untyped{ - Value: proto.Float64(-1.23e-45), - }, - }, - }, - }, - out: `# TYPE untyped_name untyped -untyped_name -Inf -untyped_name{name_1="value 1"} -1.23e-45 -`, - }, - // 3: Summary. 
- { - in: &dto.MetricFamily{ - Name: proto.String("summary_name"), - Help: proto.String("summary docstring"), - Type: dto.MetricType_SUMMARY.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Summary: &dto.Summary{ - SampleCount: proto.Uint64(42), - SampleSum: proto.Float64(-3.4567), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.5), - Value: proto.Float64(-1.23), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.9), - Value: proto.Float64(.2342354), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.99), - Value: proto.Float64(0), - }, - }, - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("name_1"), - Value: proto.String("value 1"), - }, - &dto.LabelPair{ - Name: proto.String("name_2"), - Value: proto.String("value 2"), - }, - }, - Summary: &dto.Summary{ - SampleCount: proto.Uint64(4711), - SampleSum: proto.Float64(2010.1971), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.5), - Value: proto.Float64(1), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.9), - Value: proto.Float64(2), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.99), - Value: proto.Float64(3), - }, - }, - }, - }, - }, - }, - out: `# HELP summary_name summary docstring -# TYPE summary_name summary -summary_name{quantile="0.5"} -1.23 -summary_name{quantile="0.9"} 0.2342354 -summary_name{quantile="0.99"} 0 -summary_name_sum -3.4567 -summary_name_count 42 -summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1 -summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2 -summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3 -summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971 -summary_name_count{name_1="value 1",name_2="value 2"} 4711 -`, - }, - // 4: Histogram - { - in: &dto.MetricFamily{ - Name: proto.String("request_duration_microseconds"), - Help: proto.String("The response latency."), - Type: dto.MetricType_HISTOGRAM.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Histogram: &dto.Histogram{ - SampleCount: proto.Uint64(2693), - SampleSum: proto.Float64(1756047.3), - Bucket: []*dto.Bucket{ - &dto.Bucket{ - UpperBound: proto.Float64(100), - CumulativeCount: proto.Uint64(123), - }, - &dto.Bucket{ - UpperBound: proto.Float64(120), - CumulativeCount: proto.Uint64(412), - }, - &dto.Bucket{ - UpperBound: proto.Float64(144), - CumulativeCount: proto.Uint64(592), - }, - &dto.Bucket{ - UpperBound: proto.Float64(172.8), - CumulativeCount: proto.Uint64(1524), - }, - &dto.Bucket{ - UpperBound: proto.Float64(math.Inf(+1)), - CumulativeCount: proto.Uint64(2693), - }, - }, - }, - }, - }, - }, - out: `# HELP request_duration_microseconds The response latency. -# TYPE request_duration_microseconds histogram -request_duration_microseconds_bucket{le="100"} 123 -request_duration_microseconds_bucket{le="120"} 412 -request_duration_microseconds_bucket{le="144"} 592 -request_duration_microseconds_bucket{le="172.8"} 1524 -request_duration_microseconds_bucket{le="+Inf"} 2693 -request_duration_microseconds_sum 1.7560473e+06 -request_duration_microseconds_count 2693 -`, - }, - // 5: Histogram with missing +Inf bucket. 
- { - in: &dto.MetricFamily{ - Name: proto.String("request_duration_microseconds"), - Help: proto.String("The response latency."), - Type: dto.MetricType_HISTOGRAM.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Histogram: &dto.Histogram{ - SampleCount: proto.Uint64(2693), - SampleSum: proto.Float64(1756047.3), - Bucket: []*dto.Bucket{ - &dto.Bucket{ - UpperBound: proto.Float64(100), - CumulativeCount: proto.Uint64(123), - }, - &dto.Bucket{ - UpperBound: proto.Float64(120), - CumulativeCount: proto.Uint64(412), - }, - &dto.Bucket{ - UpperBound: proto.Float64(144), - CumulativeCount: proto.Uint64(592), - }, - &dto.Bucket{ - UpperBound: proto.Float64(172.8), - CumulativeCount: proto.Uint64(1524), - }, - }, - }, - }, - }, - }, - out: `# HELP request_duration_microseconds The response latency. -# TYPE request_duration_microseconds histogram -request_duration_microseconds_bucket{le="100"} 123 -request_duration_microseconds_bucket{le="120"} 412 -request_duration_microseconds_bucket{le="144"} 592 -request_duration_microseconds_bucket{le="172.8"} 1524 -request_duration_microseconds_bucket{le="+Inf"} 2693 -request_duration_microseconds_sum 1.7560473e+06 -request_duration_microseconds_count 2693 -`, - }, - } - - for i, scenario := range scenarios { - out := bytes.NewBuffer(make([]byte, 0, len(scenario.out))) - n, err := MetricFamilyToText(out, scenario.in) - if err != nil { - t.Errorf("%d. error: %s", i, err) - continue - } - if expected, got := len(scenario.out), n; expected != got { - t.Errorf( - "%d. expected %d bytes written, got %d", - i, expected, got, - ) - } - if expected, got := scenario.out, out.String(); expected != got { - t.Errorf( - "%d. expected out=%q, got %q", - i, expected, got, - ) - } - } - -} - -func TestCreate(t *testing.T) { - testCreate(t) -} - -func BenchmarkCreate(b *testing.B) { - for i := 0; i < b.N; i++ { - testCreate(b) - } -} - -func testCreateError(t testing.TB) { - var scenarios = []struct { - in *dto.MetricFamily - err string - }{ - // 0: No metric. - { - in: &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("doc string"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{}, - }, - err: "MetricFamily has no metrics", - }, - // 1: No metric name. - { - in: &dto.MetricFamily{ - Help: proto.String("doc string"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - }, - }, - err: "MetricFamily has no name", - }, - // 2: No metric type. - { - in: &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("doc string"), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - }, - }, - err: "MetricFamily has no type", - }, - // 3: Wrong type. - { - in: &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("doc string"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - }, - }, - err: "expected counter in metric", - }, - } - - for i, scenario := range scenarios { - var out bytes.Buffer - _, err := MetricFamilyToText(&out, scenario.in) - if err == nil { - t.Errorf("%d. expected error, got nil", i) - continue - } - if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { - t.Errorf( - "%d. 
expected error starting with %q, got %q", - i, expected, got, - ) - } - } - -} - -func TestCreateError(t *testing.T) { - testCreateError(t) -} - -func BenchmarkCreateError(b *testing.B) { - for i := 0; i < b.N; i++ { - testCreateError(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_parse.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index 84433bc4f6..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,746 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. -type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. - lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. - currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages.
It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. -// -// This method must not be called concurrently. If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. 
- for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). 
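// To see why the signature lookup below matters: the following three text
// lines (hypothetical metric name and labels, shown for illustration only)
// carry the same label signature, so they must all be folded into one
// dto.Metric holding a single dto.Summary with three quantiles, rather than
// three separate metrics:
//
//	rpc_duration{service="api",quantile="0.5"} 1
//	rpc_duration{service="api",quantile="0.9"} 2
//	rpc_duration{service="api",quantile="0.99"} 3
//
// The 'quantile' label itself is excluded from the signature, as arranged in
// startLabelValue above.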
-func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if p.currentMetric.Histogram == nil { - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *TextParser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. 
-		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
-		return nil
-	}
-	p.currentMetric.TimestampMs = proto.Int64(timestamp)
-	if p.readTokenUntilNewline(false); p.err != nil {
-		return nil // Unexpected end of input.
-	}
-	if p.currentToken.Len() > 0 {
-		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
-		return nil
-	}
-	return p.startOfLine
-}
-
-// readingHelp represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the docstring after 'HELP'.
-func (p *TextParser) readingHelp() stateFn {
-	if p.currentMF.Help != nil {
-		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
-		return nil
-	}
-	// Rest of line is the docstring.
-	if p.readTokenUntilNewline(true); p.err != nil {
-		return nil // Unexpected end of input.
-	}
-	p.currentMF.Help = proto.String(p.currentToken.String())
-	return p.startOfLine
-}
-
-// readingType represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the type hint after 'TYPE'.
-func (p *TextParser) readingType() stateFn {
-	if p.currentMF.Type != nil {
-		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
-		return nil
-	}
-	// Rest of line is the type.
-	if p.readTokenUntilNewline(false); p.err != nil {
-		return nil // Unexpected end of input.
-	}
-	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
-	if !ok {
-		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
-		return nil
-	}
-	p.currentMF.Type = dto.MetricType(metricType).Enum()
-	return p.startOfLine
-}
-
-// parseError sets p.err to a ParseError at the current line with the given
-// message.
-func (p *TextParser) parseError(msg string) {
-	p.err = ParseError{
-		Line: p.lineCount,
-		Msg:  msg,
-	}
-}
-
-// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
-// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
-func (p *TextParser) skipBlankTab() {
-	for {
-		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
-			return
-		}
-	}
-}
-
-// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
-// anything if p.currentByte is neither ' ' nor '\t'.
-func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
-	if isBlankOrTab(p.currentByte) {
-		p.skipBlankTab()
-	}
-}
-
-// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
-// first byte considered is the byte already read (now in p.currentByte). The
-// first whitespace byte encountered is still copied into p.currentByte, but not
-// into p.currentToken.
-func (p *TextParser) readTokenUntilWhitespace() {
-	p.currentToken.Reset()
-	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
-		p.currentToken.WriteByte(p.currentByte)
-		p.currentByte, p.err = p.buf.ReadByte()
-	}
-}
-
-// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
-// byte considered is the byte already read (now in p.currentByte). The first
-// newline byte encountered is still copied into p.currentByte, but not into
-// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
-// other escape sequences are invalid and cause an error.
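To see these escape rules end to end, here is a minimal, hedged sketch that drives the parser through its public entry point, TextToMetricFamilies (the same call the tests further down use; the import path assumes this vendored tree's layout):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// The HELP docstring exercises both recognized escape sequences:
	// \n becomes a line feed and \\ a literal backslash.
	input := "# HELP demo two-line\\n doc str\\\\ing\n" +
		"# TYPE demo counter\n" +
		"demo 1\n"

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%q\n", families["demo"].GetHelp()) // "two-line\n doc str\\ing"
}
```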
-func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { - p.currentToken.Reset() - escaped := false - for p.err == nil { - if recognizeEscapeSequence && escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '\n': - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a metric name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - if !isValidMetricNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - if !isValidLabelNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. -func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. 
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' -} - -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_parse_test.go b/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_parse_test.go deleted file mode 100644 index 589c87a9d5..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/expfmt/text_parse_test.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "math" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" -) - -func testTextParse(t testing.TB) { - var scenarios = []struct { - in string - out []*dto.MetricFamily - }{ - // 0: Empty lines as input. - { - in: ` - -`, - out: []*dto.MetricFamily{}, - }, - // 1: Minimal case. - { - in: ` -minimal_metric 1.234 -another_metric -3e3 103948 -# Even that: -no_labels{} 3 -# HELP line for non-existing metric will be ignored. 
-`, - out: []*dto.MetricFamily{ - &dto.MetricFamily{ - Name: proto.String("minimal_metric"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(1.234), - }, - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("another_metric"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(-3e3), - }, - TimestampMs: proto.Int64(103948), - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("no_labels"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(3), - }, - }, - }, - }, - }, - }, - // 2: Counters & gauges, docstrings, various whitespace, escape sequences. - { - in: ` -# A normal comment. -# -# TYPE name counter -name{labelname="val1",basename="basevalue"} NaN -name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890 -# HELP name two-line\n doc str\\ing - - # HELP name2 doc str"ing 2 - # TYPE name2 gauge -name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321 -name2{ labelname = "val1" , }-Inf -`, - out: []*dto.MetricFamily{ - &dto.MetricFamily{ - Name: proto.String("name"), - Help: proto.String("two-line\n doc str\\ing"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val1"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("basevalue"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(math.NaN()), - }, - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("base\"v\\al\nue"), - }, - }, - Counter: &dto.Counter{ - Value: proto.Float64(.23), - }, - TimestampMs: proto.Int64(1234567890), - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("name2"), - Help: proto.String("doc str\"ing 2"), - Type: dto.MetricType_GAUGE.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("basename"), - Value: proto.String("basevalue2"), - }, - }, - Gauge: &dto.Gauge{ - Value: proto.Float64(math.Inf(+1)), - }, - TimestampMs: proto.Int64(54321), - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("labelname"), - Value: proto.String("val1"), - }, - }, - Gauge: &dto.Gauge{ - Value: proto.Float64(math.Inf(-1)), - }, - }, - }, - }, - }, - }, - // 3: The evil summary, mixed with other types and funny comments. - { - in: ` -# TYPE my_summary summary -my_summary{n1="val1",quantile="0.5"} 110 -decoy -1 -2 -my_summary{n1="val1",quantile="0.9"} 140 1 -my_summary_count{n1="val1"} 42 -# Latest timestamp wins in case of a summary. 
-my_summary_sum{n1="val1"} 4711 2 -fake_sum{n1="val1"} 2001 -# TYPE another_summary summary -another_summary_count{n2="val2",n1="val1"} 20 -my_summary_count{n2="val2",n1="val1"} 5 5 -another_summary{n1="val1",n2="val2",quantile=".3"} -1.2 -my_summary_sum{n1="val2"} 08 15 -my_summary{n1="val3", quantile="0.2"} 4711 - my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN -# some -# funny comments -# HELP -# HELP -# HELP my_summary -# HELP my_summary -`, - out: []*dto.MetricFamily{ - &dto.MetricFamily{ - Name: proto.String("fake_sum"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val1"), - }, - }, - Untyped: &dto.Untyped{ - Value: proto.Float64(2001), - }, - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("decoy"), - Type: dto.MetricType_UNTYPED.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Untyped: &dto.Untyped{ - Value: proto.Float64(-1), - }, - TimestampMs: proto.Int64(-2), - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("my_summary"), - Type: dto.MetricType_SUMMARY.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val1"), - }, - }, - Summary: &dto.Summary{ - SampleCount: proto.Uint64(42), - SampleSum: proto.Float64(4711), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.5), - Value: proto.Float64(110), - }, - &dto.Quantile{ - Quantile: proto.Float64(0.9), - Value: proto.Float64(140), - }, - }, - }, - TimestampMs: proto.Int64(2), - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n2"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val1"), - }, - }, - Summary: &dto.Summary{ - SampleCount: proto.Uint64(5), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(-12.34), - Value: proto.Float64(math.NaN()), - }, - }, - }, - TimestampMs: proto.Int64(5), - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val2"), - }, - }, - Summary: &dto.Summary{ - SampleSum: proto.Float64(8), - }, - TimestampMs: proto.Int64(15), - }, - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val3"), - }, - }, - Summary: &dto.Summary{ - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.2), - Value: proto.Float64(4711), - }, - }, - }, - }, - }, - }, - &dto.MetricFamily{ - Name: proto.String("another_summary"), - Type: dto.MetricType_SUMMARY.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Label: []*dto.LabelPair{ - &dto.LabelPair{ - Name: proto.String("n2"), - Value: proto.String("val2"), - }, - &dto.LabelPair{ - Name: proto.String("n1"), - Value: proto.String("val1"), - }, - }, - Summary: &dto.Summary{ - SampleCount: proto.Uint64(20), - Quantile: []*dto.Quantile{ - &dto.Quantile{ - Quantile: proto.Float64(0.3), - Value: proto.Float64(-1.2), - }, - }, - }, - }, - }, - }, - }, - }, - // 4: The histogram. - { - in: ` -# HELP request_duration_microseconds The response latency. 
-# TYPE request_duration_microseconds histogram -request_duration_microseconds_bucket{le="100"} 123 -request_duration_microseconds_bucket{le="120"} 412 -request_duration_microseconds_bucket{le="144"} 592 -request_duration_microseconds_bucket{le="172.8"} 1524 -request_duration_microseconds_bucket{le="+Inf"} 2693 -request_duration_microseconds_sum 1.7560473e+06 -request_duration_microseconds_count 2693 -`, - out: []*dto.MetricFamily{ - { - Name: proto.String("request_duration_microseconds"), - Help: proto.String("The response latency."), - Type: dto.MetricType_HISTOGRAM.Enum(), - Metric: []*dto.Metric{ - &dto.Metric{ - Histogram: &dto.Histogram{ - SampleCount: proto.Uint64(2693), - SampleSum: proto.Float64(1756047.3), - Bucket: []*dto.Bucket{ - &dto.Bucket{ - UpperBound: proto.Float64(100), - CumulativeCount: proto.Uint64(123), - }, - &dto.Bucket{ - UpperBound: proto.Float64(120), - CumulativeCount: proto.Uint64(412), - }, - &dto.Bucket{ - UpperBound: proto.Float64(144), - CumulativeCount: proto.Uint64(592), - }, - &dto.Bucket{ - UpperBound: proto.Float64(172.8), - CumulativeCount: proto.Uint64(1524), - }, - &dto.Bucket{ - UpperBound: proto.Float64(math.Inf(+1)), - CumulativeCount: proto.Uint64(2693), - }, - }, - }, - }, - }, - }, - }, - }, - } - - for i, scenario := range scenarios { - out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) - if err != nil { - t.Errorf("%d. error: %s", i, err) - continue - } - if expected, got := len(scenario.out), len(out); expected != got { - t.Errorf( - "%d. expected %d MetricFamilies, got %d", - i, expected, got, - ) - } - for _, expected := range scenario.out { - got, ok := out[expected.GetName()] - if !ok { - t.Errorf( - "%d. expected MetricFamily %q, found none", - i, expected.GetName(), - ) - continue - } - if expected.String() != got.String() { - t.Errorf( - "%d. expected MetricFamily %s, got %s", - i, expected, got, - ) - } - } - } -} - -func TestTextParse(t *testing.T) { - testTextParse(t) -} - -func BenchmarkTextParse(b *testing.B) { - for i := 0; i < b.N; i++ { - testTextParse(b) - } -} - -func testTextParseError(t testing.TB) { - var scenarios = []struct { - in string - err string - }{ - // 0: No new-line at end of input. - { - in: `bla 3.14`, - err: "EOF", - }, - // 1: Invalid escape sequence in label value. - { - in: `metric{label="\t"} 3.14`, - err: "text format parsing error in line 1: invalid escape sequence", - }, - // 2: Newline in label value. 
-		{
-			in: `
-metric{label="new
-line"} 3.14
-`,
-			err: `text format parsing error in line 2: label value "new" contains unescaped new-line`,
-		},
-		// 3:
-		{
-			in:  `metric{@="bla"} 3.14`,
-			err: "text format parsing error in line 1: invalid label name for metric",
-		},
-		// 4:
-		{
-			in:  `metric{__name__="bla"} 3.14`,
-			err: `text format parsing error in line 1: label name "__name__" is reserved`,
-		},
-		// 5:
-		{
-			in:  `metric{label+="bla"} 3.14`,
-			err: "text format parsing error in line 1: expected '=' after label name",
-		},
-		// 6:
-		{
-			in:  `metric{label=bla} 3.14`,
-			err: "text format parsing error in line 1: expected '\"' at start of label value",
-		},
-		// 7:
-		{
-			in: `
-# TYPE metric summary
-metric{quantile="bla"} 3.14
-`,
-			err: "text format parsing error in line 3: expected float as value for 'quantile' label",
-		},
-		// 8:
-		{
-			in:  `metric{label="bla"+} 3.14`,
-			err: "text format parsing error in line 1: unexpected end of label value",
-		},
-		// 9:
-		{
-			in: `metric{label="bla"} 3.14 2.72
-`,
-			err: "text format parsing error in line 1: expected integer as timestamp",
-		},
-		// 10:
-		{
-			in: `metric{label="bla"} 3.14 2 3
-`,
-			err: "text format parsing error in line 1: spurious string after timestamp",
-		},
-		// 11:
-		{
-			in: `metric{label="bla"} blubb
-`,
-			err: "text format parsing error in line 1: expected float as value",
-		},
-		// 12:
-		{
-			in: `
-# HELP metric one
-# HELP metric two
-`,
-			err: "text format parsing error in line 3: second HELP line for metric name",
-		},
-		// 13:
-		{
-			in: `
-# TYPE metric counter
-# TYPE metric untyped
-`,
-			err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
-		},
-		// 14:
-		{
-			in: `
-metric 4.12
-# TYPE metric counter
-`,
-			err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
-		},
-		// 15:
-		{
-			in: `
-# TYPE metric bla
-`,
-			err: "text format parsing error in line 2: unknown metric type",
-		},
-		// 16:
-		{
-			in: `
-# TYPE met-ric
-`,
-			err: "text format parsing error in line 2: invalid metric name in comment",
-		},
-		// 17:
-		{
-			in:  `@invalidmetric{label="bla"} 3.14 2`,
-			err: "text format parsing error in line 1: invalid metric name",
-		},
-		// 18:
-		{
-			in:  `{label="bla"} 3.14 2`,
-			err: "text format parsing error in line 1: invalid metric name",
-		},
-		// 19:
-		{
-			in: `
-# TYPE metric histogram
-metric_bucket{le="bla"} 3.14
-`,
-			err: "text format parsing error in line 3: expected float as value for 'le' label",
-		},
-	}
-
-	for i, scenario := range scenarios {
-		_, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
-		if err == nil {
-			t.Errorf("%d. expected error, got nil", i)
-			continue
-		}
-		if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
-			t.Errorf(
-				"%d. 
expected error starting with %q, got %q", - i, expected, got, - ) - } - } - -} - -func TestTextParseError(t *testing.T) { - testTextParseError(t) -} - -func BenchmarkParseError(b *testing.B) { - for i := 0; i < b.N; i++ { - testTextParseError(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/fingerprinting.go b/Godeps/_workspace/src/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de4106e..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. -func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. -func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. 
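Before the Intersection implementation, a hedged sketch of the two set operations (the fingerprint values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	a := model.FingerprintSet{1: {}, 2: {}, 3: {}}
	b := model.FingerprintSet{2: {}, 3: {}, 4: {}}

	fmt.Println(a.Equal(b)) // false: the element sets differ

	both := a.Intersection(b) // {2, 3}
	fmt.Println(both.Equal(model.FingerprintSet{2: {}, 3: {}})) // true
}
```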
-func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/labels.go b/Godeps/_workspace/src/github.com/prometheus/common/model/labels.go deleted file mode 100644 index ba694b98ad..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/labels.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "sort" - "strings" -) - -const ( - // ExportedLabelPrefix is the prefix to prepend to the label names present in - // exported metrics if a label of the same name is added by the server. - ExportedLabelPrefix = "exported_" - - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel = "__name__" - - // SchemeLabel is the name of the label that holds the scheme on which to - // scrape a target. - SchemeLabel = "__scheme__" - - // AddressLabel is the name of the label that holds the address of - // a scrape target. - AddressLabel = "__address__" - - // MetricsPathLabel is the name of the label that holds the path on which to - // scrape a target. - MetricsPathLabel = "__metrics_path__" - - // ReservedLabelPrefix is a prefix which is not legal in user-supplied - // label names. - ReservedLabelPrefix = "__" - - // MetaLabelPrefix is a prefix for labels that provide meta information. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. - MetaLabelPrefix = "__meta_" - - // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. This is reserved for use in - // Prometheus configuration files by users. - TmpLabelPrefix = "__tmp_" - - // ParamLabelPrefix is a prefix for labels that provide URL parameters - // used to scrape a target. - ParamLabelPrefix = "__param_" - - // JobLabel is the label name indicating the job from which a timeseries - // was scraped. - JobLabel = "job" - - // InstanceLabel is the label name used for the instance label. - InstanceLabel = "instance" - - // BucketLabel is used for the label that defines the upper bound of a - // bucket of a histogram ("le" -> "less or equal"). - BucketLabel = "le" - - // QuantileLabel is used for the label that defines the quantile in a - // summary. - QuantileLabel = "quantile" -) - -// LabelNameRE is a regular expression matching valid label names. 
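Label names are validated against this regexp by the UnmarshalJSON and UnmarshalYAML methods defined below; a brief hedged sketch of the JSON side:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	var ln model.LabelName

	// Accepted: matches ^[a-zA-Z_][a-zA-Z0-9_]*$.
	fmt.Println(json.Unmarshal([]byte(`"http_status"`), &ln)) // <nil>

	// Rejected: a label name must not start with a digit.
	fmt.Println(json.Unmarshal([]byte(`"2xx"`), &ln)) // "2xx" is not a valid label name
}
```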
-var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-
-// A LabelName is a key for a LabelSet or Metric. It has a value associated
-// therewith.
-type LabelName string
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var s string
-	if err := unmarshal(&s); err != nil {
-		return err
-	}
-	if !LabelNameRE.MatchString(s) {
-		return fmt.Errorf("%q is not a valid label name", s)
-	}
-	*ln = LabelName(s)
-	return nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (ln *LabelName) UnmarshalJSON(b []byte) error {
-	var s string
-	if err := json.Unmarshal(b, &s); err != nil {
-		return err
-	}
-	if !LabelNameRE.MatchString(s) {
-		return fmt.Errorf("%q is not a valid label name", s)
-	}
-	*ln = LabelName(s)
-	return nil
-}
-
-// LabelNames is a sortable LabelName slice. It implements sort.Interface.
-type LabelNames []LabelName
-
-func (l LabelNames) Len() int {
-	return len(l)
-}
-
-func (l LabelNames) Less(i, j int) bool {
-	return l[i] < l[j]
-}
-
-func (l LabelNames) Swap(i, j int) {
-	l[i], l[j] = l[j], l[i]
-}
-
-func (l LabelNames) String() string {
-	labelStrings := make([]string, 0, len(l))
-	for _, label := range l {
-		labelStrings = append(labelStrings, string(label))
-	}
-	return strings.Join(labelStrings, ", ")
-}
-
-// A LabelValue is an associated value for a LabelName.
-type LabelValue string
-
-// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
-type LabelValues []LabelValue
-
-func (l LabelValues) Len() int {
-	return len(l)
-}
-
-func (l LabelValues) Less(i, j int) bool {
-	return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
-}
-
-func (l LabelValues) Swap(i, j int) {
-	l[i], l[j] = l[j], l[i]
-}
-
-// LabelPair pairs a name with a value.
-type LabelPair struct {
-	Name  LabelName
-	Value LabelValue
-}
-
-// LabelPairs is a sortable slice of LabelPair pointers. It implements
-// sort.Interface.
-type LabelPairs []*LabelPair
-
-func (l LabelPairs) Len() int {
-	return len(l)
-}
-
-func (l LabelPairs) Less(i, j int) bool {
-	switch {
-	case l[i].Name > l[j].Name:
-		return false
-	case l[i].Name < l[j].Name:
-		return true
-	case l[i].Value > l[j].Value:
-		return false
-	case l[i].Value < l[j].Value:
-		return true
-	default:
-		return false
-	}
-}
-
-func (l LabelPairs) Swap(i, j int) {
-	l[i], l[j] = l[j], l[i]
-}
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/labels_test.go b/Godeps/_workspace/src/github.com/prometheus/common/model/labels_test.go
deleted file mode 100644
index ab17025c76..0000000000
--- a/Godeps/_workspace/src/github.com/prometheus/common/model/labels_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package model - -import ( - "sort" - "testing" -) - -func testLabelNames(t testing.TB) { - var scenarios = []struct { - in LabelNames - out LabelNames - }{ - { - in: LabelNames{"ZZZ", "zzz"}, - out: LabelNames{"ZZZ", "zzz"}, - }, - { - in: LabelNames{"aaa", "AAA"}, - out: LabelNames{"AAA", "aaa"}, - }, - } - - for i, scenario := range scenarios { - sort.Sort(scenario.in) - - for j, expected := range scenario.out { - if expected != scenario.in[j] { - t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) - } - } - } -} - -func TestLabelNames(t *testing.T) { - testLabelNames(t) -} - -func BenchmarkLabelNames(b *testing.B) { - for i := 0; i < b.N; i++ { - testLabelNames(b) - } -} - -func testLabelValues(t testing.TB) { - var scenarios = []struct { - in LabelValues - out LabelValues - }{ - { - in: LabelValues{"ZZZ", "zzz"}, - out: LabelValues{"ZZZ", "zzz"}, - }, - { - in: LabelValues{"aaa", "AAA"}, - out: LabelValues{"AAA", "aaa"}, - }, - } - - for i, scenario := range scenarios { - sort.Sort(scenario.in) - - for j, expected := range scenario.out { - if expected != scenario.in[j] { - t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) - } - } - } -} - -func TestLabelValues(t *testing.T) { - testLabelValues(t) -} - -func BenchmarkLabelValues(b *testing.B) { - for i := 0; i < b.N; i++ { - testLabelValues(b) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/labelset.go b/Godeps/_workspace/src/github.com/prometheus/common/model/labelset.go deleted file mode 100644 index 142b9d1e2d..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/labelset.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -func (ls LabelSet) Equal(o LabelSet) bool { - if len(ls) != len(o) { - return false - } - for ln, lv := range ls { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. 
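A hedged sketch of those ordering rules in action:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	a := model.LabelSet{"job": "api"}
	b := model.LabelSet{"instance": "host:9090", "job": "api"}
	c := model.LabelSet{"job": "web"}

	fmt.Println(a.Before(b)) // true: a has fewer labels than b
	fmt.Println(a.Before(c)) // true: same size, "api" < "web" at the first differing label
	fmt.Println(a.Before(a)) // false: equal sets are not before each other
}
```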
-func (ls LabelSet) Before(o LabelSet) bool { - if len(ls) < len(o) { - return true - } - if len(ls) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(ls)+len(o)) - for ln := range ls { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. - sort.Sort(lns) - for _, ln := range lns { - mlv, ok := ls[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !LabelNameRE.MatchString(string(ln)) { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/metric.go b/Godeps/_workspace/src/github.com/prometheus/common/model/metric.go deleted file mode 100644 index 25fc3c9425..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "sort" - "strings" -) - -var separator = []byte{0} - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. 
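Metric reuses the LabelSet machinery, and its String method (defined a little further below) renders the conventional name{labels} form; a hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"code":                "200",
		"method":              "GET",
	}
	// __name__ is pulled out front; the remaining labels are sorted.
	fmt.Println(m) // http_requests_total{code="200", method="GET"}

	fmt.Println(model.Metric{}) // {}
}
```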
-func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. -func (m Metric) Clone() Metric { - clone := Metric{} - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/metric_test.go b/Godeps/_workspace/src/github.com/prometheus/common/model/metric_test.go deleted file mode 100644 index 5c7cfceafe..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/metric_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import "testing" - -func testMetric(t testing.TB) { - var scenarios = []struct { - input LabelSet - fingerprint Fingerprint - fastFingerprint Fingerprint - }{ - { - input: LabelSet{}, - fingerprint: 14695981039346656037, - fastFingerprint: 14695981039346656037, - }, - { - input: LabelSet{ - "first_name": "electro", - "occupation": "robot", - "manufacturer": "westinghouse", - }, - fingerprint: 5911716720268894962, - fastFingerprint: 11310079640881077873, - }, - { - input: LabelSet{ - "x": "y", - }, - fingerprint: 8241431561484471700, - fastFingerprint: 13948396922932177635, - }, - { - input: LabelSet{ - "a": "bb", - "b": "c", - }, - fingerprint: 3016285359649981711, - fastFingerprint: 3198632812309449502, - }, - { - input: LabelSet{ - "a": "b", - "bb": "c", - }, - fingerprint: 7122421792099404749, - fastFingerprint: 5774953389407657638, - }, - } - - for i, scenario := range scenarios { - input := Metric(scenario.input) - - if scenario.fingerprint != input.Fingerprint() { - t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, input.Fingerprint()) - } - if scenario.fastFingerprint != input.FastFingerprint() { - t.Errorf("%d. 
expected %d, got %d", i, scenario.fastFingerprint, input.FastFingerprint())
-		}
-	}
-}
-
-func TestMetric(t *testing.T) {
-	testMetric(t)
-}
-
-func BenchmarkMetric(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		testMetric(b)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/model.go b/Godeps/_workspace/src/github.com/prometheus/common/model/model.go
deleted file mode 100644
index 88f013a47a..0000000000
--- a/Godeps/_workspace/src/github.com/prometheus/common/model/model.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package model contains common data structures that are shared across
-// Prometheus components and libraries.
-package model
diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/signature.go b/Godeps/_workspace/src/github.com/prometheus/common/model/signature.go
deleted file mode 100644
index 28f370065a..0000000000
--- a/Godeps/_workspace/src/github.com/prometheus/common/model/signature.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
-	"bytes"
-	"hash"
-	"hash/fnv"
-	"sort"
-	"sync"
-)
-
-// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
-// used to separate label names, label values, and other strings from each other
-// when calculating their combined hash value (aka signature aka fingerprint).
-const SeparatorByte byte = 255
-
-var (
-	// cache the signature of an empty label set.
-	emptyLabelSignature = fnv.New64a().Sum64()
-
-	hashAndBufPool sync.Pool
-)
-
-type hashAndBuf struct {
-	h hash.Hash64
-	b bytes.Buffer
-}
-
-func getHashAndBuf() *hashAndBuf {
-	hb := hashAndBufPool.Get()
-	if hb == nil {
-		return &hashAndBuf{h: fnv.New64a()}
-	}
-	return hb.(*hashAndBuf)
-}
-
-func putHashAndBuf(hb *hashAndBuf) {
-	hb.h.Reset()
-	hb.b.Reset()
-	hashAndBufPool.Put(hb)
-}
-
-// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
-// given label set. (Collisions are possible but unlikely if the number of label
-// sets the function is applied to is small.)
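Before the implementation below, a hedged sketch of the observable behavior; the empty-set constant matches the value asserted in signature_test.go further down:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// The empty label set hashes to the FNV-1a 64-bit offset basis.
	fmt.Println(model.LabelsToSignature(nil)) // 14695981039346656037

	// Map iteration order is irrelevant: names are sorted before hashing.
	s1 := model.LabelsToSignature(map[string]string{"a": "1", "b": "2"})
	s2 := model.LabelsToSignature(map[string]string{"b": "2", "a": "1"})
	fmt.Println(s1 == s2) // true
}
```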
-func LabelsToSignature(labels map[string]string) uint64 {
-	if len(labels) == 0 {
-		return emptyLabelSignature
-	}
-
-	labelNames := make([]string, 0, len(labels))
-	for labelName := range labels {
-		labelNames = append(labelNames, labelName)
-	}
-	sort.Strings(labelNames)
-
-	hb := getHashAndBuf()
-	defer putHashAndBuf(hb)
-
-	for _, labelName := range labelNames {
-		hb.b.WriteString(labelName)
-		hb.b.WriteByte(SeparatorByte)
-		hb.b.WriteString(labels[labelName])
-		hb.b.WriteByte(SeparatorByte)
-		hb.h.Write(hb.b.Bytes())
-		hb.b.Reset()
-	}
-	return hb.h.Sum64()
-}
-
-// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
-// parameter (rather than a label map) and returns a Fingerprint.
-func labelSetToFingerprint(ls LabelSet) Fingerprint {
-	if len(ls) == 0 {
-		return Fingerprint(emptyLabelSignature)
-	}
-
-	labelNames := make(LabelNames, 0, len(ls))
-	for labelName := range ls {
-		labelNames = append(labelNames, labelName)
-	}
-	sort.Sort(labelNames)
-
-	hb := getHashAndBuf()
-	defer putHashAndBuf(hb)
-
-	for _, labelName := range labelNames {
-		hb.b.WriteString(string(labelName))
-		hb.b.WriteByte(SeparatorByte)
-		hb.b.WriteString(string(ls[labelName]))
-		hb.b.WriteByte(SeparatorByte)
-		hb.h.Write(hb.b.Bytes())
-		hb.b.Reset()
-	}
-	return Fingerprint(hb.h.Sum64())
-}
-
-// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses a
-// faster and less allocation-heavy hash function, which is more susceptible to
-// creating hash collisions. Therefore, collision detection should be applied.
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
-	if len(ls) == 0 {
-		return Fingerprint(emptyLabelSignature)
-	}
-
-	var result uint64
-	hb := getHashAndBuf()
-	defer putHashAndBuf(hb)
-
-	for labelName, labelValue := range ls {
-		hb.b.WriteString(string(labelName))
-		hb.b.WriteByte(SeparatorByte)
-		hb.b.WriteString(string(labelValue))
-		hb.h.Write(hb.b.Bytes())
-		result ^= hb.h.Sum64()
-		hb.h.Reset()
-		hb.b.Reset()
-	}
-	return Fingerprint(result)
-}
-
-// SignatureForLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and only includes the labels with the
-// specified LabelNames into the signature calculation. The labels passed in
-// will be sorted by this function.
-func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
-	if len(m) == 0 || len(labels) == 0 {
-		return emptyLabelSignature
-	}
-
-	sort.Sort(LabelNames(labels))
-
-	hb := getHashAndBuf()
-	defer putHashAndBuf(hb)
-
-	for _, label := range labels {
-		hb.b.WriteString(string(label))
-		hb.b.WriteByte(SeparatorByte)
-		hb.b.WriteString(string(m[label]))
-		hb.b.WriteByte(SeparatorByte)
-		hb.h.Write(hb.b.Bytes())
-		hb.b.Reset()
-	}
-	return hb.h.Sum64()
-}
-
-// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and excludes the labels with any of the
-// specified LabelNames from the signature calculation.
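The two selection helpers are complementary, which the tests below also pin down; a hedged sketch using the same label values as those tests:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		"name": "garland, briggs",
		"fear": "love is not enough",
		"foo":  "bar",
	}

	// Including {fear, name} hashes the same pairs as excluding {foo}.
	with := model.SignatureForLabels(m, "fear", "name")
	without := model.SignatureWithoutLabels(m, map[model.LabelName]struct{}{"foo": {}})

	fmt.Println(with == without, with) // true 5799056148416392346
}
```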
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { - if len(m) == 0 { - return emptyLabelSignature - } - - labelNames := make(LabelNames, 0, len(m)) - for labelName := range m { - if _, exclude := labels[labelName]; !exclude { - labelNames = append(labelNames, labelName) - } - } - if len(labelNames) == 0 { - return emptyLabelSignature - } - sort.Sort(labelNames) - - hb := getHashAndBuf() - defer putHashAndBuf(hb) - - for _, labelName := range labelNames { - hb.b.WriteString(string(labelName)) - hb.b.WriteByte(SeparatorByte) - hb.b.WriteString(string(m[labelName])) - hb.b.WriteByte(SeparatorByte) - hb.h.Write(hb.b.Bytes()) - hb.b.Reset() - } - return hb.h.Sum64() -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/signature_test.go b/Godeps/_workspace/src/github.com/prometheus/common/model/signature_test.go deleted file mode 100644 index d9c665f8c7..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/signature_test.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "runtime" - "sync" - "testing" -) - -func TestLabelsToSignature(t *testing.T) { - var scenarios = []struct { - in map[string]string - out uint64 - }{ - { - in: map[string]string{}, - out: 14695981039346656037, - }, - { - in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"}, - out: 5799056148416392346, - }, - } - - for i, scenario := range scenarios { - actual := LabelsToSignature(scenario.in) - - if actual != scenario.out { - t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) - } - } -} - -func TestMetricToFingerprint(t *testing.T) { - var scenarios = []struct { - in LabelSet - out Fingerprint - }{ - { - in: LabelSet{}, - out: 14695981039346656037, - }, - { - in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, - out: 5799056148416392346, - }, - } - - for i, scenario := range scenarios { - actual := labelSetToFingerprint(scenario.in) - - if actual != scenario.out { - t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) - } - } -} - -func TestMetricToFastFingerprint(t *testing.T) { - var scenarios = []struct { - in LabelSet - out Fingerprint - }{ - { - in: LabelSet{}, - out: 14695981039346656037, - }, - { - in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, - out: 12952432476264840823, - }, - } - - for i, scenario := range scenarios { - actual := labelSetToFastFingerprint(scenario.in) - - if actual != scenario.out { - t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) - } - } -} - -func TestSignatureForLabels(t *testing.T) { - var scenarios = []struct { - in Metric - labels LabelNames - out uint64 - }{ - { - in: Metric{}, - labels: nil, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: LabelNames{"fear", "name"}, - out: 5799056148416392346, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, - labels: LabelNames{"fear", "name"}, - out: 5799056148416392346, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: LabelNames{}, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: nil, - out: 14695981039346656037, - }, - } - - for i, scenario := range scenarios { - actual := SignatureForLabels(scenario.in, scenario.labels...) - - if actual != scenario.out { - t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) - } - } -} - -func TestSignatureWithoutLabels(t *testing.T) { - var scenarios = []struct { - in Metric - labels map[LabelName]struct{} - out uint64 - }{ - { - in: Metric{}, - labels: nil, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}}, - out: 14695981039346656037, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, - labels: map[LabelName]struct{}{"foo": struct{}{}}, - out: 5799056148416392346, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: map[LabelName]struct{}{}, - out: 5799056148416392346, - }, - { - in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, - labels: nil, - out: 5799056148416392346, - }, - } - - for i, scenario := range scenarios { - actual := SignatureWithoutLabels(scenario.in, scenario.labels) - - if actual != scenario.out { - t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) - } - } -} - -func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) { - for i := 0; i < b.N; i++ { - if a := LabelsToSignature(l); a != e { - b.Fatalf("expected signature of %d for %s, got %d", e, l, a) - } - } -} - -func BenchmarkLabelToSignatureScalar(b *testing.B) { - benchmarkLabelToSignature(b, nil, 14695981039346656037) -} - -func BenchmarkLabelToSignatureSingle(b *testing.B) { - benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169) -} - -func BenchmarkLabelToSignatureDouble(b *testing.B) { - benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) -} - -func BenchmarkLabelToSignatureTriple(b *testing.B) { - benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) -} - -func benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { - for i := 0; i < b.N; i++ { - if a := labelSetToFingerprint(ls); a != e { - b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) - } - } -} - -func BenchmarkMetricToFingerprintScalar(b *testing.B) { - benchmarkMetricToFingerprint(b, nil, 14695981039346656037) -} - -func BenchmarkMetricToFingerprintSingle(b *testing.B) { - benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5146282821936882169) -} - -func BenchmarkMetricToFingerprintDouble(b *testing.B) { - benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) -} - -func BenchmarkMetricToFingerprintTriple(b *testing.B) { - benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) -} - -func benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { - for i := 0; i < b.N; i++ { - if a := labelSetToFastFingerprint(ls); a != e { - b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) - } - } -} - -func BenchmarkMetricToFastFingerprintScalar(b *testing.B) { - benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037) -} - -func BenchmarkMetricToFastFingerprintSingle(b *testing.B) { - benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5147259542624943964) -} - -func BenchmarkMetricToFastFingerprintDouble(b *testing.B) { - benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528) -} - -func BenchmarkMetricToFastFingerprintTriple(b *testing.B) { - benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676) -} - -func BenchmarkEmptyLabelSignature(b *testing.B) { - input := []map[string]string{nil, {}} - - var ms runtime.MemStats - runtime.ReadMemStats(&ms) - - alloc := ms.Alloc - - for _, labels := range input { - LabelsToSignature(labels) - } - - runtime.ReadMemStats(&ms) - - if got := ms.Alloc; alloc != got { - b.Fatal("expected LabelsToSignature with empty labels not to perform allocations") - } -} - -func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) { - var start, end sync.WaitGroup - start.Add(1) - end.Add(concLevel) - - for i := 0; i < 
concLevel; i++ { - go func() { - start.Wait() - for j := b.N / concLevel; j >= 0; j-- { - if a := labelSetToFastFingerprint(ls); a != e { - b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) - } - } - end.Done() - }() - } - b.ResetTimer() - start.Done() - end.Wait() -} - -func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) { - benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1) -} - -func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) { - benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2) -} - -func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) { - benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4) -} - -func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) { - benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/time.go b/Godeps/_workspace/src/github.com/prometheus/common/model/time.go deleted file mode 100644 index ebc8bf6cc8..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/time.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -const ( - // MinimumTick is the minimum supported time resolution. This has to be - // at most time.Second in order for the code below to work. - minimumTick = time.Millisecond - // second is the Time duration equivalent to one second. - second = int64(time.Second / minimumTick) - // The number of nanoseconds per minimum tick. - nanosPerTick = int64(minimumTick / time.Nanosecond) - - // Earliest is the earliest Time representable. Handy for - // initializing a high watermark. - Earliest = Time(math.MinInt64) - // Latest is the latest Time representable. Handy for initializing - // a low watermark. - Latest = Time(math.MaxInt64) -) - -// Time is the number of milliseconds since the epoch -// (1970-01-01 00:00 UTC) excluding leap seconds. -type Time int64 - -// Interval describes an interval between two timestamps. -type Interval struct { - Start, End Time -} - -// Now returns the current time as a Time. -func Now() Time { - return TimeFromUnixNano(time.Now().UnixNano()) -} - -// TimeFromUnix returns the Time equivalent to the Unix Time t -// provided in seconds.
-func TimeFromUnix(t int64) Time { - return Time(t * second) -} - -// TimeFromUnixNano returns the Time equivalent to the Unix Time -// t provided in nanoseconds. -func TimeFromUnixNano(t int64) Time { - return Time(t / nanosPerTick) -} - -// Equal reports whether two Times represent the same instant. -func (t Time) Equal(o Time) bool { - return t == o -} - -// Before reports whether the Time t is before o. -func (t Time) Before(o Time) bool { - return t < o -} - -// After reports whether the Time t is after o. -func (t Time) After(o Time) bool { - return t > o -} - -// Add returns the Time t + d. -func (t Time) Add(d time.Duration) Time { - return t + Time(d/minimumTick) -} - -// Sub returns the Duration t - o. -func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. -func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. -func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - *t = Time(v + va) - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} - -// Duration wraps time.Duration. It is used to parse the custom duration format -// from YAML. -// This type should not propagate beyond the scope of input/output processing. -type Duration time.Duration - -// ParseDuration parses a string into a Duration, assuming that a day -// always has 24h.
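A hedged illustration, not part of the vendored file: the format parsed below is a bare integer with a single unit suffix, and Duration's String method (further down) renders a value using the largest unit that divides it evenly. Based solely on the definitions that follow:

	d, _ := ParseDuration("90m")  // d == Duration(90 * time.Minute)
	d.String()                    // "90m": 5400s divides evenly by 60 but not by 3600
	_, err := ParseDuration("1y") // error: "y" matches durationRE but not the unit switch
	_, err = ParseDuration("90")  // error: durationRE requires a unit suffix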
-func ParseDuration(durationStr string) (Duration, error) { - matches := durationRE.FindStringSubmatch(durationStr) - if len(matches) != 3 { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - durSeconds, _ := strconv.Atoi(matches[1]) - dur := time.Duration(durSeconds) * time.Second - unit := matches[2] - switch unit { - case "d": - dur *= 60 * 60 * 24 - case "h": - dur *= 60 * 60 - case "m": - dur *= 60 - case "s": - dur *= 1 - default: - return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) - } - return Duration(dur), nil -} - -var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$") - -func (d Duration) String() string { - seconds := int64(time.Duration(d) / time.Second) - factors := map[string]int64{ - "d": 60 * 60 * 24, - "h": 60 * 60, - "m": 60, - "s": 1, - } - unit := "s" - switch int64(0) { - case seconds % factors["d"]: - unit = "d" - case seconds % factors["h"]: - unit = "h" - case seconds % factors["m"]: - unit = "m" - } - return fmt.Sprintf("%v%v", seconds/factors[unit], unit) -} - -// MarshalYAML implements the yaml.Marshaler interface. -func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/time_test.go b/Godeps/_workspace/src/github.com/prometheus/common/model/time_test.go deleted file mode 100644 index 9013a62775..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/time_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "testing" - "time" -) - -func TestComparators(t *testing.T) { - t1a := TimeFromUnix(0) - t1b := TimeFromUnix(0) - t2 := TimeFromUnix(2*second - 1) - - if !t1a.Equal(t1b) { - t.Fatalf("Expected %s to be equal to %s", t1a, t1b) - } - if t1a.Equal(t2) { - t.Fatalf("Expected %s to not be equal to %s", t1a, t2) - } - - if !t1a.Before(t2) { - t.Fatalf("Expected %s to be before %s", t1a, t2) - } - if t1a.Before(t1b) { - t.Fatalf("Expected %s to not be before %s", t1a, t1b) - } - - if !t2.After(t1a) { - t.Fatalf("Expected %s to be after %s", t2, t1a) - } - if t1b.After(t1a) { - t.Fatalf("Expected %s to not be after %s", t1b, t1a) - } -} - -func TestTimeConversions(t *testing.T) { - unixSecs := int64(1136239445) - unixNsecs := int64(123456789) - unixNano := unixSecs*1e9 + unixNsecs - - t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick) - t2 := time.Unix(unixSecs, unixNsecs) - - ts := TimeFromUnixNano(unixNano) - if !ts.Time().Equal(t1) { - t.Fatalf("Expected %s, got %s", t1, ts.Time()) - } - - // Test available precision. 
- ts = TimeFromUnixNano(t2.UnixNano()) - if !ts.Time().Equal(t1) { - t.Fatalf("Expected %s, got %s", t1, ts.Time()) - } - - if ts.UnixNano() != unixNano-unixNano%nanosPerTick { - t.Fatalf("Expected %d, got %d", unixNano-unixNano%nanosPerTick, ts.UnixNano()) - } -} - -func TestDuration(t *testing.T) { - duration := time.Second + time.Minute + time.Hour - goTime := time.Unix(1136239445, 0) - - ts := TimeFromUnix(goTime.Unix()) - if !goTime.Add(duration).Equal(ts.Add(duration).Time()) { - t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration)) - } - - earlier := ts.Add(-duration) - delta := ts.Sub(earlier) - if delta != duration { - t.Fatalf("Expected %s to be equal to %s", delta, duration) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/value.go b/Godeps/_workspace/src/github.com/prometheus/common/model/value.go deleted file mode 100644 index 10ffb0bd61..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/value.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strconv" - "strings" -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -func (v SampleValue) Equal(o SampleValue) bool { - return v == o -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric.
-type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -// Equal compares first the metrics, then the timestamp, then the value. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - if s.Value != o.Value { - return false - } - - return true -} - -func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. -func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached Metric. -type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` -} - -func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) - for i, v := range ss.Values { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} - -// Scalar is a scalar value evaluated at the set timestamp. -type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. -func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. -func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. 
-type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/Godeps/_workspace/src/github.com/prometheus/common/model/value_test.go b/Godeps/_workspace/src/github.com/prometheus/common/model/value_test.go deleted file mode 100644 index 2e9c7eb09d..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/common/model/value_test.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "math" - "reflect" - "sort" - "testing" -) - -func TestSamplePairJSON(t *testing.T) { - input := []struct { - plain string - value SamplePair - }{ - { - plain: `[1234.567,"123.1"]`, - value: SamplePair{ - Value: 123.1, - Timestamp: 1234567, - }, - }, - } - - for _, test := range input { - b, err := json.Marshal(test.value) - if err != nil { - t.Error(err) - continue - } - - if string(b) != test.plain { - t.Errorf("encoding error: expected %q, got %q", test.plain, b) - continue - } - - var sp SamplePair - err = json.Unmarshal(b, &sp) - if err != nil { - t.Error(err) - continue - } - - if sp != test.value { - t.Errorf("decoding error: expected %v, got %v", test.value, sp) - } - } -} - -func TestSampleJSON(t *testing.T) { - input := []struct { - plain string - value Sample - }{ - { - plain: `{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}`, - value: Sample{ - Metric: Metric{ - MetricNameLabel: "test_metric", - }, - Value: 123.1, - Timestamp: 1234567, - }, - }, - } - - for _, test := range input { - b, err := json.Marshal(test.value) - if err != nil { - t.Error(err) - continue - } - - if string(b) != test.plain { - t.Errorf("encoding error: expected %q, got %q", test.plain, b) - continue - } - - var sv Sample - err = json.Unmarshal(b, &sv) - if err != nil { - t.Error(err) - continue - } - - if !reflect.DeepEqual(sv, test.value) { - t.Errorf("decoding error: expected %v, got %v", test.value, sv) - } - } -} - -func TestVectorJSON(t *testing.T) { - input := []struct { - plain string - value Vector - }{ - { - plain: `[]`, - value: Vector{}, - }, - { - plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`, - value: Vector{&Sample{ - Metric: Metric{ - MetricNameLabel: "test_metric", - }, - Value: 123.1, - Timestamp: 1234567, - }}, - }, - { - plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`, - value: Vector{ - &Sample{ - Metric: Metric{ - MetricNameLabel: "test_metric", - }, - Value: 123.1, - Timestamp: 1234567, - }, - &Sample{ - Metric: Metric{ - "foo": "bar", - }, - Value: 
SampleValue(math.Inf(1)), - Timestamp: 1234, - }, - }, - }, - } - - for _, test := range input { - b, err := json.Marshal(test.value) - if err != nil { - t.Error(err) - continue - } - - if string(b) != test.plain { - t.Errorf("encoding error: expected %q, got %q", test.plain, b) - continue - } - - var vec Vector - err = json.Unmarshal(b, &vec) - if err != nil { - t.Error(err) - continue - } - - if !reflect.DeepEqual(vec, test.value) { - t.Errorf("decoding error: expected %v, got %v", test.value, vec) - } - } -} - -func TestScalarJSON(t *testing.T) { - input := []struct { - plain string - value Scalar - }{ - { - plain: `[123.456,"456"]`, - value: Scalar{ - Timestamp: 123456, - Value: 456, - }, - }, - { - plain: `[123123.456,"+Inf"]`, - value: Scalar{ - Timestamp: 123123456, - Value: SampleValue(math.Inf(1)), - }, - }, - { - plain: `[123123.456,"-Inf"]`, - value: Scalar{ - Timestamp: 123123456, - Value: SampleValue(math.Inf(-1)), - }, - }, - } - - for _, test := range input { - b, err := json.Marshal(test.value) - if err != nil { - t.Error(err) - continue - } - - if string(b) != test.plain { - t.Errorf("encoding error: expected %q, got %q", test.plain, b) - continue - } - - var sv Scalar - err = json.Unmarshal(b, &sv) - if err != nil { - t.Error(err) - continue - } - - if sv != test.value { - t.Errorf("decoding error: expected %v, got %v", test.value, sv) - } - } -} - -func TestStringJSON(t *testing.T) { - input := []struct { - plain string - value String - }{ - { - plain: `[123.456,"test"]`, - value: String{ - Timestamp: 123456, - Value: "test", - }, - }, - { - plain: `[123123.456,"台北"]`, - value: String{ - Timestamp: 123123456, - Value: "台北", - }, - }, - } - - for _, test := range input { - b, err := json.Marshal(test.value) - if err != nil { - t.Error(err) - continue - } - - if string(b) != test.plain { - t.Errorf("encoding error: expected %q, got %q", test.plain, b) - continue - } - - var sv String - err = json.Unmarshal(b, &sv) - if err != nil { - t.Error(err) - continue - } - - if sv != test.value { - t.Errorf("decoding error: expected %v, got %v", test.value, sv) - } - } -} - -func TestVectorSort(t *testing.T) { - input := Vector{ - &Sample{ - Metric: Metric{ - MetricNameLabel: "A", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "A", - }, - Timestamp: 2, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "C", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "C", - }, - Timestamp: 2, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "B", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "B", - }, - Timestamp: 2, - }, - } - - expected := Vector{ - &Sample{ - Metric: Metric{ - MetricNameLabel: "A", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "A", - }, - Timestamp: 2, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "B", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "B", - }, - Timestamp: 2, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "C", - }, - Timestamp: 1, - }, - &Sample{ - Metric: Metric{ - MetricNameLabel: "C", - }, - Timestamp: 2, - }, - } - - sort.Sort(input) - - for i, actual := range input { - actualFp := actual.Metric.Fingerprint() - expectedFp := expected[i].Metric.Fingerprint() - - if actualFp != expectedFp { - t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String()) - } - - if actual.Timestamp != expected[i].Timestamp { - t.Fatalf("%d. Incorrect timestamp. 
Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp) - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/log/AUTHORS.md b/Godeps/_workspace/src/github.com/prometheus/log/AUTHORS.md deleted file mode 100644 index e2b42a716b..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/log/AUTHORS.md +++ /dev/null @@ -1,11 +0,0 @@ -The Prometheus project was started by Matt T. Proud (emeritus) and -Julius Volz in 2012. - -Maintainers of this repository: - -* Julius Volz - -The following individuals have contributed code to this repository -(listed in alphabetical order): - -* Julius Volz diff --git a/Godeps/_workspace/src/github.com/prometheus/log/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/prometheus/log/CONTRIBUTING.md deleted file mode 100644 index 5705f0fbea..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/log/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/Godeps/_workspace/src/github.com/prometheus/log/LICENSE b/Godeps/_workspace/src/github.com/prometheus/log/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/log/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/prometheus/log/NOTICE b/Godeps/_workspace/src/github.com/prometheus/log/NOTICE deleted file mode 100644 index 1f37552eb9..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/log/NOTICE +++ /dev/null @@ -1,2 +0,0 @@ -Standard logging library for Go-based Prometheus components. -Copyright 2015 The Prometheus Authors diff --git a/Godeps/_workspace/src/github.com/prometheus/log/README.md b/Godeps/_workspace/src/github.com/prometheus/log/README.md deleted file mode 100644 index d86afac153..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/log/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Prometheus Logging Library - -Standard logging library for Go-based Prometheus components. - -This library wraps -[https://github.com/Sirupsen/logrus](https://github.com/Sirupsen/logrus) in -order to add line:file annotations to log lines, as well as to provide common -command-line flags for Prometheus components using it. 
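The README above stops short of a usage example, so here is a minimal sketch, not part of the vendored tree, assuming only what the deleted log.go below shows: the package's init registers a -log.level flag, and each helper logs through logrus with file and line fields attached.

	package main

	import (
		"flag"

		"github.com/prometheus/log"
	)

	func main() {
		// Parse flags first so -log.level takes effect before anything is logged.
		flag.Parse()

		log.Debugln("only visible with -log.level=debug")
		log.Infof("listening on %s", ":9090") // the address is illustrative only
	}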
diff --git a/Godeps/_workspace/src/github.com/prometheus/log/log.go b/Godeps/_workspace/src/github.com/prometheus/log/log.go deleted file mode 100644 index 8c85df3b46..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/log/log.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package log - -import ( - "flag" - "runtime" - "strings" - - "github.com/Sirupsen/logrus" -) - -var logger = logrus.New() - -type levelFlag struct{} - -// String implements flag.Value. -func (f levelFlag) String() string { - return logger.Level.String() -} - -// Set implements flag.Value. -func (f levelFlag) Set(level string) error { - l, err := logrus.ParseLevel(level) - if err != nil { - return err - } - logger.Level = l - return nil -} - -func init() { - // In order for this flag to take effect, the user of the package must call - // flag.Parse() before logging anything. - flag.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal, panic].") -} - -// fileLineEntry returns a logrus.Entry with file and line annotations for the -// original user log statement (two stack frames up from this function). -func fileLineEntry() *logrus.Entry { - _, file, line, ok := runtime.Caller(2) - if !ok { - file = "<???>" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return logger.WithFields(logrus.Fields{ - "file": file, - "line": line, - }) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - fileLineEntry().Debug(args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - fileLineEntry().Debugln(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - fileLineEntry().Debugf(format, args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - fileLineEntry().Info(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - fileLineEntry().Infoln(args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - fileLineEntry().Infof(format, args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - fileLineEntry().Info(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - fileLineEntry().Infoln(args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - fileLineEntry().Infof(format, args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - fileLineEntry().Warn(args...)
-} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - fileLineEntry().Warnln(args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - fileLineEntry().Warnf(format, args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - fileLineEntry().Error(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - fileLineEntry().Errorln(args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - fileLineEntry().Errorf(format, args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - fileLineEntry().Fatal(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - fileLineEntry().Fatalln(args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - fileLineEntry().Fatalf(format, args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - fileLineEntry().Panic(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - fileLineEntry().Panicln(args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - fileLineEntry().Panicf(format, args...) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/log/log_test.go b/Godeps/_workspace/src/github.com/prometheus/log/log_test.go deleted file mode 100644 index 76708b661e..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/log/log_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package log - -import ( - "bytes" - "regexp" - "testing" - - "github.com/Sirupsen/logrus" -) - -func TestFileLineLogging(t *testing.T) { - var buf bytes.Buffer - logger.Out = &buf - logger.Formatter = &logrus.TextFormatter{ - DisableColors: true, - } - - // The default logging level should be "info". - Debugln("This debug-level line should not show up in the output.") - Infof("This %s-level line should show up in the output.", "info") - - re := `^time=".*" level=info msg="This info-level line should show up in the output." 
file="log_test.go" line=33 \n$` - if !regexp.MustCompile(re).Match(buf.Bytes()) { - t.Fatalf("%q did not match expected regex %q", buf.String(), re) - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/.travis.yml b/Godeps/_workspace/src/github.com/prometheus/procfs/.travis.yml deleted file mode 100644 index b1e6743f9c..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 - - tip diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/AUTHORS.md b/Godeps/_workspace/src/github.com/prometheus/procfs/AUTHORS.md deleted file mode 100644 index 8dde8e31f9..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/AUTHORS.md +++ /dev/null @@ -1,12 +0,0 @@ -The Prometheus project was started by Matt T. Proud (emeritus) and -Julius Volz in 2012. - -Maintainers of this repository: - -* Tobias Schmidt - -The following individuals have contributed code to this repository -(listed in alphabetical order): - -* Ji-Hoon, Seol -* Tobias Schmidt diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 5705f0fbea..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE b/Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE b/Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE deleted file mode 100644 index 53c5e9aa11..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. 
- -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/README.md b/Godeps/_workspace/src/github.com/prometheus/procfs/README.md deleted file mode 100644 index 6e7ee6b8b7..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# procfs - -This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. - -[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/doc.go b/Godeps/_workspace/src/github.com/prometheus/procfs/doc.go deleted file mode 100644 index e2acd6d40a..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2014 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package procfs provides functions to retrieve system, kernel and process -// metrics from the pseudo-filesystem proc. 
-// -// Example: -// -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.NewStat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// -package procfs diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/cmdline b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/cmdline deleted file mode 100644 index d2d8ef8876..0000000000 Binary files a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/cmdline and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/exe b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/exe deleted file mode 120000 index a91bec4dac..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/exe +++ /dev/null @@ -1 +0,0 @@ -/usr/bin/vim \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/0 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/0 deleted file mode 120000 index da9c5dff3e..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/0 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/abc \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/1 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/1 deleted file mode 120000 index ca47b50ca5..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/1 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/def \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/10 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/10 deleted file mode 120000 index c086831683..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/10 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/xyz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/2 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/2 deleted file mode 120000 index 66731c0689..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/2 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/ghi \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/3 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/3 deleted file mode 120000 index 0135dce35f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/3 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/uvw \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/io b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/io deleted file mode 100644 index b6210a7a7d..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/io +++ /dev/null @@ -1,7 +0,0 @@ -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: 
-1024 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits deleted file mode 100644 index 23c6b6898f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits +++ /dev/null @@ -1,17 +0,0 @@ -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat deleted file mode 100644 index 438aaa9dce..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat +++ /dev/null @@ -1 +0,0 @@ -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/cmdline b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/cmdline deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/0 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/0 deleted file mode 120000 index da9c5dff3e..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/0 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/abc \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/1 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/1 deleted file mode 120000 index ca47b50ca5..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/1 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/def \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/2 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/2 deleted file mode 120000 index 66731c0689..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/2 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/ghi \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/3 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/3 deleted file mode 120000 index 0135dce35f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/3 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/uvw \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/4 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/4 deleted file mode 120000 index c086831683..0000000000 --- 
a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/fd/4 +++ /dev/null @@ -1 +0,0 @@ -../../symlinktargets/xyz \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/limits b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/limits deleted file mode 100644 index 3f9bf16a9f..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/limits +++ /dev/null @@ -1,17 +0,0 @@ -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/stat deleted file mode 100644 index 321b160734..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26232/stat +++ /dev/null @@ -1 +0,0 @@ -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat deleted file mode 100644 index 65b9369d13..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat +++ /dev/null @@ -1,2 +0,0 @@ -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/net/ip_vs b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/net/ip_vs deleted file mode 100644 index 6a6a97d7d6..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/net/ip_vs +++ /dev/null @@ -1,14 +0,0 @@ -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/net/ip_vs_stats b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/net/ip_vs_stats deleted file mode 100644 index c00724e0f0..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/net/ip_vs_stats +++ /dev/null @@ -1,6 +0,0 @@ - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s 
Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat deleted file mode 100644 index dabb96f747..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat +++ /dev/null @@ -1,16 +0,0 @@ -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ctxt 38014093 -btime 1418183276 -processes 26442 -procs_running 2 -procs_blocked 0 -softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/README b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/README deleted file mode 100644 index 5cf184ea05..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/README +++ /dev/null @@ -1,2 +0,0 @@ -This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
-They are otherwise ignored by the tests diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/abc b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/abc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/def b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/def deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/ghi b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/ghi deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/uvw b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/uvw deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/xyz b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/symlinktargets/xyz deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fs.go b/Godeps/_workspace/src/github.com/prometheus/procfs/fs.go deleted file mode 100644 index 6a8d97b11e..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fs.go +++ /dev/null @@ -1,40 +0,0 @@ -package procfs - -import ( - "fmt" - "os" - "path" -) - -// FS represents the pseudo-filesystem proc, which provides an interface to -// kernel data structures. -type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. 
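-// For example, NewFS(DefaultMountPoint) yields an FS rooted at the real /proc, -// while this package's tests build one over the checked-in "fixtures" tree.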
-func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -func (fs FS) stat(p string) (os.FileInfo, error) { - return os.Stat(path.Join(string(fs), p)) -} - -func (fs FS) open(p string) (*os.File, error) { - return os.Open(path.Join(string(fs), p)) -} - -func (fs FS) readlink(p string) (string, error) { - return os.Readlink(path.Join(string(fs), p)) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fs_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/fs_test.go deleted file mode 100644 index 91f1c6c976..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/fs_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package procfs - -import "testing" - -func TestNewFS(t *testing.T) { - if _, err := NewFS("foobar"); err == nil { - t.Error("want NewFS to fail for non-existing mount point") - } - - if _, err := NewFS("procfs.go"); err == nil { - t.Error("want NewFS to fail if mount point is not a directory") - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/ipvs.go b/Godeps/_workspace/src/github.com/prometheus/procfs/ipvs.go deleted file mode 100644 index 26da5000e3..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/ipvs.go +++ /dev/null @@ -1,223 +0,0 @@ -package procfs - -import ( - "bufio" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "strconv" - "strings" -) - -// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. -type IPVSStats struct { - // Total count of connections. - Connections uint64 - // Total incoming packets processed. - IncomingPackets uint64 - // Total outgoing packets processed. - OutgoingPackets uint64 - // Total incoming traffic. - IncomingBytes uint64 - // Total outgoing traffic. - OutgoingBytes uint64 -} - -// IPVSBackendStatus holds current metrics of one virtual / real address pair. -type IPVSBackendStatus struct { - // The local (virtual) IP address. - LocalAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The transport protocol (TCP, UDP). - Proto string - // The remote (real) IP address. - RemoteAddress net.IP - // The remote (real) port. - RemotePort uint16 - // The current number of active connections for this virtual/real address pair. - ActiveConn uint64 - // The current number of inactive connections for this virtual/real address pair. - InactConn uint64 - // The current weight of this virtual/real address pair. - Weight uint64 -} - -// NewIPVSStats reads the IPVS statistics. -func NewIPVSStats() (IPVSStats, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return IPVSStats{}, err - } - - return fs.NewIPVSStats() -} - -// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := fs.open("net/ip_vs_stats") - if err != nil { - return IPVSStats{}, err - } - defer file.Close() - - return parseIPVSStats(file) -} - -// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
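-// All counter fields in this file are hexadecimal, hence the base-16 ParseUint -// calls below; e.g. the fixture value 16AA370 decodes to the 23765872 -// connections expected by ipvs_test.go.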
-func parseIPVSStats(file io.Reader) (IPVSStats, error) { - var ( - statContent []byte - statLines []string - statFields []string - stats IPVSStats - ) - - statContent, err := ioutil.ReadAll(file) - if err != nil { - return IPVSStats{}, err - } - - statLines = strings.SplitN(string(statContent), "\n", 4) - if len(statLines) != 4 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") - } - - statFields = strings.Fields(statLines[2]) - if len(statFields) != 5 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") - } - - stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) - if err != nil { - return IPVSStats{}, err - } - - return stats, nil -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. -func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return []IPVSBackendStatus{}, err - } - - return fs.NewIPVSBackendStatus() -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := fs.open("net/ip_vs") - if err != nil { - return nil, err - } - defer file.Close() - - return parseIPVSBackendStatus(file) -} - -func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { - var ( - status []IPVSBackendStatus - scanner = bufio.NewScanner(file) - proto string - localAddress net.IP - localPort uint16 - err error - ) - - for scanner.Scan() { - fields := strings.Fields(string(scanner.Text())) - if len(fields) == 0 { - continue - } - switch { - case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": - continue - case fields[0] == "TCP" || fields[0] == "UDP": - if len(fields) < 2 { - continue - } - proto = fields[0] - localAddress, localPort, err = parseIPPort(fields[1]) - if err != nil { - return nil, err - } - case fields[0] == "->": - if len(fields) < 6 { - continue - } - remoteAddress, remotePort, err := parseIPPort(fields[1]) - if err != nil { - return nil, err - } - weight, err := strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - activeConn, err := strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - inactConn, err := strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - status = append(status, IPVSBackendStatus{ - LocalAddress: localAddress, - LocalPort: localPort, - RemoteAddress: remoteAddress, - RemotePort: remotePort, - Proto: proto, - Weight: weight, - ActiveConn: activeConn, - InactConn: inactConn, - }) - } - } - return status, nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - tmp := strings.SplitN(s, ":", 2) - - if len(tmp) != 2 { - return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) - } - - if len(tmp[0]) != 8 && len(tmp[0]) != 32 { - return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0]) - } - - ip, err := hex.DecodeString(tmp[0]) - if err != nil { - return nil, 0, err - } - - 
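// Both halves of the pair are hex-encoded: "C0A80016:0CEA" decodes to 192.168.0.22 port 3306 (0x0CEA). -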
port, err := strconv.ParseUint(tmp[1], 16, 16) - if err != nil { - return nil, 0, err - } - - return ip, uint16(port), nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/ipvs_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/ipvs_test.go deleted file mode 100644 index 6036cde843..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/ipvs_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package procfs - -import ( - "net" - "testing" -) - -var ( - expectedIPVSStats = IPVSStats{ - Connections: 23765872, - IncomingPackets: 3811989221, - OutgoingPackets: 0, - IncomingBytes: 89991519156915, - OutgoingBytes: 0, - } - expectedIPVSBackendStatuses = []IPVSBackendStatus{ - IPVSBackendStatus{ - LocalAddress: net.ParseIP("192.168.0.22"), - LocalPort: 3306, - RemoteAddress: net.ParseIP("192.168.82.22"), - RemotePort: 3306, - Proto: "TCP", - Weight: 100, - ActiveConn: 248, - InactConn: 2, - }, - IPVSBackendStatus{ - LocalAddress: net.ParseIP("192.168.0.22"), - LocalPort: 3306, - RemoteAddress: net.ParseIP("192.168.83.24"), - RemotePort: 3306, - Proto: "TCP", - Weight: 100, - ActiveConn: 248, - InactConn: 2, - }, - IPVSBackendStatus{ - LocalAddress: net.ParseIP("192.168.0.22"), - LocalPort: 3306, - RemoteAddress: net.ParseIP("192.168.83.21"), - RemotePort: 3306, - Proto: "TCP", - Weight: 100, - ActiveConn: 248, - InactConn: 1, - }, - IPVSBackendStatus{ - LocalAddress: net.ParseIP("192.168.0.57"), - LocalPort: 3306, - RemoteAddress: net.ParseIP("192.168.84.22"), - RemotePort: 3306, - Proto: "TCP", - Weight: 0, - ActiveConn: 0, - InactConn: 0, - }, - IPVSBackendStatus{ - LocalAddress: net.ParseIP("192.168.0.57"), - LocalPort: 3306, - RemoteAddress: net.ParseIP("192.168.82.21"), - RemotePort: 3306, - Proto: "TCP", - Weight: 100, - ActiveConn: 1499, - InactConn: 0, - }, - IPVSBackendStatus{ - LocalAddress: net.ParseIP("192.168.0.57"), - LocalPort: 3306, - RemoteAddress: net.ParseIP("192.168.50.21"), - RemotePort: 3306, - Proto: "TCP", - Weight: 100, - ActiveConn: 1498, - InactConn: 0, - }, - IPVSBackendStatus{ - LocalAddress: net.ParseIP("192.168.0.55"), - LocalPort: 3306, - RemoteAddress: net.ParseIP("192.168.50.26"), - RemotePort: 3306, - Proto: "TCP", - Weight: 0, - ActiveConn: 0, - InactConn: 0, - }, - IPVSBackendStatus{ - LocalAddress: net.ParseIP("192.168.0.55"), - LocalPort: 3306, - RemoteAddress: net.ParseIP("192.168.49.32"), - RemotePort: 3306, - Proto: "TCP", - Weight: 100, - ActiveConn: 0, - InactConn: 0, - }, - } -) - -func TestIPVSStats(t *testing.T) { - fs, err := NewFS("fixtures") - if err != nil { - t.Fatal(err) - } - stats, err := fs.NewIPVSStats() - if err != nil { - t.Fatal(err) - } - - if stats != expectedIPVSStats { - t.Errorf("want %+v, got %+v", expectedIPVSStats, stats) - } -} - -func TestParseIPPort(t *testing.T) { - ip := net.ParseIP("192.168.0.22") - port := uint16(3306) - - gotIP, gotPort, err := parseIPPort("C0A80016:0CEA") - if err != nil { - t.Fatal(err) - } - if !(gotIP.Equal(ip) && port == gotPort) { - t.Errorf("want %s:%d, got %s:%d", ip, port, gotIP, gotPort) - } -} - -func TestParseIPPortInvalid(t *testing.T) { - testcases := []string{ - "", - "C0A80016", - "C0A800:1234", - "FOOBARBA:1234", - "C0A80016:0CEA:1234", - } - - for _, s := range testcases { - ip, port, err := parseIPPort(s) - if ip != nil || port != uint16(0) || err == nil { - t.Errorf("Expected error for input %s, got ip = %s, port = %v, err = %v", s, ip, port, err) - } - } -} - -func TestParseIPPortIPv6(t *testing.T) { - ip := net.ParseIP("dead:beef::1") - port := 
uint16(8080) - - gotIP, gotPort, err := parseIPPort("DEADBEEF000000000000000000000001:1F90") - if err != nil { - t.Fatal(err) - } - if !(gotIP.Equal(ip) && port == gotPort) { - t.Errorf("want %s:%d, got %s:%d", ip, port, gotIP, gotPort) - } - -} - -func TestIPVSBackendStatus(t *testing.T) { - fs, err := NewFS("fixtures") - if err != nil { - t.Fatal(err) - } - - backendStats, err := fs.NewIPVSBackendStatus() - if err != nil { - t.Fatal(err) - } - - for idx, expect := range expectedIPVSBackendStatuses { - if !backendStats[idx].LocalAddress.Equal(expect.LocalAddress) { - t.Errorf("expected LocalAddress %s, got %s", expect.LocalAddress, backendStats[idx].LocalAddress) - } - if backendStats[idx].LocalPort != expect.LocalPort { - t.Errorf("expected LocalPort %d, got %d", expect.LocalPort, backendStats[idx].LocalPort) - } - if !backendStats[idx].RemoteAddress.Equal(expect.RemoteAddress) { - t.Errorf("expected RemoteAddress %s, got %s", expect.RemoteAddress, backendStats[idx].RemoteAddress) - } - if backendStats[idx].RemotePort != expect.RemotePort { - t.Errorf("expected RemotePort %d, got %d", expect.RemotePort, backendStats[idx].RemotePort) - } - if backendStats[idx].Proto != expect.Proto { - t.Errorf("expected Proto %s, got %s", expect.Proto, backendStats[idx].Proto) - } - if backendStats[idx].Weight != expect.Weight { - t.Errorf("expected Weight %d, got %d", expect.Weight, backendStats[idx].Weight) - } - if backendStats[idx].ActiveConn != expect.ActiveConn { - t.Errorf("expected ActiveConn %d, got %d", expect.ActiveConn, backendStats[idx].ActiveConn) - } - if backendStats[idx].InactConn != expect.InactConn { - t.Errorf("expected InactConn %d, got %d", expect.InactConn, backendStats[idx].InactConn) - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc.go deleted file mode 100644 index ca5f12a595..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/proc.go +++ /dev/null @@ -1,188 +0,0 @@ -package procfs - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strconv" - "strings" -) - -// Proc provides information about a running process. -type Proc struct { - // The process ID. - PID int - - fs FS -} - -// Procs represents a list of Proc structs. -type Procs []Proc - -func (p Procs) Len() int { return len(p) } -func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } - -// Self returns a process for the current process. -func Self() (Proc, error) { - return NewProc(os.Getpid()) } - -// NewProc returns a process for the given pid under /proc. -func NewProc(pid int) (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - - return fs.NewProc(pid) -} - -// AllProcs returns a list of all currently available processes under /proc. -func AllProcs() (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - - return fs.AllProcs() -} - -// NewProc returns a process for the given pid. -func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := fs.stat(strconv.Itoa(pid)); err != nil { - return Proc{}, err - } - - return Proc{PID: pid, fs: fs}, nil -} - -// AllProcs returns a list of all currently available processes.
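-// Directory entries that do not parse as an integer PID (such as "stat" or -// "net") are silently skipped.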
-func (fs FS) AllProcs() (Procs, error) { - d, err := fs.open("") - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - p := Procs{} - for _, n := range names { - pid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - p = append(p, Proc{PID: int(pid), fs: fs}) - } - - return p, nil -} - -// CmdLine returns the command line of a process. -func (p Proc) CmdLine() ([]string, error) { - f, err := p.open("cmdline") - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - if len(data) < 1 { - return []string{}, nil - } - - return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil -} - -// Executable returns the absolute path of the executable command of a process. -func (p Proc) Executable() (string, error) { - exe, err := p.readlink("exe") - - if os.IsNotExist(err) { - return "", nil - } - - return exe, err -} - -// FileDescriptors returns the currently open file descriptors of a process. -func (p Proc) FileDescriptors() ([]uintptr, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - fds := make([]uintptr, len(names)) - for i, n := range names { - fd, err := strconv.ParseInt(n, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse fd %s: %s", n, err) - } - fds[i] = uintptr(fd) - } - - return fds, nil -} - -// FileDescriptorTargets returns the targets of all file descriptors of a process. -// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. -func (p Proc) FileDescriptorTargets() ([]string, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - targets := make([]string, len(names)) - - for i, name := range names { - target, err := p.readlink("fd/" + name) - if err == nil { - targets[i] = target - } - } - - return targets, nil -} - -// FileDescriptorsLen returns the number of currently open file descriptors of -// a process. -func (p Proc) FileDescriptorsLen() (int, error) { - fds, err := p.fileDescriptors() - if err != nil { - return 0, err - } - - return len(fds), nil -} - -func (p Proc) fileDescriptors() ([]string, error) { - d, err := p.open("fd") - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - return names, nil -} - -func (p Proc) open(pa string) (*os.File, error) { - return p.fs.open(path.Join(strconv.Itoa(p.PID), pa)) -} - -func (p Proc) readlink(pa string) (string, error) { - return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa)) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_io.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_io.go deleted file mode 100644 index 7c6dc86970..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,54 +0,0 @@ -package procfs - -import ( - "fmt" - "io/ioutil" -) - -// ProcIO models the content of /proc/<pid>/io. -type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. - SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation.
See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// NewIO creates a new ProcIO instance from a given Proc instance. -func (p Proc) NewIO() (ProcIO, error) { - pio := ProcIO{} - - f, err := p.open("io") - if err != nil { - return pio, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - if err != nil { - return pio, err - } - - return pio, nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_io_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_io_test.go deleted file mode 100644 index 5ef524d8e0..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_io_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package procfs - -import "testing" - -func TestProcIO(t *testing.T) { - fs, err := NewFS("fixtures") - if err != nil { - t.Fatal(err) - } - - p, err := fs.NewProc(26231) - if err != nil { - t.Fatal(err) - } - - s, err := p.NewIO() - if err != nil { - t.Fatal(err) - } - - for _, test := range []struct { - name string - want uint64 - got uint64 - }{ - {name: "RChar", want: 750339, got: s.RChar}, - {name: "WChar", want: 818609, got: s.WChar}, - {name: "SyscR", want: 7405, got: s.SyscR}, - {name: "SyscW", want: 5245, got: s.SyscW}, - {name: "ReadBytes", want: 1024, got: s.ReadBytes}, - {name: "WriteBytes", want: 2048, got: s.WriteBytes}, - } { - if test.want != test.got { - t.Errorf("want %s %d, got %d", test.name, test.want, test.got) - } - } - - for _, test := range []struct { - name string - want int64 - got int64 - }{ - {name: "CancelledWriteBytes", want: -1024, got: s.CancelledWriteBytes}, - } { - if test.want != test.got { - t.Errorf("want %s %d, got %d", test.name, test.want, test.got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go deleted file mode 100644 index 9f080b9f62..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go +++ /dev/null @@ -1,111 +0,0 @@ -package procfs - -import ( - "bufio" - "fmt" - "regexp" - "strconv" -) - -// ProcLimits represents the soft limits for each of the process's resource -// limits. -type ProcLimits struct { - CPUTime int - FileSize int - DataSize int - StackSize int - CoreFileSize int - ResidentSet int - Processes int - OpenFiles int - LockedMemory int - AddressSpace int - FileLocks int - PendingSignals int - MsqqueueSize int - NicePriority int - RealtimePriority int - RealtimeTimeout int -} - -const ( - limitsFields = 3 - limitsUnlimited = "unlimited" -) - -var ( - limitsDelimiter = regexp.MustCompile(" +") -) - -// NewLimits returns the current soft limits of the process. 
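-// Limits the kernel reports as "unlimited" are returned as -1; see parseInt -// below and the "cpu time" and "address space" expectations in the test.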
-func (p Proc) NewLimits() (ProcLimits, error) { - f, err := p.open("limits") - if err != nil { - return ProcLimits{}, err - } - defer f.Close() - - var ( - l = ProcLimits{} - s = bufio.NewScanner(f) - ) - for s.Scan() { - fields := limitsDelimiter.Split(s.Text(), limitsFields) - if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf( - "couldn't parse %s line %s", f.Name(), s.Text()) - } - - switch fields[0] { - case "Max cpu time": - l.CPUTime, err = parseInt(fields[1]) - case "Max file size": - l.FileSize, err = parseInt(fields[1]) - case "Max data size": - l.DataSize, err = parseInt(fields[1]) - case "Max stack size": - l.StackSize, err = parseInt(fields[1]) - case "Max core file size": - l.CoreFileSize, err = parseInt(fields[1]) - case "Max resident set": - l.ResidentSet, err = parseInt(fields[1]) - case "Max processes": - l.Processes, err = parseInt(fields[1]) - case "Max open files": - l.OpenFiles, err = parseInt(fields[1]) - case "Max locked memory": - l.LockedMemory, err = parseInt(fields[1]) - case "Max address space": - l.AddressSpace, err = parseInt(fields[1]) - case "Max file locks": - l.FileLocks, err = parseInt(fields[1]) - case "Max pending signals": - l.PendingSignals, err = parseInt(fields[1]) - case "Max msgqueue size": - l.MsqqueueSize, err = parseInt(fields[1]) - case "Max nice priority": - l.NicePriority, err = parseInt(fields[1]) - case "Max realtime priority": - l.RealtimePriority, err = parseInt(fields[1]) - case "Max realtime timeout": - l.RealtimeTimeout, err = parseInt(fields[1]) - } - - if err != nil { - return ProcLimits{}, err - } - } - - return l, s.Err() -} - -func parseInt(s string) (int, error) { - if s == limitsUnlimited { - return -1, nil - } - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) - } - return int(i), nil -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits_test.go deleted file mode 100644 index ca7a254da7..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package procfs - -import "testing" - -func TestNewLimits(t *testing.T) { - fs, err := NewFS("fixtures") - if err != nil { - t.Fatal(err) - } - - p, err := fs.NewProc(26231) - if err != nil { - t.Fatal(err) - } - - l, err := p.NewLimits() - if err != nil { - t.Fatal(err) - } - - for _, test := range []struct { - name string - want int - got int - }{ - {name: "cpu time", want: -1, got: l.CPUTime}, - {name: "open files", want: 2048, got: l.OpenFiles}, - {name: "msgqueue size", want: 819200, got: l.MsqqueueSize}, - {name: "nice priority", want: 0, got: l.NicePriority}, - {name: "address space", want: -1, got: l.AddressSpace}, - } { - if test.want != test.got { - t.Errorf("want %s %d, got %d", test.name, test.want, test.got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go deleted file mode 100644 index 30a403b6c7..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go +++ /dev/null @@ -1,175 +0,0 @@ -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" -) - -// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which -// required cgo. However, that caused a lot of problems regarding -// cross-compilation.
Alternatives such as running a binary to determine the -// value, or trying to derive it in some other way were all problematic. -// After much research it was determined that USER_HZ is actually hardcoded to -// 100 on all Go-supported platforms as of the time of this writing. This is -// why we decided to hardcode it here as well. It is not impossible that there -// could be systems with exceptions, but they should be very exotic edge cases, -// and in that case, the worst outcome will be two misreported metrics. -// -// See also the following discussions: -// -// - https://github.com/prometheus/node_exporter/issues/52 -// - https://github.com/prometheus/procfs/pull/2 -// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue -const userHZ = 100 - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStat struct { - // The process ID. - PID int - // The filename of the executable. - Comm string - // The process state. - State string - // The PID of the parent of this process. - PPID int - // The process group ID of the process. - PGRP int - // The session ID of the process. - Session int - // The controlling terminal of the process. - TTY int - // The ID of the foreground process group of the controlling terminal of - // the process. - TPGID int - // The kernel flags word of the process. - Flags uint - // The number of minor faults the process has made which have not required - // loading a memory page from disk. - MinFlt uint - // The number of minor faults that the process's waited-for children have - // made. - CMinFlt uint - // The number of major faults the process has made which have required - // loading a memory page from disk. - MajFlt uint - // The number of major faults that the process's waited-for children have - // made. - CMajFlt uint - // Amount of time that this process has been scheduled in user mode, - // measured in clock ticks. - UTime uint - // Amount of time that this process has been scheduled in kernel mode, - // measured in clock ticks. - STime uint - // Amount of time that this process's waited-for children have been - // scheduled in user mode, measured in clock ticks. - CUTime uint - // Amount of time that this process's waited-for children have been - // scheduled in kernel mode, measured in clock ticks. - CSTime uint - // For processes running a real-time scheduling policy, this is the negated - // scheduling priority, minus one. - Priority int - // The nice value, a value in the range 19 (low priority) to -20 (high - // priority). - Nice int - // Number of threads in this process. - NumThreads int - // The time the process started after system boot, the value is expressed - // in clock ticks. - Starttime uint64 - // Virtual memory size in bytes. - VSize int - // Resident set size in pages. - RSS int - - fs FS -} - -// NewStat returns the current status information of the process. 
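-// Worked example, using the 26231 fixture: utime=1677 and stime=44 ticks give -// CPUTime() = (1677+44)/userHZ = 17.21s; starttime=82375 ticks on top of btime -// 1418183276 gives StartTime() = 1418184099.75 (cf. proc_stat_test.go).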
-func (p Proc) NewStat() (ProcStat, error) { - f, err := p.open("stat") - if err != nil { - return ProcStat{}, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return ProcStat{}, err - } - - var ( - ignore int - - s = ProcStat{PID: p.PID, fs: p.fs} - l = bytes.Index(data, []byte("(")) - r = bytes.LastIndex(data, []byte(")")) - ) - - if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf( - "unexpected format, couldn't extract comm: %s", - data, - ) - } - - s.Comm = string(data[l+1 : r]) - _, err = fmt.Fscan( - bytes.NewBuffer(data[r+2:]), - &s.State, - &s.PPID, - &s.PGRP, - &s.Session, - &s.TTY, - &s.TPGID, - &s.Flags, - &s.MinFlt, - &s.CMinFlt, - &s.MajFlt, - &s.CMajFlt, - &s.UTime, - &s.STime, - &s.CUTime, - &s.CSTime, - &s.Priority, - &s.Nice, - &s.NumThreads, - &ignore, - &s.Starttime, - &s.VSize, - &s.RSS, - ) - if err != nil { - return ProcStat{}, err - } - - return s, nil -} - -// VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() int { - return s.VSize -} - -// ResidentMemory returns the resident memory size in bytes. -func (s ProcStat) ResidentMemory() int { - return s.RSS * os.Getpagesize() -} - -// StartTime returns the unix timestamp of the process in seconds. -func (s ProcStat) StartTime() (float64, error) { - stat, err := s.fs.NewStat() - if err != nil { - return 0, err - } - return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil -} - -// CPUTime returns the total CPU user and system time in seconds. -func (s ProcStat) CPUTime() float64 { - return float64(s.UTime+s.STime) / userHZ -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat_test.go deleted file mode 100644 index e4d5cacfa4..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package procfs - -import "testing" - -func TestProcStat(t *testing.T) { - fs, err := NewFS("fixtures") - if err != nil { - t.Fatal(err) - } - - p, err := fs.NewProc(26231) - if err != nil { - t.Fatal(err) - } - - s, err := p.NewStat() - if err != nil { - t.Fatal(err) - } - - for _, test := range []struct { - name string - want int - got int - }{ - {name: "pid", want: 26231, got: s.PID}, - {name: "user time", want: 1677, got: int(s.UTime)}, - {name: "system time", want: 44, got: int(s.STime)}, - {name: "start time", want: 82375, got: int(s.Starttime)}, - {name: "virtual memory size", want: 56274944, got: s.VSize}, - {name: "resident set size", want: 1981, got: s.RSS}, - } { - if test.want != test.got { - t.Errorf("want %s %d, got %d", test.name, test.want, test.got) - } - } -} - -func TestProcStatComm(t *testing.T) { - s1, err := testProcStat(26231) - if err != nil { - t.Fatal(err) - } - if want, got := "vim", s1.Comm; want != got { - t.Errorf("want comm %s, got %s", want, got) - } - - s2, err := testProcStat(584) - if err != nil { - t.Fatal(err) - } - if want, got := "(a b ) ( c d) ", s2.Comm; want != got { - t.Errorf("want comm %s, got %s", want, got) - } -} - -func TestProcStatVirtualMemory(t *testing.T) { - s, err := testProcStat(26231) - if err != nil { - t.Fatal(err) - } - - if want, got := 56274944, s.VirtualMemory(); want != got { - t.Errorf("want virtual memory %d, got %d", want, got) - } -} - -func TestProcStatResidentMemory(t *testing.T) { - s, err := testProcStat(26231) - if err != nil { - t.Fatal(err) - } - - if want, got := 1981*4096, s.ResidentMemory(); want != got { - t.Errorf("want resident 
memory %d, got %d", want, got) - } -} - -func TestProcStatStartTime(t *testing.T) { - s, err := testProcStat(26231) - if err != nil { - t.Fatal(err) - } - - time, err := s.StartTime() - if err != nil { - t.Fatal(err) - } - if want, got := 1418184099.75, time; want != got { - t.Errorf("want start time %f, got %f", want, got) - } -} - -func TestProcStatCPUTime(t *testing.T) { - s, err := testProcStat(26231) - if err != nil { - t.Fatal(err) - } - - if want, got := 17.21, s.CPUTime(); want != got { - t.Errorf("want cpu time %f, got %f", want, got) - } -} - -func testProcStat(pid int) (ProcStat, error) { - p, err := testProcess(pid) - if err != nil { - return ProcStat{}, err - } - - return p.NewStat() -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_test.go deleted file mode 100644 index 4d53c69fbf..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package procfs - -import ( - "os" - "reflect" - "sort" - "testing" -) - -func TestSelf(t *testing.T) { - p1, err := NewProc(os.Getpid()) - if err != nil { - t.Fatal(err) - } - p2, err := Self() - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(p1, p2) { - t.Errorf("want process %v to equal %v", p1, p2) - } -} - -func TestAllProcs(t *testing.T) { - fs, err := NewFS("fixtures") - if err != nil { - t.Fatal(err) - } - procs, err := fs.AllProcs() - if err != nil { - t.Fatal(err) - } - sort.Sort(procs) - for i, p := range []*Proc{{PID: 584}, {PID: 26231}} { - if want, got := p.PID, procs[i].PID; want != got { - t.Errorf("want processes %d, got %d", want, got) - } - } -} - -func TestCmdLine(t *testing.T) { - for _, tt := range []struct { - process int - want []string - }{ - {process: 26231, want: []string{"vim", "test.go", "+10"}}, - {process: 26232, want: []string{}}, - } { - p1, err := testProcess(tt.process) - if err != nil { - t.Fatal(err) - } - c1, err := p1.CmdLine() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(tt.want, c1) { - t.Errorf("want cmdline %v, got %v", tt.want, c1) - } - } -} - -func TestExecutable(t *testing.T) { - for _, tt := range []struct { - process int - want string - }{ - {process: 26231, want: "/usr/bin/vim"}, - {process: 26232, want: ""}, - } { - p, err := testProcess(tt.process) - if err != nil { - t.Fatal(err) - } - exe, err := p.Executable() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(tt.want, exe) { - t.Errorf("want absolute path to cmdline %v, got %v", tt.want, exe) - } - } -} - -func TestFileDescriptors(t *testing.T) { - p1, err := testProcess(26231) - if err != nil { - t.Fatal(err) - } - fds, err := p1.FileDescriptors() - if err != nil { - t.Fatal(err) - } - sort.Sort(byUintptr(fds)) - if want := []uintptr{0, 1, 2, 3, 10}; !reflect.DeepEqual(want, fds) { - t.Errorf("want fds %v, got %v", want, fds) - } - - p2, err := Self() - if err != nil { - t.Fatal(err) - } - - fdsBefore, err := p2.FileDescriptors() - if err != nil { - t.Fatal(err) - } - - s, err := os.Open("fixtures") - if err != nil { - t.Fatal(err) - } - defer s.Close() - - fdsAfter, err := p2.FileDescriptors() - if err != nil { - t.Fatal(err) - } - - if len(fdsBefore)+1 != len(fdsAfter) { - t.Errorf("want fds %v+1 to equal %v", fdsBefore, fdsAfter) - } -} - -func TestFileDescriptorTargets(t *testing.T) { - p1, err := testProcess(26231) - if err != nil { - t.Fatal(err) - } - fds, err := p1.FileDescriptorTargets() - if err != nil { - t.Fatal(err) - } - sort.Strings(fds) - 
var want = []string{ - "../../symlinktargets/abc", - "../../symlinktargets/def", - "../../symlinktargets/ghi", - "../../symlinktargets/uvw", - "../../symlinktargets/xyz", - } - if !reflect.DeepEqual(want, fds) { - t.Errorf("want fds %v, got %v", want, fds) - } - - p2, err := Self() - if err != nil { - t.Fatal(err) - } - - fdsBefore, err := p2.FileDescriptors() - if err != nil { - t.Fatal(err) - } - - s, err := os.Open("fixtures") - if err != nil { - t.Fatal(err) - } - defer s.Close() - - fdsAfter, err := p2.FileDescriptors() - if err != nil { - t.Fatal(err) - } - - if len(fdsBefore)+1 != len(fdsAfter) { - t.Errorf("want fds %v+1 to equal %v", fdsBefore, fdsAfter) - } -} - -func TestFileDescriptorsLen(t *testing.T) { - p1, err := testProcess(26231) - if err != nil { - t.Fatal(err) - } - l, err := p1.FileDescriptorsLen() - if err != nil { - t.Fatal(err) - } - if want, got := 5, l; want != got { - t.Errorf("want fds %d, got %d", want, got) - } -} - -func testProcess(pid int) (Proc, error) { - fs, err := NewFS("fixtures") - if err != nil { - return Proc{}, err - } - - return fs.NewProc(pid) -} - -type byUintptr []uintptr - -func (a byUintptr) Len() int { return len(a) } -func (a byUintptr) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byUintptr) Less(i, j int) bool { return a[i] < a[j] } diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/stat.go b/Godeps/_workspace/src/github.com/prometheus/procfs/stat.go deleted file mode 100644 index 26fefb0fa0..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,55 +0,0 @@ -package procfs - -import ( - "bufio" - "fmt" - "strconv" - "strings" -) - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime int64 -} - -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - -// NewStat returns information about the current kernel/system statistics.
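-// Only the "btime" line (boot time in seconds since the epoch) is used; all -// other lines are skipped, so the fixture yields BootTime 1418183276.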
-func (fs FS) NewStat() (Stat, error) { - f, err := fs.open("stat") - if err != nil { - return Stat{}, err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - line := s.Text() - if !strings.HasPrefix(line, "btime") { - continue - } - fields := strings.Fields(line) - if len(fields) != 2 { - return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) - } - i, err := strconv.ParseInt(fields[1], 10, 32) - if err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) - } - return Stat{BootTime: i}, nil - } - if err := s.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) - } - - return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) -} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/stat_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/stat_test.go deleted file mode 100644 index 24b5d61f8e..0000000000 --- a/Godeps/_workspace/src/github.com/prometheus/procfs/stat_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package procfs - -import "testing" - -func TestStat(t *testing.T) { - fs, err := NewFS("fixtures") - if err != nil { - t.Fatal(err) - } - - s, err := fs.NewStat() - if err != nil { - t.Fatal(err) - } - - if want, got := int64(1418183276), s.BootTime; want != got { - t.Errorf("want boot time %d, got %d", want, got) - } -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/LICENSE b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/LICENSE deleted file mode 100644 index bc00498c52..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2013, Samuel Stauffer -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -* Neither the name of the author nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go deleted file mode 100644 index b8571734ec..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package zk - -import ( - "fmt" - "strings" - "testing" - "time" -) - -type logWriter struct { - t *testing.T - p string -} - -func (lw logWriter) Write(b []byte) (int, error) { - lw.t.Logf("%s%s", lw.p, string(b)) - return len(b), nil -} - -func TestBasicCluster(t *testing.T) { - ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk1, err := ts.Connect(0) - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk1.Close() - zk2, err := ts.Connect(1) - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk2.Close() - - time.Sleep(time.Second * 5) - - if _, err := zk1.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create failed on node 1: %+v", err) - } - if by, _, err := zk2.Get("/gozk-test"); err != nil { - t.Fatalf("Get failed on node 2: %+v", err) - } else if string(by) != "foo-cluster" { - t.Fatal("Wrong data for node 2") - } -} - -func TestClientClusterFailover(t *testing.T) { - ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, evCh, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - hasSession := make(chan string, 1) - go func() { - for ev := range evCh { - if ev.Type == EventSession && ev.State == StateHasSession { - select { - case hasSession <- ev.Server: - default: - } - } - } - }() - - waitSession := func() string { - select { - case srv := <-hasSession: - return srv - case <-time.After(time.Second * 8): - t.Fatal("Failed to connect and get a session") - } - return "" - } - - srv := waitSession() - if _, err := zk.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create failed on node 1: %+v", err) - } - - stopped := false - for _, s := range ts.Servers { - if strings.HasSuffix(srv, fmt.Sprintf(":%d", s.Port)) { - s.Srv.Stop() - stopped = true - break - } - } - if !stopped { - t.Fatal("Failed to stop server") - } - - waitSession() - if by, _, err := zk.Get("/gozk-test"); err != nil { - t.Fatalf("Get failed on node 2: %+v", err) - } else if string(by) != "foo-cluster" { - t.Fatal("Wrong data for node 2") - } -} - -func TestWaitForClose(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, err := ts.Connect(0) - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - timeout := time.After(30 * time.Second) -CONNECTED: - for { - select { - case ev := <-zk.eventChan: - if ev.State == StateConnected { - break CONNECTED - } - case <-timeout: - zk.Close() - t.Fatal("Timeout") - } - } - zk.Close() - for { - select { - case _, ok := <-zk.eventChan: - if !ok { - return - } - case <-timeout: - t.Fatal("Timeout") - } - } -} - -func TestBadSession(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - if err := 
zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - zk.conn.Close() - time.Sleep(time.Millisecond * 100) - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/conn.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/conn.go deleted file mode 100644 index eef2572de0..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/conn.go +++ /dev/null @@ -1,871 +0,0 @@ -// Package zk is a native Go client library for the ZooKeeper orchestration service. -package zk - -/* -TODO: -* make sure a ping response comes back in a reasonable time - -Possible watcher events: -* Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err} -*/ - -import ( - "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// ErrNoServer indicates that an operation cannot be completed -// because attempts to connect to all servers in the list failed. -var ErrNoServer = errors.New("zk: could not connect to a server") - -// ErrInvalidPath indicates that an operation was being attempted on -// an invalid path. (e.g. empty path) -var ErrInvalidPath = errors.New("zk: invalid path") - -// DefaultLogger uses the stdlib log package for logging. -var DefaultLogger = defaultLogger{} - -const ( - bufferSize = 1536 * 1024 - eventChanSize = 6 - sendChanSize = 16 - protectedPrefix = "_c_" -) - -type watchType int - -const ( - watchTypeData = iota - watchTypeExist = iota - watchTypeChild = iota -) - -type watchPathType struct { - path string - wType watchType -} - -type Dialer func(network, address string, timeout time.Duration) (net.Conn, error) - -// Logger is an interface that can be implemented to provide custom log output. -type Logger interface { - Printf(string, ...interface{}) -} - -type Conn struct { - lastZxid int64 - sessionID int64 - state State // must be 32-bit aligned - xid uint32 - timeout int32 // session timeout in milliseconds - passwd []byte - - dialer Dialer - servers []string - serverIndex int // remember last server that was tried during connect to round-robin attempts to servers - lastServerIndex int // index of the last server that was successfully connected to and authenticated with - conn net.Conn - eventChan chan Event - shouldQuit chan struct{} - pingInterval time.Duration - recvTimeout time.Duration - connectTimeout time.Duration - - sendChan chan *request - requests map[int32]*request // Xid -> pending request - requestsLock sync.Mutex - watchers map[watchPathType][]chan Event - watchersLock sync.Mutex - - // Debug (used by unit tests) - reconnectDelay time.Duration - - logger Logger -} - -type request struct { - xid int32 - opcode int32 - pkt interface{} - recvStruct interface{} - recvChan chan response - - // Because sending and receiving happen in separate go routines, there's - // a possible race condition when creating watches from outside the read - // loop. We must ensure that a watcher gets added to the list synchronously - // with the response from the server on any request that creates a watch. - // In order to not hard code the watch logic for each opcode in the recv - // loop the caller can use recvFunc to insert some synchronously code - // after a response. 
- recvFunc func(*request, *responseHeader, error) -} - -type response struct { - zxid int64 - err error -} - -type Event struct { - Type EventType - State State - Path string // For non-session events, the path of the watched node. - Err error - Server string // For connection events -} - -// Connect establishes a new connection to a pool of zookeeper servers -// using the default net.Dialer. See ConnectWithDialer for further -// information about session timeout. -func Connect(servers []string, sessionTimeout time.Duration) (*Conn, <-chan Event, error) { - return ConnectWithDialer(servers, sessionTimeout, nil) -} - -// ConnectWithDialer establishes a new connection to a pool of zookeeper -// servers. The provided session timeout sets the amount of time for which -// a session is considered valid after losing connection to a server. Within -// the session timeout it's possible to reestablish a connection to a different -// server and keep the same session. This means any ephemeral nodes and -// watches are maintained. -func ConnectWithDialer(servers []string, sessionTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) { - if len(servers) == 0 { - return nil, nil, errors.New("zk: server list must not be empty") - } - - recvTimeout := sessionTimeout * 2 / 3 - - srvs := make([]string, len(servers)) - - for i, addr := range servers { - if strings.Contains(addr, ":") { - srvs[i] = addr - } else { - srvs[i] = addr + ":" + strconv.Itoa(DefaultPort) - } - } - - // Randomize the order of the servers to avoid creating hotspots - stringShuffle(srvs) - - ec := make(chan Event, eventChanSize) - if dialer == nil { - dialer = net.DialTimeout - } - conn := Conn{ - dialer: dialer, - servers: srvs, - serverIndex: 0, - lastServerIndex: -1, - conn: nil, - state: StateDisconnected, - eventChan: ec, - shouldQuit: make(chan struct{}), - recvTimeout: recvTimeout, - pingInterval: recvTimeout / 2, - connectTimeout: 1 * time.Second, - sendChan: make(chan *request, sendChanSize), - requests: make(map[int32]*request), - watchers: make(map[watchPathType][]chan Event), - passwd: emptyPassword, - timeout: int32(sessionTimeout.Nanoseconds() / 1e6), - logger: DefaultLogger, - - // Debug - reconnectDelay: 0, - } - go func() { - conn.loop() - conn.flushRequests(ErrClosing) - conn.invalidateWatches(ErrClosing) - close(conn.eventChan) - }() - return &conn, ec, nil -} - -func (c *Conn) Close() { - close(c.shouldQuit) - - select { - case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil): - case <-time.After(time.Second): - } -} - -// State returns the current state of the connection. -func (c *Conn) State() State { - return State(atomic.LoadInt32((*int32)(&c.state))) -} - -// SetLogger sets the logger to be used for printing errors. -// Logger is an interface provided by this package.
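// A *log.Logger from the standard library satisfies Logger, so a minimal
// sketch (prefix and destination are arbitrary choices):
//
//	c.SetLogger(log.New(os.Stderr, "[zk] ", log.LstdFlags))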
-func (c *Conn) SetLogger(l Logger) { - c.logger = l -} - -func (c *Conn) setState(state State) { - atomic.StoreInt32((*int32)(&c.state), int32(state)) - select { - case c.eventChan <- Event{Type: EventSession, State: state, Server: c.servers[c.serverIndex]}: - default: - // panic("zk: event channel full - it must be monitored and never allowed to be full") - } -} - -func (c *Conn) connect() error { - c.setState(StateConnecting) - for { - c.serverIndex = (c.serverIndex + 1) % len(c.servers) - if c.serverIndex == c.lastServerIndex { - c.flushUnsentRequests(ErrNoServer) - select { - case <-time.After(time.Second): - // pass - case <-c.shouldQuit: - c.setState(StateDisconnected) - c.flushUnsentRequests(ErrClosing) - return ErrClosing - } - } else if c.lastServerIndex < 0 { - // lastServerIndex defaults to -1 to avoid a delay on the initial connect - c.lastServerIndex = 0 - } - - zkConn, err := c.dialer("tcp", c.servers[c.serverIndex], c.connectTimeout) - if err == nil { - c.conn = zkConn - c.setState(StateConnected) - return nil - } - - c.logger.Printf("Failed to connect to %s: %+v", c.servers[c.serverIndex], err) - } -} - -func (c *Conn) loop() { - for { - if err := c.connect(); err != nil { - // c.Close() was called - return - } - - err := c.authenticate() - switch { - case err == ErrSessionExpired: - c.invalidateWatches(err) - case err != nil && c.conn != nil: - c.conn.Close() - case err == nil: - c.lastServerIndex = c.serverIndex - closeChan := make(chan struct{}) // channel to tell send loop stop - var wg sync.WaitGroup - - wg.Add(1) - go func() { - c.sendLoop(c.conn, closeChan) - c.conn.Close() // causes recv loop to EOF/exit - wg.Done() - }() - - wg.Add(1) - go func() { - err = c.recvLoop(c.conn) - if err == nil { - panic("zk: recvLoop should never return nil error") - } - close(closeChan) // tell send loop to exit - wg.Done() - }() - - wg.Wait() - } - - c.setState(StateDisconnected) - - // Yeesh - if err != io.EOF && err != ErrSessionExpired && !strings.Contains(err.Error(), "use of closed network connection") { - c.logger.Printf(err.Error()) - } - - select { - case <-c.shouldQuit: - c.flushRequests(ErrClosing) - return - default: - } - - if err != ErrSessionExpired { - err = ErrConnectionClosed - } - c.flushRequests(err) - - if c.reconnectDelay > 0 { - select { - case <-c.shouldQuit: - return - case <-time.After(c.reconnectDelay): - } - } - } -} - -func (c *Conn) flushUnsentRequests(err error) { - for { - select { - default: - return - case req := <-c.sendChan: - req.recvChan <- response{-1, err} - } - } -} - -// Send error to all pending requests and clear request map -func (c *Conn) flushRequests(err error) { - c.requestsLock.Lock() - for _, req := range c.requests { - req.recvChan <- response{-1, err} - } - c.requests = make(map[int32]*request) - c.requestsLock.Unlock() -} - -// Send error to all watchers and clear watchers map -func (c *Conn) invalidateWatches(err error) { - c.watchersLock.Lock() - defer c.watchersLock.Unlock() - - if len(c.watchers) >= 0 { - for pathType, watchers := range c.watchers { - ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err} - for _, ch := range watchers { - ch <- ev - close(ch) - } - } - c.watchers = make(map[watchPathType][]chan Event) - } -} - -func (c *Conn) sendSetWatches() { - c.watchersLock.Lock() - defer c.watchersLock.Unlock() - - if len(c.watchers) == 0 { - return - } - - req := &setWatchesRequest{ - RelativeZxid: c.lastZxid, - DataWatches: make([]string, 0), - ExistWatches: make([]string, 0), - 
ChildWatches: make([]string, 0), - } - n := 0 - for pathType, watchers := range c.watchers { - if len(watchers) == 0 { - continue - } - switch pathType.wType { - case watchTypeData: - req.DataWatches = append(req.DataWatches, pathType.path) - case watchTypeExist: - req.ExistWatches = append(req.ExistWatches, pathType.path) - case watchTypeChild: - req.ChildWatches = append(req.ChildWatches, pathType.path) - } - n++ - } - if n == 0 { - return - } - - go func() { - res := &setWatchesResponse{} - _, err := c.request(opSetWatches, req, res, nil) - if err != nil { - c.logger.Printf("Failed to set previous watches: %s", err.Error()) - } - }() -} - -func (c *Conn) authenticate() error { - buf := make([]byte, 256) - - // connect request - - n, err := encodePacket(buf[4:], &connectRequest{ - ProtocolVersion: protocolVersion, - LastZxidSeen: c.lastZxid, - TimeOut: c.timeout, - SessionID: c.sessionID, - Passwd: c.passwd, - }) - if err != nil { - return err - } - - binary.BigEndian.PutUint32(buf[:4], uint32(n)) - - c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10)) - _, err = c.conn.Write(buf[:n+4]) - c.conn.SetWriteDeadline(time.Time{}) - if err != nil { - return err - } - - c.sendSetWatches() - - // connect response - - // package length - c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10)) - _, err = io.ReadFull(c.conn, buf[:4]) - c.conn.SetReadDeadline(time.Time{}) - if err != nil { - // Sometimes zookeeper just drops connection on invalid session data, - // we prefer to drop session and start from scratch when that event - // occurs instead of dropping into loop of connect/disconnect attempts - c.sessionID = 0 - c.passwd = emptyPassword - c.lastZxid = 0 - c.setState(StateExpired) - return ErrSessionExpired - } - - blen := int(binary.BigEndian.Uint32(buf[:4])) - if cap(buf) < blen { - buf = make([]byte, blen) - } - - _, err = io.ReadFull(c.conn, buf[:blen]) - if err != nil { - return err - } - - r := connectResponse{} - _, err = decodePacket(buf[:blen], &r) - if err != nil { - return err - } - if r.SessionID == 0 { - c.sessionID = 0 - c.passwd = emptyPassword - c.lastZxid = 0 - c.setState(StateExpired) - return ErrSessionExpired - } - - if c.sessionID != r.SessionID { - atomic.StoreUint32(&c.xid, 0) - } - c.timeout = r.TimeOut - c.sessionID = r.SessionID - c.passwd = r.Passwd - c.setState(StateHasSession) - - return nil -} - -func (c *Conn) sendLoop(conn net.Conn, closeChan <-chan struct{}) error { - pingTicker := time.NewTicker(c.pingInterval) - defer pingTicker.Stop() - - buf := make([]byte, bufferSize) - for { - select { - case req := <-c.sendChan: - header := &requestHeader{req.xid, req.opcode} - n, err := encodePacket(buf[4:], header) - if err != nil { - req.recvChan <- response{-1, err} - continue - } - - n2, err := encodePacket(buf[4+n:], req.pkt) - if err != nil { - req.recvChan <- response{-1, err} - continue - } - - n += n2 - - binary.BigEndian.PutUint32(buf[:4], uint32(n)) - - c.requestsLock.Lock() - select { - case <-closeChan: - req.recvChan <- response{-1, ErrConnectionClosed} - c.requestsLock.Unlock() - return ErrConnectionClosed - default: - } - c.requests[req.xid] = req - c.requestsLock.Unlock() - - conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) - _, err = conn.Write(buf[:n+4]) - conn.SetWriteDeadline(time.Time{}) - if err != nil { - req.recvChan <- response{-1, err} - conn.Close() - return err - } - case <-pingTicker.C: - n, err := encodePacket(buf[4:], &requestHeader{Xid: -2, Opcode: opPing}) - if err != nil { - panic("zk: opPing should never fail to 
serialize") - } - - binary.BigEndian.PutUint32(buf[:4], uint32(n)) - - conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) - _, err = conn.Write(buf[:n+4]) - conn.SetWriteDeadline(time.Time{}) - if err != nil { - conn.Close() - return err - } - case <-closeChan: - return nil - } - } -} - -func (c *Conn) recvLoop(conn net.Conn) error { - buf := make([]byte, bufferSize) - for { - // package length - conn.SetReadDeadline(time.Now().Add(c.recvTimeout)) - _, err := io.ReadFull(conn, buf[:4]) - if err != nil { - return err - } - - blen := int(binary.BigEndian.Uint32(buf[:4])) - if cap(buf) < blen { - buf = make([]byte, blen) - } - - _, err = io.ReadFull(conn, buf[:blen]) - conn.SetReadDeadline(time.Time{}) - if err != nil { - return err - } - - res := responseHeader{} - _, err = decodePacket(buf[:16], &res) - if err != nil { - return err - } - - if res.Xid == -1 { - res := &watcherEvent{} - _, err := decodePacket(buf[16:16+blen], res) - if err != nil { - return err - } - ev := Event{ - Type: res.Type, - State: res.State, - Path: res.Path, - Err: nil, - } - select { - case c.eventChan <- ev: - default: - } - wTypes := make([]watchType, 0, 2) - switch res.Type { - case EventNodeCreated: - wTypes = append(wTypes, watchTypeExist) - case EventNodeDeleted, EventNodeDataChanged: - wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild) - case EventNodeChildrenChanged: - wTypes = append(wTypes, watchTypeChild) - } - c.watchersLock.Lock() - for _, t := range wTypes { - wpt := watchPathType{res.Path, t} - if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 { - for _, ch := range watchers { - ch <- ev - close(ch) - } - delete(c.watchers, wpt) - } - } - c.watchersLock.Unlock() - } else if res.Xid == -2 { - // Ping response. Ignore. - } else if res.Xid < 0 { - c.logger.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid) - } else { - if res.Zxid > 0 { - c.lastZxid = res.Zxid - } - - c.requestsLock.Lock() - req, ok := c.requests[res.Xid] - if ok { - delete(c.requests, res.Xid) - } - c.requestsLock.Unlock() - - if !ok { - c.logger.Printf("Response for unknown request with xid %d", res.Xid) - } else { - if res.Err != 0 { - err = res.Err.toError() - } else { - _, err = decodePacket(buf[16:16+blen], req.recvStruct) - } - if req.recvFunc != nil { - req.recvFunc(req, &res, err) - } - req.recvChan <- response{res.Zxid, err} - if req.opcode == opClose { - return io.EOF - } - } - } - } -} - -func (c *Conn) nextXid() int32 { - return int32(atomic.AddUint32(&c.xid, 1) & 0x7fffffff) -} - -func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event { - c.watchersLock.Lock() - defer c.watchersLock.Unlock() - - ch := make(chan Event, 1) - wpt := watchPathType{path, watchType} - c.watchers[wpt] = append(c.watchers[wpt], ch) - return ch -} - -func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response { - rq := &request{ - xid: c.nextXid(), - opcode: opcode, - pkt: req, - recvStruct: res, - recvChan: make(chan response, 1), - recvFunc: recvFunc, - } - c.sendChan <- rq - return rq.recvChan -} - -func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) { - r := <-c.queueRequest(opcode, req, res, recvFunc) - return r.zxid, r.err -} - -func (c *Conn) AddAuth(scheme string, auth []byte) error { - _, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil) - return err -} 
- -func (c *Conn) Children(path string) ([]string, *Stat, error) { - res := &getChildren2Response{} - _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil) - return res.Children, &res.Stat, err -} - -func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) { - var ech <-chan Event - res := &getChildren2Response{} - _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { - if err == nil { - ech = c.addWatcher(path, watchTypeChild) - } - }) - if err != nil { - return nil, nil, nil, err - } - return res.Children, &res.Stat, ech, err -} - -func (c *Conn) Get(path string) ([]byte, *Stat, error) { - res := &getDataResponse{} - _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil) - return res.Data, &res.Stat, err -} - -// GetW returns the contents of a znode and sets a watch -func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) { - var ech <-chan Event - res := &getDataResponse{} - _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { - if err == nil { - ech = c.addWatcher(path, watchTypeData) - } - }) - if err != nil { - return nil, nil, nil, err - } - return res.Data, &res.Stat, ech, err -} - -func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) { - if path == "" { - return nil, ErrInvalidPath - } - res := &setDataResponse{} - _, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil) - return &res.Stat, err -} - -func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) { - res := &createResponse{} - _, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil) - return res.Path, err -} - -// CreateProtectedEphemeralSequential fixes a race condition if the server crashes -// after it creates the node. On reconnect the session may still be valid so the -// ephemeral node still exists. Therefore, on reconnect we need to check if a node -// with a GUID generated on create exists. -func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) { - var guid [16]byte - _, err := io.ReadFull(rand.Reader, guid[:16]) - if err != nil { - return "", err - } - guidStr := fmt.Sprintf("%x", guid) - - parts := strings.Split(path, "/") - parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1]) - rootPath := strings.Join(parts[:len(parts)-1], "/") - protectedPath := strings.Join(parts, "/") - - var newPath string - for i := 0; i < 3; i++ { - newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl) - switch err { - case ErrSessionExpired: - // No need to search for the node since it can't exist. Just try again. 
- case ErrConnectionClosed: - children, _, err := c.Children(rootPath) - if err != nil { - return "", err - } - for _, p := range children { - parts := strings.Split(p, "/") - if pth := parts[len(parts)-1]; strings.HasPrefix(pth, protectedPrefix) { - if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr { - return rootPath + "/" + p, nil - } - } - } - case nil: - return newPath, nil - default: - return "", err - } - } - return "", err -} - -func (c *Conn) Delete(path string, version int32) error { - _, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil) - return err -} - -func (c *Conn) Exists(path string) (bool, *Stat, error) { - res := &existsResponse{} - _, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil) - exists := true - if err == ErrNoNode { - exists = false - err = nil - } - return exists, &res.Stat, err -} - -func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) { - var ech <-chan Event - res := &existsResponse{} - _, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { - if err == nil { - ech = c.addWatcher(path, watchTypeData) - } else if err == ErrNoNode { - ech = c.addWatcher(path, watchTypeExist) - } - }) - exists := true - if err == ErrNoNode { - exists = false - err = nil - } - if err != nil { - return false, nil, nil, err - } - return exists, &res.Stat, ech, err -} - -func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) { - res := &getAclResponse{} - _, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil) - return res.Acl, &res.Stat, err -} - -func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) { - res := &setAclResponse{} - _, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil) - return &res.Stat, err -} - -func (c *Conn) Sync(path string) (string, error) { - res := &syncResponse{} - _, err := c.request(opSync, &syncRequest{Path: path}, res, nil) - return res.Path, err -} - -type MultiResponse struct { - Stat *Stat - String string -} - -// Multi executes multiple ZooKeeper operations or none of them. The provided -// ops must be one of *CreateRequest, *DeleteRequest, *SetDataRequest, or -// *CheckVersionRequest. 
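// For example, a sketch (paths and data are illustrative) that creates one
// znode and deletes another in a single atomic multi op:
//
//	ops := []interface{}{
//		&CreateRequest{Path: "/workers/a", Data: []byte("x"), Acl: WorldACL(PermAll)},
//		&DeleteRequest{Path: "/workers/b", Version: -1},
//	}
//	_, err := c.Multi(ops...)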
-func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) { - req := &multiRequest{ - Ops: make([]multiRequestOp, 0, len(ops)), - DoneHeader: multiHeader{Type: -1, Done: true, Err: -1}, - } - for _, op := range ops { - var opCode int32 - switch op.(type) { - case *CreateRequest: - opCode = opCreate - case *SetDataRequest: - opCode = opSetData - case *DeleteRequest: - opCode = opDelete - case *CheckVersionRequest: - opCode = opCheck - default: - return nil, fmt.Errorf("unknown operation type %T", op) - } - req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCode, false, -1}, op}) - } - res := &multiResponse{} - _, err := c.request(opMulti, req, res, nil) - mr := make([]MultiResponse, len(res.Ops)) - for i, op := range res.Ops { - mr[i] = MultiResponse{Stat: op.Stat, String: op.String} - } - return mr, err -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants.go deleted file mode 100644 index f9b39b904f..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants.go +++ /dev/null @@ -1,240 +0,0 @@ -package zk - -import ( - "errors" -) - -const ( - protocolVersion = 0 - - DefaultPort = 2181 -) - -const ( - opNotify = 0 - opCreate = 1 - opDelete = 2 - opExists = 3 - opGetData = 4 - opSetData = 5 - opGetAcl = 6 - opSetAcl = 7 - opGetChildren = 8 - opSync = 9 - opPing = 11 - opGetChildren2 = 12 - opCheck = 13 - opMulti = 14 - opClose = -11 - opSetAuth = 100 - opSetWatches = 101 - // Not in protocol, used internally - opWatcherEvent = -2 -) - -const ( - EventNodeCreated = EventType(1) - EventNodeDeleted = EventType(2) - EventNodeDataChanged = EventType(3) - EventNodeChildrenChanged = EventType(4) - - EventSession = EventType(-1) - EventNotWatching = EventType(-2) -) - -var ( - eventNames = map[EventType]string{ - EventNodeCreated: "EventNodeCreated", - EventNodeDeleted: "EventNodeDeleted", - EventNodeDataChanged: "EventNodeDataChanged", - EventNodeChildrenChanged: "EventNodeChildrenChanged", - EventSession: "EventSession", - EventNotWatching: "EventNotWatching", - } -) - -const ( - StateUnknown = State(-1) - StateDisconnected = State(0) - StateConnecting = State(1) - StateAuthFailed = State(4) - StateConnectedReadOnly = State(5) - StateSaslAuthenticated = State(6) - StateExpired = State(-112) - // StateAuthFailed = State(-113) - - StateConnected = State(100) - StateHasSession = State(101) -) - -const ( - FlagEphemeral = 1 - FlagSequence = 2 -) - -var ( - stateNames = map[State]string{ - StateUnknown: "StateUnknown", - StateDisconnected: "StateDisconnected", - StateConnectedReadOnly: "StateConnectedReadOnly", - StateSaslAuthenticated: "StateSaslAuthenticated", - StateExpired: "StateExpired", - StateAuthFailed: "StateAuthFailed", - StateConnecting: "StateConnecting", - StateConnected: "StateConnected", - StateHasSession: "StateHasSession", - } -) - -type State int32 - -func (s State) String() string { - if name := stateNames[s]; name != "" { - return name - } - return "Unknown" -} - -type ErrCode int32 - -var ( - ErrConnectionClosed = errors.New("zk: connection closed") - ErrUnknown = errors.New("zk: unknown error") - ErrAPIError = errors.New("zk: api error") - ErrNoNode = errors.New("zk: node does not exist") - ErrNoAuth = errors.New("zk: not authenticated") - ErrBadVersion = errors.New("zk: version conflict") - ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children") - ErrNodeExists = errors.New("zk: node already exists") - ErrNotEmpty = 
errors.New("zk: node has children") - ErrSessionExpired = errors.New("zk: session has been expired by the server") - ErrInvalidACL = errors.New("zk: invalid ACL specified") - ErrAuthFailed = errors.New("zk: client authentication failed") - ErrClosing = errors.New("zk: zookeeper is closing") - ErrNothing = errors.New("zk: no server responsees to process") - ErrSessionMoved = errors.New("zk: session moved to another server, so operation is ignored") - - // ErrInvalidCallback = errors.New("zk: invalid callback specified") - errCodeToError = map[ErrCode]error{ - 0: nil, - errAPIError: ErrAPIError, - errNoNode: ErrNoNode, - errNoAuth: ErrNoAuth, - errBadVersion: ErrBadVersion, - errNoChildrenForEphemerals: ErrNoChildrenForEphemerals, - errNodeExists: ErrNodeExists, - errNotEmpty: ErrNotEmpty, - errSessionExpired: ErrSessionExpired, - // errInvalidCallback: ErrInvalidCallback, - errInvalidAcl: ErrInvalidACL, - errAuthFailed: ErrAuthFailed, - errClosing: ErrClosing, - errNothing: ErrNothing, - errSessionMoved: ErrSessionMoved, - } -) - -func (e ErrCode) toError() error { - if err, ok := errCodeToError[e]; ok { - return err - } - return ErrUnknown -} - -const ( - errOk = 0 - // System and server-side errors - errSystemError = -1 - errRuntimeInconsistency = -2 - errDataInconsistency = -3 - errConnectionLoss = -4 - errMarshallingError = -5 - errUnimplemented = -6 - errOperationTimeout = -7 - errBadArguments = -8 - errInvalidState = -9 - // API errors - errAPIError = ErrCode(-100) - errNoNode = ErrCode(-101) // * - errNoAuth = ErrCode(-102) - errBadVersion = ErrCode(-103) // * - errNoChildrenForEphemerals = ErrCode(-108) - errNodeExists = ErrCode(-110) // * - errNotEmpty = ErrCode(-111) - errSessionExpired = ErrCode(-112) - errInvalidCallback = ErrCode(-113) - errInvalidAcl = ErrCode(-114) - errAuthFailed = ErrCode(-115) - errClosing = ErrCode(-116) - errNothing = ErrCode(-117) - errSessionMoved = ErrCode(-118) -) - -// Constants for ACL permissions -const ( - PermRead = 1 << iota - PermWrite - PermCreate - PermDelete - PermAdmin - PermAll = 0x1f -) - -var ( - emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - opNames = map[int32]string{ - opNotify: "notify", - opCreate: "create", - opDelete: "delete", - opExists: "exists", - opGetData: "getData", - opSetData: "setData", - opGetAcl: "getACL", - opSetAcl: "setACL", - opGetChildren: "getChildren", - opSync: "sync", - opPing: "ping", - opGetChildren2: "getChildren2", - opCheck: "check", - opMulti: "multi", - opClose: "close", - opSetAuth: "setAuth", - opSetWatches: "setWatches", - - opWatcherEvent: "watcherEvent", - } -) - -type EventType int32 - -func (t EventType) String() string { - if name := eventNames[t]; name != "" { - return name - } - return "Unknown" -} - -// Mode is used to build custom server modes (leader|follower|standalone). 
-type Mode uint8 - -func (m Mode) String() string { - if name := modeNames[m]; name != "" { - return name - } - return "unknown" -} - -const ( - ModeUnknown Mode = iota - ModeLeader Mode = iota - ModeFollower Mode = iota - ModeStandalone Mode = iota -) - -var ( - modeNames = map[Mode]string{ - ModeLeader: "leader", - ModeFollower: "follower", - ModeStandalone: "standalone", - } -) diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go deleted file mode 100644 index 9fe6b04ceb..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package zk - -import ( - "fmt" - "testing" -) - -func TestModeString(t *testing.T) { - if fmt.Sprintf("%v", ModeUnknown) != "unknown" { - t.Errorf("unknown value should be 'unknown'") - } - - if fmt.Sprintf("%v", ModeLeader) != "leader" { - t.Errorf("leader value should be 'leader'") - } - - if fmt.Sprintf("%v", ModeFollower) != "follower" { - t.Errorf("follower value should be 'follower'") - } - - if fmt.Sprintf("%v", ModeStandalone) != "standalone" { - t.Errorf("standalone value should be 'standalone'") - } -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw.go deleted file mode 100644 index c1225ffa25..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw.go +++ /dev/null @@ -1,288 +0,0 @@ -package zk - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "math/big" - "net" - "regexp" - "strconv" - "time" -) - -// FLWSrvr is a FourLetterWord helper function. In particular, this function pulls the srvr output -// from the zookeeper instances and parses the output. A slice of *ServerStats structs is returned -// as well as a boolean value to indicate whether this function processed successfully. -// -// If the boolean value is false there was a problem. If the *ServerStats slice is empty or nil, -// then the error happened before we started to obtain 'srvr' values. Otherwise, one of the -// servers had an issue and the "Error" value in the struct should be inspected to determine -// which server had the issue.
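// A sketch of typical use (the address is illustrative):
//
//	stats, ok := FLWSrvr([]string{"127.0.0.1:2181"}, time.Second)
//	if !ok {
//		// inspect stats[i].Error for the server(s) that failed
//	}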
-func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) { - // different parts of the regular expression that are required to parse the srvr output - var ( - zrVer = `^Zookeeper version: ([A-Za-z0-9\.\-]+), built on (\d\d/\d\d/\d\d\d\d \d\d:\d\d [A-Za-z0-9:\+\-]+)` - zrLat = `^Latency min/avg/max: (\d+)/(\d+)/(\d+)` - zrNet = `^Received: (\d+).*\n^Sent: (\d+).*\n^Connections: (\d+).*\n^Outstanding: (\d+)` - zrState = `^Zxid: (0x[A-Za-z0-9]+).*\n^Mode: (\w+).*\n^Node count: (\d+)` - ) - - // build the regex from the pieces above - re, err := regexp.Compile(fmt.Sprintf(`(?m:\A%v.*\n%v.*\n%v.*\n%v)`, zrVer, zrLat, zrNet, zrState)) - - if err != nil { - return nil, false - } - - imOk := true - servers = FormatServers(servers) - ss := make([]*ServerStats, len(servers)) - - for i := range ss { - response, err := fourLetterWord(servers[i], "srvr", timeout) - - if err != nil { - ss[i] = &ServerStats{Error: err} - imOk = false - continue - } - - // check for a match before indexing into the result to avoid a panic - matches := re.FindAllStringSubmatch(string(response), -1) - - if matches == nil { - err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)") - ss[i] = &ServerStats{Error: err} - imOk = false - continue - } - - match := matches[0][1:] - - // determine current server - var srvrMode Mode - switch match[10] { - case "leader": - srvrMode = ModeLeader - case "follower": - srvrMode = ModeFollower - case "standalone": - srvrMode = ModeStandalone - default: - srvrMode = ModeUnknown - } - - buildTime, err := time.Parse("01/02/2006 15:04 MST", match[1]) - - if err != nil { - ss[i] = &ServerStats{Error: err} - imOk = false - continue - } - - parsedInt, err := strconv.ParseInt(match[9], 0, 64) - - if err != nil { - ss[i] = &ServerStats{Error: err} - imOk = false - continue - } - - // the ZxID value is an int64 with two int32s packed inside - // the high int32 is the epoch (i.e., number of leader elections) - // the low int32 is the counter - epoch := int32(parsedInt >> 32) - counter := int32(parsedInt & 0xFFFFFFFF) - - // within the regex above, these values must be numerical - // so we can avoid useless checking of the error return value - minLatency, _ := strconv.ParseInt(match[2], 0, 64) - avgLatency, _ := strconv.ParseInt(match[3], 0, 64) - maxLatency, _ := strconv.ParseInt(match[4], 0, 64) - recv, _ := strconv.ParseInt(match[5], 0, 64) - sent, _ := strconv.ParseInt(match[6], 0, 64) - cons, _ := strconv.ParseInt(match[7], 0, 64) - outs, _ := strconv.ParseInt(match[8], 0, 64) - ncnt, _ := strconv.ParseInt(match[11], 0, 64) - - ss[i] = &ServerStats{ - Sent: sent, - Received: recv, - NodeCount: ncnt, - MinLatency: minLatency, - AvgLatency: avgLatency, - MaxLatency: maxLatency, - Connections: cons, - Outstanding: outs, - Epoch: epoch, - Counter: counter, - BuildTime: buildTime, - Mode: srvrMode, - Version: match[0], - } - } - - return ss, imOk -} - -// FLWRuok is a FourLetterWord helper function. In particular, this function -// pulls the ruok output from each server. -func FLWRuok(servers []string, timeout time.Duration) []bool { - servers = FormatServers(servers) - oks := make([]bool, len(servers)) - - for i := range oks { - response, err := fourLetterWord(servers[i], "ruok", timeout) - - if err != nil { - continue - } - - if len(response) >= 4 && bytes.Equal(response[:4], []byte("imok")) { - oks[i] = true - } - } - return oks -} - -// FLWCons is a FourLetterWord helper function. In particular, this function -// pulls the cons output from each server. -// -// As with FLWSrvr, the boolean value indicates whether one of the requests had -// an issue.
The Clients struct has an Error value that can be checked. -func FLWCons(servers []string, timeout time.Duration) ([]*ServerClients, bool) { - var ( - zrAddr = `^ /((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):(?:\d+))\[\d+\]` - zrPac = `\(queued=(\d+),recved=(\d+),sent=(\d+),sid=(0x[A-Za-z0-9]+),lop=(\w+),est=(\d+),to=(\d+),` - zrSesh = `lcxid=(0x[A-Za-z0-9]+),lzxid=(0x[A-Za-z0-9]+),lresp=(\d+),llat=(\d+),minlat=(\d+),avglat=(\d+),maxlat=(\d+)\)` - ) - - re, err := regexp.Compile(fmt.Sprintf("%v%v%v", zrAddr, zrPac, zrSesh)) - - if err != nil { - return nil, false - } - - servers = FormatServers(servers) - sc := make([]*ServerClients, len(servers)) - imOk := true - - for i := range sc { - response, err := fourLetterWord(servers[i], "cons", timeout) - - if err != nil { - sc[i] = &ServerClients{Error: err} - imOk = false - continue - } - - scan := bufio.NewScanner(bytes.NewReader(response)) - - var clients []*ServerClient - - for scan.Scan() { - line := scan.Bytes() - - if len(line) == 0 { - continue - } - - m := re.FindAllStringSubmatch(string(line), -1) - - if m == nil { - err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)") - sc[i] = &ServerClients{Error: err} - imOk = false - continue - } - - match := m[0][1:] - - queued, _ := strconv.ParseInt(match[1], 0, 64) - recvd, _ := strconv.ParseInt(match[2], 0, 64) - sent, _ := strconv.ParseInt(match[3], 0, 64) - sid, _ := strconv.ParseInt(match[4], 0, 64) - est, _ := strconv.ParseInt(match[6], 0, 64) - timeout, _ := strconv.ParseInt(match[7], 0, 32) - lresp, _ := strconv.ParseInt(match[10], 0, 64) - llat, _ := strconv.ParseInt(match[11], 0, 32) - minlat, _ := strconv.ParseInt(match[12], 0, 32) - avglat, _ := strconv.ParseInt(match[13], 0, 32) - maxlat, _ := strconv.ParseInt(match[14], 0, 32) - - // zookeeper returns a value, '0xffffffffffffffff', as the - // Lzxid for PING requests in the 'cons' output. - // unfortunately, in Go that is an invalid int64 and is not represented - // as -1. 
- // However, converting the string value to a big.Int and then back to - // an int64 properly sets the value to -1 - lzxid, ok := new(big.Int).SetString(match[9], 0) - - var errVal error - - if !ok { - errVal = fmt.Errorf("failed to convert lzxid value to big.Int") - imOk = false - } - - lcxid, ok := new(big.Int).SetString(match[8], 0) - - if !ok && errVal == nil { - errVal = fmt.Errorf("failed to convert lcxid value to big.Int") - imOk = false - } - - clients = append(clients, &ServerClient{ - Queued: queued, - Received: recvd, - Sent: sent, - SessionID: sid, - Lcxid: lcxid.Int64(), - Lzxid: lzxid.Int64(), - Timeout: int32(timeout), - LastLatency: int32(llat), - MinLatency: int32(minlat), - AvgLatency: int32(avglat), - MaxLatency: int32(maxlat), - Established: time.Unix(est, 0), - LastResponse: time.Unix(lresp, 0), - Addr: match[0], - LastOperation: match[5], - Error: errVal, - }) - } - - sc[i] = &ServerClients{Clients: clients} - } - - return sc, imOk -} - -func fourLetterWord(server, command string, timeout time.Duration) ([]byte, error) { - conn, err := net.DialTimeout("tcp", server, timeout) - - if err != nil { - return nil, err - } - - // the zookeeper server should automatically close this socket - // once the command has been processed, but better safe than sorry - defer conn.Close() - - conn.SetWriteDeadline(time.Now().Add(timeout)) - - _, err = conn.Write([]byte(command)) - - if err != nil { - return nil, err - } - - conn.SetReadDeadline(time.Now().Add(timeout)) - - resp, err := ioutil.ReadAll(conn) - - if err != nil { - return nil, err - } - - return resp, nil -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go deleted file mode 100644 index 63907268d9..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go +++ /dev/null @@ -1,367 +0,0 @@ -package zk - -import ( - "net" - "testing" - "time" -) - -var ( - zkSrvrOut = `Zookeeper version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT -Latency min/avg/max: 0/1/10 -Received: 4207 -Sent: 4220 -Connections: 81 -Outstanding: 1 -Zxid: 0x110a7a8f37 -Mode: leader -Node count: 306 -` - zkConsOut = ` /10.42.45.231:45361[1](queued=0,recved=9435,sent=9457,sid=0x94c2989e04716b5,lop=PING,est=1427238717217,to=20001,lcxid=0x55120915,lzxid=0xffffffffffffffff,lresp=1427259255908,llat=0,minlat=0,avglat=1,maxlat=17) - /10.55.33.98:34342[1](queued=0,recved=9338,sent=9350,sid=0x94c2989e0471731,lop=PING,est=1427238849319,to=20001,lcxid=0x55120944,lzxid=0xffffffffffffffff,lresp=1427259252294,llat=0,minlat=0,avglat=1,maxlat=18) - /10.44.145.114:46556[1](queued=0,recved=109253,sent=109617,sid=0x94c2989e0471709,lop=DELE,est=1427238791305,to=20001,lcxid=0x55139618,lzxid=0x110a7b187d,lresp=1427259257423,llat=2,minlat=0,avglat=1,maxlat=23) - -` -) - -func TestFLWRuok(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:2181") - - if err != nil { - t.Fatal(err) - } - - go tcpServer(l, "") - - var oks []bool - var ok bool - - oks = FLWRuok([]string{"127.0.0.1"}, time.Second*10) - - // close the connection, and pause shortly - // to cheat around a race condition - l.Close() - time.Sleep(time.Millisecond * 1) - - if len(oks) == 0 { - t.Errorf("no values returned") - } - - ok = oks[0] - - if !ok { - t.Errorf("instance should be marked as OK") - } - - // - // Confirm that it also returns false for dead instances - // - l, err = net.Listen("tcp", "127.0.0.1:2181") - - if err != nil { - t.Fatal(err) - } - - defer 
l.Close() - - go tcpServer(l, "dead") - - oks = FLWRuok([]string{"127.0.0.1"}, time.Second*10) - - if len(oks) == 0 { - t.Errorf("no values returned") - } - - ok = oks[0] - - if ok { - t.Errorf("instance should be marked as not OK") - } -} - -func TestFLWSrvr(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:2181") - - if err != nil { - t.Fatal(err) - } - - defer l.Close() - - go tcpServer(l, "") - - var statsSlice []*ServerStats - var stats *ServerStats - var ok bool - - statsSlice, ok = FLWSrvr([]string{"127.0.0.1:2181"}, time.Second*10) - - if !ok { - t.Errorf("failure indicated on 'srvr' parsing") - } - - if len(statsSlice) == 0 { - t.Errorf("no *ServerStats instances returned") - } - - stats = statsSlice[0] - - if stats.Error != nil { - t.Fatalf("error seen in stats: %v", stats.Error) - } - - if stats.Sent != 4220 { - t.Errorf("Sent != 4220") - } - - if stats.Received != 4207 { - t.Errorf("Received != 4207") - } - - if stats.NodeCount != 306 { - t.Errorf("NodeCount != 306") - } - - if stats.MinLatency != 0 { - t.Errorf("MinLatency != 0") - } - - if stats.AvgLatency != 1 { - t.Errorf("AvgLatency != 1") - } - - if stats.MaxLatency != 10 { - t.Errorf("MaxLatency != 10") - } - - if stats.Connections != 81 { - t.Errorf("Connections != 81") - } - - if stats.Outstanding != 1 { - t.Errorf("Outstanding != 1") - } - - if stats.Epoch != 17 { - t.Errorf("Epoch != 17") - } - - if stats.Counter != 175804215 { - t.Errorf("Counter != 175804215") - } - - if stats.Mode != ModeLeader { - t.Errorf("Mode != ModeLeader") - } - - if stats.Version != "3.4.6-1569965" { - t.Errorf("Version expected: 3.4.6-1569965") - } - - buildTime, err := time.Parse("01/02/2006 15:04 MST", "02/20/2014 09:09 GMT") - - if err != nil { - t.Fatal(err) - } - - if !stats.BuildTime.Equal(buildTime) { - t.Errorf("BuildTime mismatch (%v/%v)", stats.BuildTime, buildTime) - } -} - -func TestFLWCons(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:2181") - - if err != nil { - t.Fatal(err) - } - - defer l.Close() - - go tcpServer(l, "") - - var clients []*ServerClients - var ok bool - - clients, ok = FLWCons([]string{"127.0.0.1"}, time.Second*10) - - if !ok { - t.Errorf("failure indicated on 'cons' parsing") - } - - if len(clients) == 0 { - t.Errorf("no *ServerClients instances returned") - } - - results := []*ServerClient{ - &ServerClient{ - Queued: 0, - Received: 9435, - Sent: 9457, - SessionID: 669956116721374901, - LastOperation: "PING", - Established: time.Unix(1427238717217, 0), - Timeout: 20001, - Lcxid: 1427245333, - Lzxid: -1, - LastResponse: time.Unix(1427259255908, 0), - LastLatency: 0, - MinLatency: 0, - AvgLatency: 1, - MaxLatency: 17, - Addr: "10.42.45.231:45361", - }, - &ServerClient{ - Queued: 0, - Received: 9338, - Sent: 9350, - SessionID: 669956116721375025, - LastOperation: "PING", - Established: time.Unix(1427238849319, 0), - Timeout: 20001, - Lcxid: 1427245380, - Lzxid: -1, - LastResponse: time.Unix(1427259252294, 0), - LastLatency: 0, - MinLatency: 0, - AvgLatency: 1, - MaxLatency: 18, - Addr: "10.55.33.98:34342", - }, - &ServerClient{ - Queued: 0, - Received: 109253, - Sent: 109617, - SessionID: 669956116721374985, - LastOperation: "DELE", - Established: time.Unix(1427238791305, 0), - Timeout: 20001, - Lcxid: 1427346968, - Lzxid: 73190283389, - LastResponse: time.Unix(1427259257423, 0), - LastLatency: 2, - MinLatency: 0, - AvgLatency: 1, - MaxLatency: 23, - Addr: "10.44.145.114:46556", - }, - } - - for _, z := range clients { - if z.Error != nil { - t.Errorf("error seen: %v", z.Error) - } - - for i, v := range z.Clients { - c := results[i] - - if v.Error != nil { - t.Errorf("client 
error seen: %v", err.Error()) - } - - if v.Queued != c.Queued { - t.Errorf("Queued value mismatch (%d/%d)", v.Queued, c.Queued) - } - - if v.Received != c.Received { - t.Errorf("Received value mismatch (%d/%d)", v.Received, c.Received) - } - - if v.Sent != c.Sent { - t.Errorf("Sent value mismatch (%d/%d)", v.Sent, c.Sent) - } - - if v.SessionID != c.SessionID { - t.Errorf("SessionID value mismatch (%d/%d)", v.SessionID, c.SessionID) - } - - if v.LastOperation != c.LastOperation { - t.Errorf("LastOperation value mismatch ('%v'/'%v')", v.LastOperation, c.LastOperation) - } - - if v.Timeout != c.Timeout { - t.Errorf("Timeout value mismatch (%d/%d)", v.Timeout, c.Timeout) - } - - if v.Lcxid != c.Lcxid { - t.Errorf("Lcxid value mismatch (%d/%d)", v.Lcxid, c.Lcxid) - } - - if v.Lzxid != c.Lzxid { - t.Errorf("Lzxid value mismatch (%d/%d)", v.Lzxid, c.Lzxid) - } - - if v.LastLatency != c.LastLatency { - t.Errorf("LastLatency value mismatch (%d/%d)", v.LastLatency, c.LastLatency) - } - - if v.MinLatency != c.MinLatency { - t.Errorf("MinLatency value mismatch (%d/%d)", v.MinLatency, c.MinLatency) - } - - if v.AvgLatency != c.AvgLatency { - t.Errorf("AvgLatency value mismatch (%d/%d)", v.AvgLatency, c.AvgLatency) - } - - if v.MaxLatency != c.MaxLatency { - t.Errorf("MaxLatency value mismatch (%d/%d)", v.MaxLatency, c.MaxLatency) - } - - if v.Addr != c.Addr { - t.Errorf("Addr value mismatch ('%v'/'%v')", v.Addr, c.Addr) - } - - if !c.Established.Equal(v.Established) { - t.Errorf("Established value mismatch (%v/%v)", c.Established, v.Established) - } - - if !c.LastResponse.Equal(v.LastResponse) { - t.Errorf("Established value mismatch (%v/%v)", c.LastResponse, v.LastResponse) - } - } - } -} - -func tcpServer(listener net.Listener, thing string) { - for { - conn, err := listener.Accept() - if err != nil { - return - } - go connHandler(conn, thing) - } -} - -func connHandler(conn net.Conn, thing string) { - defer conn.Close() - - data := make([]byte, 4) - - _, err := conn.Read(data) - - if err != nil { - return - } - - switch string(data) { - case "ruok": - switch thing { - case "dead": - return - default: - conn.Write([]byte("imok")) - } - case "srvr": - switch thing { - case "dead": - return - default: - conn.Write([]byte(zkSrvrOut)) - } - case "cons": - switch thing { - case "dead": - return - default: - conn.Write([]byte(zkConsOut)) - } - default: - conn.Write([]byte("This ZooKeeper instance is not currently serving requests.")) - } -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock.go deleted file mode 100644 index f13a8b0ba6..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock.go +++ /dev/null @@ -1,142 +0,0 @@ -package zk - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -var ( - // ErrDeadlock is returned by Lock when trying to lock twice without unlocking first - ErrDeadlock = errors.New("zk: trying to acquire a lock twice") - // ErrNotLocked is returned by Unlock when trying to release a lock that has not first be acquired. - ErrNotLocked = errors.New("zk: not locked") -) - -// Lock is a mutual exclusion lock. -type Lock struct { - c *Conn - path string - acl []ACL - lockPath string - seq int -} - -// NewLock creates a new lock instance using the provided connection, path, and acl. -// The path must be a node that is only used by this lock. A lock instances starts -// unlocked until Lock() is called. 
-func NewLock(c *Conn, path string, acl []ACL) *Lock { - return &Lock{ - c: c, - path: path, - acl: acl, - } -} - -func parseSeq(path string) (int, error) { - parts := strings.Split(path, "-") - return strconv.Atoi(parts[len(parts)-1]) -} - -// Lock attempts to acquire the lock. It will wait to return until the lock -// is acquired or an error occurs. If this instance already has the lock -// then ErrDeadlock is returned. -func (l *Lock) Lock() error { - if l.lockPath != "" { - return ErrDeadlock - } - - prefix := fmt.Sprintf("%s/lock-", l.path) - - path := "" - var err error - for i := 0; i < 3; i++ { - path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl) - if err == ErrNoNode { - // Create parent node. - parts := strings.Split(l.path, "/") - pth := "" - for _, p := range parts[1:] { - pth += "/" + p - _, err := l.c.Create(pth, []byte{}, 0, l.acl) - if err != nil && err != ErrNodeExists { - return err - } - } - } else if err == nil { - break - } else { - return err - } - } - if err != nil { - return err - } - - seq, err := parseSeq(path) - if err != nil { - return err - } - - for { - children, _, err := l.c.Children(l.path) - if err != nil { - return err - } - - lowestSeq := seq - prevSeq := 0 - prevSeqPath := "" - for _, p := range children { - s, err := parseSeq(p) - if err != nil { - return err - } - if s < lowestSeq { - lowestSeq = s - } - if s < seq && s > prevSeq { - prevSeq = s - prevSeqPath = p - } - } - - if seq == lowestSeq { - // Acquired the lock - break - } - - // Wait on the node next in line for the lock - _, _, ch, err := l.c.GetW(l.path + "/" + prevSeqPath) - if err != nil && err != ErrNoNode { - return err - } else if err == ErrNoNode { - // try again - continue - } - - ev := <-ch - if ev.Err != nil { - return ev.Err - } - } - - l.seq = seq - l.lockPath = path - return nil -} - -// Unlock releases an acquired lock. If the lock is not currently acquired by -// this Lock instance then ErrNotLocked is returned.
-func (l *Lock) Unlock() error { - if l.lockPath == "" { - return ErrNotLocked - } - if err := l.c.Delete(l.lockPath, -1); err != nil { - return err - } - l.lockPath = "" - l.seq = 0 - return nil -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go deleted file mode 100644 index 8a3478a336..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package zk - -import ( - "testing" - "time" -) - -func TestLock(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - acls := WorldACL(PermAll) - - l := NewLock(zk, "/test", acls) - if err := l.Lock(); err != nil { - t.Fatal(err) - } - if err := l.Unlock(); err != nil { - t.Fatal(err) - } - - val := make(chan int, 3) - - if err := l.Lock(); err != nil { - t.Fatal(err) - } - - l2 := NewLock(zk, "/test", acls) - go func() { - if err := l2.Lock(); err != nil { - t.Fatal(err) - } - val <- 2 - if err := l2.Unlock(); err != nil { - t.Fatal(err) - } - val <- 3 - }() - time.Sleep(time.Millisecond * 100) - - val <- 1 - if err := l.Unlock(); err != nil { - t.Fatal(err) - } - if x := <-val; x != 1 { - t.Fatalf("Expected 1 instead of %d", x) - } - if x := <-val; x != 2 { - t.Fatalf("Expected 2 instead of %d", x) - } - if x := <-val; x != 3 { - t.Fatalf("Expected 3 instead of %d", x) - } -} - -// This tests creating a lock with a path that's more than 1 node deep (e.g. "/test-multi-level/lock"), -// when a part of that path already exists (i.e. "/test-multi-level" node already exists). 
-func TestMultiLevelLock(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - acls := WorldACL(PermAll) - path := "/test-multi-level" - if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if p != path { - t.Fatalf("Create returned different path '%s' != '%s'", p, path) - } - l := NewLock(zk, "/test-multi-level/lock", acls) - defer zk.Delete("/test-multi-level", -1) // Clean up what we've created for this test - defer zk.Delete("/test-multi-level/lock", -1) - if err := l.Lock(); err != nil { - t.Fatal(err) - } - if err := l.Unlock(); err != nil { - t.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_help.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_help.go deleted file mode 100644 index 4a53772bde..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_help.go +++ /dev/null @@ -1,119 +0,0 @@ -package zk - -import ( - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "time" -) - -type TestServer struct { - Port int - Path string - Srv *Server -} - -type TestCluster struct { - Path string - Servers []TestServer -} - -func StartTestCluster(size int, stdout, stderr io.Writer) (*TestCluster, error) { - tmpPath, err := ioutil.TempDir("", "gozk") - if err != nil { - return nil, err - } - success := false - startPort := int(rand.Int31n(6000) + 10000) - cluster := &TestCluster{Path: tmpPath} - defer func() { - if !success { - cluster.Stop() - } - }() - for serverN := 0; serverN < size; serverN++ { - srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN)) - if err := os.Mkdir(srvPath, 0700); err != nil { - return nil, err - } - port := startPort + serverN*3 - cfg := ServerConfig{ - ClientPort: port, - DataDir: srvPath, - } - for i := 0; i < size; i++ { - cfg.Servers = append(cfg.Servers, ServerConfigServer{ - ID: i + 1, - Host: "127.0.0.1", - PeerPort: startPort + i*3 + 1, - LeaderElectionPort: startPort + i*3 + 2, - }) - } - cfgPath := filepath.Join(srvPath, "zoo.cfg") - fi, err := os.Create(cfgPath) - if err != nil { - return nil, err - } - err = cfg.Marshall(fi) - fi.Close() - if err != nil { - return nil, err - } - - fi, err = os.Create(filepath.Join(srvPath, "myid")) - if err != nil { - return nil, err - } - _, err = fmt.Fprintf(fi, "%d\n", serverN+1) - fi.Close() - if err != nil { - return nil, err - } - - srv := &Server{ - ConfigPath: cfgPath, - Stdout: stdout, - Stderr: stderr, - } - if err := srv.Start(); err != nil { - return nil, err - } - cluster.Servers = append(cluster.Servers, TestServer{ - Path: srvPath, - Port: cfg.ClientPort, - Srv: srv, - }) - } - success = true - time.Sleep(time.Second) // Give the server time to become active. Should probably actually attempt to connect to verify. 
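	// (one option would be to poll FLWRuok on the new servers until they answer imok)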
- return cluster, nil -} - -func (ts *TestCluster) Connect(idx int) (*Conn, error) { - zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", ts.Servers[idx].Port)}, time.Second*15) - return zk, err -} - -func (ts *TestCluster) ConnectAll() (*Conn, <-chan Event, error) { - return ts.ConnectAllTimeout(time.Second * 15) -} - -func (ts *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) { - hosts := make([]string, len(ts.Servers)) - for i, srv := range ts.Servers { - hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port) - } - zk, ch, err := Connect(hosts, sessionTimeout) - return zk, ch, err -} - -func (ts *TestCluster) Stop() error { - for _, srv := range ts.Servers { - srv.Srv.Stop() - } - defer os.RemoveAll(ts.Path) - return nil -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_java.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_java.go deleted file mode 100644 index e553ec1d9f..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_java.go +++ /dev/null @@ -1,136 +0,0 @@ -package zk - -import ( - "fmt" - "io" - "os" - "os/exec" - "path/filepath" -) - -type ErrMissingServerConfigField string - -func (e ErrMissingServerConfigField) Error() string { - return fmt.Sprintf("zk: missing server config field '%s'", string(e)) -} - -const ( - DefaultServerTickTime = 2000 - DefaultServerInitLimit = 10 - DefaultServerSyncLimit = 5 - DefaultServerAutoPurgeSnapRetainCount = 3 - DefaultPeerPort = 2888 - DefaultLeaderElectionPort = 3888 -) - -type ServerConfigServer struct { - ID int - Host string - PeerPort int - LeaderElectionPort int -} - -type ServerConfig struct { - TickTime int // Number of milliseconds of each tick - InitLimit int // Number of ticks that the initial synchronization phase can take - SyncLimit int // Number of ticks that can pass between sending a request and getting an acknowledgement - DataDir string // Directory where the snapshot is stored - ClientPort int // Port at which clients will connect - AutoPurgeSnapRetainCount int // Number of snapshots to retain in dataDir - AutoPurgePurgeInterval int // Purge task interval in hours (0 to disable auto purge) - Servers []ServerConfigServer -} - -func (sc ServerConfig) Marshall(w io.Writer) error { - if sc.DataDir == "" { - return ErrMissingServerConfigField("dataDir") - } - fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir) - if sc.TickTime <= 0 { - sc.TickTime = DefaultServerTickTime - } - fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime) - if sc.InitLimit <= 0 { - sc.InitLimit = DefaultServerInitLimit - } - fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit) - if sc.SyncLimit <= 0 { - sc.SyncLimit = DefaultServerSyncLimit - } - fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit) - if sc.ClientPort <= 0 { - sc.ClientPort = DefaultPort - } - fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort) - if sc.AutoPurgePurgeInterval > 0 { - if sc.AutoPurgeSnapRetainCount <= 0 { - sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount - } - fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount) - fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval) - } - if len(sc.Servers) > 0 { - for _, srv := range sc.Servers { - if srv.PeerPort <= 0 { - srv.PeerPort = DefaultPeerPort - } - if srv.LeaderElectionPort <= 0 { - srv.LeaderElectionPort = DefaultLeaderElectionPort - } - fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort) - } - } - return nil -} - -var jarSearchPaths 
= []string{
- "zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
- "../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
- "/usr/share/java/zookeeper-*.jar",
- "/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
- "/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar",
-}
-
-func findZookeeperFatJar() string {
- var paths []string
- zkPath := os.Getenv("ZOOKEEPER_PATH")
- if zkPath == "" {
- paths = jarSearchPaths
- } else {
- paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")}
- }
- for _, path := range paths {
- matches, _ := filepath.Glob(path)
- // TODO: could sort by version and pick latest
- if len(matches) > 0 {
- return matches[0]
- }
- }
- return ""
-}
-
-type Server struct {
- JarPath string
- ConfigPath string
- Stdout, Stderr io.Writer
-
- cmd *exec.Cmd
-}
-
-func (srv *Server) Start() error {
- if srv.JarPath == "" {
- srv.JarPath = findZookeeperFatJar()
- if srv.JarPath == "" {
- return fmt.Errorf("zk: unable to find server jar")
- }
- }
- srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath)
- srv.cmd.Stdout = srv.Stdout
- srv.cmd.Stderr = srv.Stderr
- return srv.cmd.Start()
-}
-
-func (srv *Server) Stop() error {
- srv.cmd.Process.Signal(os.Kill)
- return srv.cmd.Wait()
-}
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs.go
deleted file mode 100644
index 8fbc069ee1..0000000000
--- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs.go
+++ /dev/null
@@ -1,640 +0,0 @@
-package zk
-
-import (
- "encoding/binary"
- "errors"
- "log"
- "reflect"
- "runtime"
- "time"
-)
-
-var (
- ErrUnhandledFieldType = errors.New("zk: unhandled field type")
- ErrPtrExpected = errors.New("zk: encode/decode expect a non-nil pointer to struct")
- ErrShortBuffer = errors.New("zk: buffer too small")
-)
-
-type defaultLogger struct{}
-
-func (defaultLogger) Printf(format string, a ...interface{}) {
- log.Printf(format, a...)
-}
-
-type ACL struct {
- Perms int32
- Scheme string
- ID string
-}
-
-type Stat struct {
- Czxid int64 // The zxid of the change that caused this znode to be created.
- Mzxid int64 // The zxid of the change that last modified this znode.
- Ctime int64 // The time in milliseconds from epoch when this znode was created.
- Mtime int64 // The time in milliseconds from epoch when this znode was last modified.
- Version int32 // The number of changes to the data of this znode.
- Cversion int32 // The number of changes to the children of this znode.
- Aversion int32 // The number of changes to the ACL of this znode.
- EphemeralOwner int64 // The session id of the owner of this znode if the znode is an ephemeral node. If it is not an ephemeral node, it will be zero.
- DataLength int32 // The length of the data field of this znode.
- NumChildren int32 // The number of children of this znode.
- Pzxid int64 // last modified children
-}
-
-// ServerClient is the information for a single Zookeeper client and its session.
-// This is used to parse/extract the output of the `cons` command.
-type ServerClient struct {
- Queued int64
- Received int64
- Sent int64
- SessionID int64
- Lcxid int64
- Lzxid int64
- Timeout int32
- LastLatency int32
- MinLatency int32
- AvgLatency int32
- MaxLatency int32
- Established time.Time
- LastResponse time.Time
- Addr string
- LastOperation string // maybe?
- Error error
-}
-
-// ServerClients is a struct for the FLWCons() function.
It's used to provide -// the list of Clients. -// -// This is needed because FLWCons() takes multiple servers. -type ServerClients struct { - Clients []*ServerClient - Error error -} - -// ServerStats is the information pulled from the Zookeeper `stat` command. -type ServerStats struct { - Sent int64 - Received int64 - NodeCount int64 - MinLatency int64 - AvgLatency int64 - MaxLatency int64 - Connections int64 - Outstanding int64 - Epoch int32 - Counter int32 - BuildTime time.Time - Mode Mode - Version string - Error error -} - -type requestHeader struct { - Xid int32 - Opcode int32 -} - -type responseHeader struct { - Xid int32 - Zxid int64 - Err ErrCode -} - -type multiHeader struct { - Type int32 - Done bool - Err ErrCode -} - -type auth struct { - Type int32 - Scheme string - Auth []byte -} - -// Generic request structs - -type pathRequest struct { - Path string -} - -type PathVersionRequest struct { - Path string - Version int32 -} - -type pathWatchRequest struct { - Path string - Watch bool -} - -type pathResponse struct { - Path string -} - -type statResponse struct { - Stat Stat -} - -// - -type CheckVersionRequest PathVersionRequest -type closeRequest struct{} -type closeResponse struct{} - -type connectRequest struct { - ProtocolVersion int32 - LastZxidSeen int64 - TimeOut int32 - SessionID int64 - Passwd []byte -} - -type connectResponse struct { - ProtocolVersion int32 - TimeOut int32 - SessionID int64 - Passwd []byte -} - -type CreateRequest struct { - Path string - Data []byte - Acl []ACL - Flags int32 -} - -type createResponse pathResponse -type DeleteRequest PathVersionRequest -type deleteResponse struct{} - -type errorResponse struct { - Err int32 -} - -type existsRequest pathWatchRequest -type existsResponse statResponse -type getAclRequest pathRequest - -type getAclResponse struct { - Acl []ACL - Stat Stat -} - -type getChildrenRequest pathRequest - -type getChildrenResponse struct { - Children []string -} - -type getChildren2Request pathWatchRequest - -type getChildren2Response struct { - Children []string - Stat Stat -} - -type getDataRequest pathWatchRequest - -type getDataResponse struct { - Data []byte - Stat Stat -} - -type getMaxChildrenRequest pathRequest - -type getMaxChildrenResponse struct { - Max int32 -} - -type getSaslRequest struct { - Token []byte -} - -type pingRequest struct{} -type pingResponse struct{} - -type setAclRequest struct { - Path string - Acl []ACL - Version int32 -} - -type setAclResponse statResponse - -type SetDataRequest struct { - Path string - Data []byte - Version int32 -} - -type setDataResponse statResponse - -type setMaxChildren struct { - Path string - Max int32 -} - -type setSaslRequest struct { - Token string -} - -type setSaslResponse struct { - Token string -} - -type setWatchesRequest struct { - RelativeZxid int64 - DataWatches []string - ExistWatches []string - ChildWatches []string -} - -type setWatchesResponse struct{} - -type syncRequest pathRequest -type syncResponse pathResponse - -type setAuthRequest auth -type setAuthResponse struct{} - -type multiRequestOp struct { - Header multiHeader - Op interface{} -} -type multiRequest struct { - Ops []multiRequestOp - DoneHeader multiHeader -} -type multiResponseOp struct { - Header multiHeader - String string - Stat *Stat -} -type multiResponse struct { - Ops []multiResponseOp - DoneHeader multiHeader -} - -func (r *multiRequest) Encode(buf []byte) (int, error) { - total := 0 - for _, op := range r.Ops { - op.Header.Done = false - n, err := encodePacketValue(buf[total:], 
reflect.ValueOf(op)) - if err != nil { - return total, err - } - total += n - } - r.DoneHeader.Done = true - n, err := encodePacketValue(buf[total:], reflect.ValueOf(r.DoneHeader)) - if err != nil { - return total, err - } - total += n - - return total, nil -} - -func (r *multiRequest) Decode(buf []byte) (int, error) { - r.Ops = make([]multiRequestOp, 0) - r.DoneHeader = multiHeader{-1, true, -1} - total := 0 - for { - header := &multiHeader{} - n, err := decodePacketValue(buf[total:], reflect.ValueOf(header)) - if err != nil { - return total, err - } - total += n - if header.Done { - r.DoneHeader = *header - break - } - - req := requestStructForOp(header.Type) - if req == nil { - return total, ErrAPIError - } - n, err = decodePacketValue(buf[total:], reflect.ValueOf(req)) - if err != nil { - return total, err - } - total += n - r.Ops = append(r.Ops, multiRequestOp{*header, req}) - } - return total, nil -} - -func (r *multiResponse) Decode(buf []byte) (int, error) { - r.Ops = make([]multiResponseOp, 0) - r.DoneHeader = multiHeader{-1, true, -1} - total := 0 - for { - header := &multiHeader{} - n, err := decodePacketValue(buf[total:], reflect.ValueOf(header)) - if err != nil { - return total, err - } - total += n - if header.Done { - r.DoneHeader = *header - break - } - - res := multiResponseOp{Header: *header} - var w reflect.Value - switch header.Type { - default: - return total, ErrAPIError - case opCreate: - w = reflect.ValueOf(&res.String) - case opSetData: - res.Stat = new(Stat) - w = reflect.ValueOf(res.Stat) - case opCheck, opDelete: - } - if w.IsValid() { - n, err := decodePacketValue(buf[total:], w) - if err != nil { - return total, err - } - total += n - } - r.Ops = append(r.Ops, res) - } - return total, nil -} - -type watcherEvent struct { - Type EventType - State State - Path string -} - -type decoder interface { - Decode(buf []byte) (int, error) -} - -type encoder interface { - Encode(buf []byte) (int, error) -} - -func decodePacket(buf []byte, st interface{}) (n int, err error) { - defer func() { - if r := recover(); r != nil { - if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { - err = ErrShortBuffer - } else { - panic(r) - } - } - }() - - v := reflect.ValueOf(st) - if v.Kind() != reflect.Ptr || v.IsNil() { - return 0, ErrPtrExpected - } - return decodePacketValue(buf, v) -} - -func decodePacketValue(buf []byte, v reflect.Value) (int, error) { - rv := v - kind := v.Kind() - if kind == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - kind = v.Kind() - } - - n := 0 - switch kind { - default: - return n, ErrUnhandledFieldType - case reflect.Struct: - if de, ok := rv.Interface().(decoder); ok { - return de.Decode(buf) - } else if de, ok := v.Interface().(decoder); ok { - return de.Decode(buf) - } else { - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - n2, err := decodePacketValue(buf[n:], field) - n += n2 - if err != nil { - return n, err - } - } - } - case reflect.Bool: - v.SetBool(buf[n] != 0) - n++ - case reflect.Int32: - v.SetInt(int64(binary.BigEndian.Uint32(buf[n : n+4]))) - n += 4 - case reflect.Int64: - v.SetInt(int64(binary.BigEndian.Uint64(buf[n : n+8]))) - n += 8 - case reflect.String: - ln := int(binary.BigEndian.Uint32(buf[n : n+4])) - v.SetString(string(buf[n+4 : n+4+ln])) - n += 4 + ln - case reflect.Slice: - switch v.Type().Elem().Kind() { - default: - count := int(binary.BigEndian.Uint32(buf[n : n+4])) - n += 4 - values := reflect.MakeSlice(v.Type(), count, count) 
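- // (Illustrative note: the generic path below decodes a big-endian
- // int32 element count and recurses once per element, while the
- // Uint8 case underneath instead reads one length-prefixed byte
- // blob, with length -1 standing in for a nil slice.)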
- v.Set(values) - for i := 0; i < count; i++ { - n2, err := decodePacketValue(buf[n:], values.Index(i)) - n += n2 - if err != nil { - return n, err - } - } - case reflect.Uint8: - ln := int(int32(binary.BigEndian.Uint32(buf[n : n+4]))) - if ln < 0 { - n += 4 - v.SetBytes(nil) - } else { - bytes := make([]byte, ln) - copy(bytes, buf[n+4:n+4+ln]) - v.SetBytes(bytes) - n += 4 + ln - } - } - } - return n, nil -} - -func encodePacket(buf []byte, st interface{}) (n int, err error) { - defer func() { - if r := recover(); r != nil { - if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { - err = ErrShortBuffer - } else { - panic(r) - } - } - }() - - v := reflect.ValueOf(st) - if v.Kind() != reflect.Ptr || v.IsNil() { - return 0, ErrPtrExpected - } - return encodePacketValue(buf, v) -} - -func encodePacketValue(buf []byte, v reflect.Value) (int, error) { - rv := v - for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { - v = v.Elem() - } - - n := 0 - switch v.Kind() { - default: - return n, ErrUnhandledFieldType - case reflect.Struct: - if en, ok := rv.Interface().(encoder); ok { - return en.Encode(buf) - } else if en, ok := v.Interface().(encoder); ok { - return en.Encode(buf) - } else { - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - n2, err := encodePacketValue(buf[n:], field) - n += n2 - if err != nil { - return n, err - } - } - } - case reflect.Bool: - if v.Bool() { - buf[n] = 1 - } else { - buf[n] = 0 - } - n++ - case reflect.Int32: - binary.BigEndian.PutUint32(buf[n:n+4], uint32(v.Int())) - n += 4 - case reflect.Int64: - binary.BigEndian.PutUint64(buf[n:n+8], uint64(v.Int())) - n += 8 - case reflect.String: - str := v.String() - binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(str))) - copy(buf[n+4:n+4+len(str)], []byte(str)) - n += 4 + len(str) - case reflect.Slice: - switch v.Type().Elem().Kind() { - default: - count := v.Len() - startN := n - n += 4 - for i := 0; i < count; i++ { - n2, err := encodePacketValue(buf[n:], v.Index(i)) - n += n2 - if err != nil { - return n, err - } - } - binary.BigEndian.PutUint32(buf[startN:startN+4], uint32(count)) - case reflect.Uint8: - if v.IsNil() { - binary.BigEndian.PutUint32(buf[n:n+4], uint32(0xffffffff)) - n += 4 - } else { - bytes := v.Bytes() - binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(bytes))) - copy(buf[n+4:n+4+len(bytes)], bytes) - n += 4 + len(bytes) - } - } - } - return n, nil -} - -func requestStructForOp(op int32) interface{} { - switch op { - case opClose: - return &closeRequest{} - case opCreate: - return &CreateRequest{} - case opDelete: - return &DeleteRequest{} - case opExists: - return &existsRequest{} - case opGetAcl: - return &getAclRequest{} - case opGetChildren: - return &getChildrenRequest{} - case opGetChildren2: - return &getChildren2Request{} - case opGetData: - return &getDataRequest{} - case opPing: - return &pingRequest{} - case opSetAcl: - return &setAclRequest{} - case opSetData: - return &SetDataRequest{} - case opSetWatches: - return &setWatchesRequest{} - case opSync: - return &syncRequest{} - case opSetAuth: - return &setAuthRequest{} - case opCheck: - return &CheckVersionRequest{} - case opMulti: - return &multiRequest{} - } - return nil -} - -func responseStructForOp(op int32) interface{} { - switch op { - case opClose: - return &closeResponse{} - case opCreate: - return &createResponse{} - case opDelete: - return &deleteResponse{} - case opExists: - return &existsResponse{} - case opGetAcl: - return &getAclResponse{} - case opGetChildren: - 
return &getChildrenResponse{} - case opGetChildren2: - return &getChildren2Response{} - case opGetData: - return &getDataResponse{} - case opPing: - return &pingResponse{} - case opSetAcl: - return &setAclResponse{} - case opSetData: - return &setDataResponse{} - case opSetWatches: - return &setWatchesResponse{} - case opSync: - return &syncResponse{} - case opWatcherEvent: - return &watcherEvent{} - case opSetAuth: - return &setAuthResponse{} - // case opCheck: - // return &checkVersionResponse{} - case opMulti: - return &multiResponse{} - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go deleted file mode 100644 index cafbbd95c2..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package zk - -import ( - "reflect" - "testing" -) - -func TestEncodeDecodePacket(t *testing.T) { - encodeDecodeTest(t, &requestHeader{-2, 5}) - encodeDecodeTest(t, &connectResponse{1, 2, 3, nil}) - encodeDecodeTest(t, &connectResponse{1, 2, 3, []byte{4, 5, 6}}) - encodeDecodeTest(t, &getAclResponse{[]ACL{{12, "s", "anyone"}}, Stat{}}) - encodeDecodeTest(t, &getChildrenResponse{[]string{"foo", "bar"}}) - encodeDecodeTest(t, &pathWatchRequest{"path", true}) - encodeDecodeTest(t, &pathWatchRequest{"path", false}) - encodeDecodeTest(t, &CheckVersionRequest{"/", -1}) - encodeDecodeTest(t, &multiRequest{Ops: []multiRequestOp{{multiHeader{opCheck, false, -1}, &CheckVersionRequest{"/", -1}}}}) -} - -func encodeDecodeTest(t *testing.T, r interface{}) { - buf := make([]byte, 1024) - n, err := encodePacket(buf, r) - if err != nil { - t.Errorf("encodePacket returned non-nil error %+v\n", err) - return - } - t.Logf("%+v %x", r, buf[:n]) - r2 := reflect.New(reflect.ValueOf(r).Elem().Type()).Interface() - n2, err := decodePacket(buf[:n], r2) - if err != nil { - t.Errorf("decodePacket returned non-nil error %+v\n", err) - return - } - if n != n2 { - t.Errorf("sizes don't match: %d != %d", n, n2) - return - } - if !reflect.DeepEqual(r, r2) { - t.Errorf("results don't match: %+v != %+v", r, r2) - return - } -} - -func TestEncodeShortBuffer(t *testing.T) { - buf := make([]byte, 0) - _, err := encodePacket(buf, &requestHeader{1, 2}) - if err != ErrShortBuffer { - t.Errorf("encodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err) - return - } -} - -func TestDecodeShortBuffer(t *testing.T) { - buf := make([]byte, 0) - _, err := decodePacket(buf, &responseHeader{}) - if err != ErrShortBuffer { - t.Errorf("decodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err) - return - } -} - -func BenchmarkEncode(b *testing.B) { - buf := make([]byte, 4096) - st := &connectRequest{Passwd: []byte("1234567890")} - b.ReportAllocs() - for i := 0; i < b.N; i++ { - if _, err := encodePacket(buf, st); err != nil { - b.Fatal(err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/tracer.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/tracer.go deleted file mode 100644 index 7af2e96bbc..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/tracer.go +++ /dev/null @@ -1,148 +0,0 @@ -package zk - -import ( - "encoding/binary" - "fmt" - "io" - "net" - "sync" -) - -var ( - requests = make(map[int32]int32) // Map of Xid -> Opcode - requestsLock = &sync.Mutex{} -) - -func trace(conn1, conn2 net.Conn, client bool) { - defer conn1.Close() - defer 
conn2.Close() - buf := make([]byte, 10*1024) - init := true - for { - _, err := io.ReadFull(conn1, buf[:4]) - if err != nil { - fmt.Println("1>", client, err) - return - } - - blen := int(binary.BigEndian.Uint32(buf[:4])) - - _, err = io.ReadFull(conn1, buf[4:4+blen]) - if err != nil { - fmt.Println("2>", client, err) - return - } - - var cr interface{} - opcode := int32(-1) - readHeader := true - if client { - if init { - cr = &connectRequest{} - readHeader = false - } else { - xid := int32(binary.BigEndian.Uint32(buf[4:8])) - opcode = int32(binary.BigEndian.Uint32(buf[8:12])) - requestsLock.Lock() - requests[xid] = opcode - requestsLock.Unlock() - cr = requestStructForOp(opcode) - if cr == nil { - fmt.Printf("Unknown opcode %d\n", opcode) - } - } - } else { - if init { - cr = &connectResponse{} - readHeader = false - } else { - xid := int32(binary.BigEndian.Uint32(buf[4:8])) - zxid := int64(binary.BigEndian.Uint64(buf[8:16])) - errnum := int32(binary.BigEndian.Uint32(buf[16:20])) - if xid != -1 || zxid != -1 { - requestsLock.Lock() - found := false - opcode, found = requests[xid] - if !found { - opcode = 0 - } - delete(requests, xid) - requestsLock.Unlock() - } else { - opcode = opWatcherEvent - } - cr = responseStructForOp(opcode) - if cr == nil { - fmt.Printf("Unknown opcode %d\n", opcode) - } - if errnum != 0 { - cr = &struct{}{} - } - } - } - opname := "." - if opcode != -1 { - opname = opNames[opcode] - } - if cr == nil { - fmt.Printf("%+v %s %+v\n", client, opname, buf[4:4+blen]) - } else { - n := 4 - hdrStr := "" - if readHeader { - var hdr interface{} - if client { - hdr = &requestHeader{} - } else { - hdr = &responseHeader{} - } - if n2, err := decodePacket(buf[n:n+blen], hdr); err != nil { - fmt.Println(err) - } else { - n += n2 - } - hdrStr = fmt.Sprintf(" %+v", hdr) - } - if _, err := decodePacket(buf[n:n+blen], cr); err != nil { - fmt.Println(err) - } - fmt.Printf("%+v %s%s %+v\n", client, opname, hdrStr, cr) - } - - init = false - - written, err := conn2.Write(buf[:4+blen]) - if err != nil { - fmt.Println("3>", client, err) - return - } else if written != 4+blen { - fmt.Printf("Written != read: %d != %d\n", written, blen) - return - } - } -} - -func handleConnection(addr string, conn net.Conn) { - zkConn, err := net.Dial("tcp", addr) - if err != nil { - fmt.Println(err) - return - } - go trace(conn, zkConn, true) - trace(zkConn, conn, false) -} - -func StartTracer(listenAddr, serverAddr string) { - ln, err := net.Listen("tcp", listenAddr) - if err != nil { - panic(err) - } - for { - conn, err := ln.Accept() - if err != nil { - fmt.Println(err) - continue - } - go handleConnection(serverAddr, conn) - } -} diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util.go deleted file mode 100644 index 769bbe878b..0000000000 --- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util.go +++ /dev/null @@ -1,54 +0,0 @@ -package zk - -import ( - "crypto/sha1" - "encoding/base64" - "fmt" - "math/rand" - "strconv" - "strings" -) - -// AuthACL produces an ACL list containing a single ACL which uses the -// provided permissions, with the scheme "auth", and ID "", which is used -// by ZooKeeper to represent any authenticated user. 
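-// A minimal usage sketch (the path, data, and credentials are
-// illustrative, not part of this package):
-//
-//	zk.AddAuth("digest", []byte("user:password"))
-//	zk.Create("/protected", data, 0, AuthACL(PermAll))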
-func AuthACL(perms int32) []ACL {
- return []ACL{{perms, "auth", ""}}
-}
-
-// WorldACL produces an ACL list containing a single ACL which uses the
-// provided permissions, with the scheme "world", and ID "anyone", which
-// is used by ZooKeeper to represent any user at all.
-func WorldACL(perms int32) []ACL {
- return []ACL{{perms, "world", "anyone"}}
-}
-
-func DigestACL(perms int32, user, password string) []ACL {
- userPass := []byte(fmt.Sprintf("%s:%s", user, password))
- h := sha1.New()
- if n, err := h.Write(userPass); err != nil || n != len(userPass) {
- panic("SHA1 failed")
- }
- digest := base64.StdEncoding.EncodeToString(h.Sum(nil))
- return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}}
-}
-
-// FormatServers takes a slice of addresses, and makes sure they are in a format
-// that resembles addr:port. If the server has no port provided, the
-// DefaultPort constant is added to the end.
-func FormatServers(servers []string) []string {
- for i := range servers {
- if !strings.Contains(servers[i], ":") {
- servers[i] = servers[i] + ":" + strconv.Itoa(DefaultPort)
- }
- }
- return servers
-}
-
-// stringShuffle performs a Fisher-Yates shuffle on a slice of strings
-func stringShuffle(s []string) {
- for i := len(s) - 1; i > 0; i-- {
- j := rand.Intn(i + 1)
- s[i], s[j] = s[j], s[i]
- }
-}
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go
deleted file mode 100644
index b56f77550d..0000000000
--- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package zk
-
-import "testing"
-
-func TestFormatServers(t *testing.T) {
- servers := []string{"127.0.0.1:2181", "127.0.0.42", "127.0.42.1:8811"}
- r := []string{"127.0.0.1:2181", "127.0.0.42:2181", "127.0.42.1:8811"}
-
- var s []string
- s = FormatServers(servers)
-
- for i := range s {
- if s[i] != r[i] {
- t.Errorf("%v should equal %v", s[i], r[i])
- }
- }
-}
diff --git a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go b/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go
deleted file mode 100644
index fdbe517273..0000000000
--- a/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go
+++ /dev/null
@@ -1,518 +0,0 @@
-package zk
-
-import (
- "fmt"
- "io"
- "net"
- "strings"
- "testing"
- "time"
-
- "camlistore.org/pkg/throttle"
-)
-
-func TestCreate(t *testing.T) {
- ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
- if err != nil {
- t.Fatal(err)
- }
- defer ts.Stop()
- zk, _, err := ts.ConnectAll()
- if err != nil {
- t.Fatalf("Connect returned error: %+v", err)
- }
- defer zk.Close()
-
- path := "/gozk-test"
-
- if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
- t.Fatalf("Delete returned error: %+v", err)
- }
- if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
- t.Fatalf("Create returned error: %+v", err)
- } else if p != path {
- t.Fatalf("Create returned different path '%s' != '%s'", p, path)
- }
- if data, stat, err := zk.Get(path); err != nil {
- t.Fatalf("Get returned error: %+v", err)
- } else if stat == nil {
- t.Fatal("Get returned nil stat")
- } else if len(data) < 4 {
- t.Fatal("Get returned wrong size data")
- }
-}
-
-func TestMulti(t *testing.T) {
- ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
- if err != nil {
- t.Fatal(err)
- }
- defer ts.Stop()
- zk, _, err := ts.ConnectAll()
- if err != nil {
- t.Fatalf("Connect returned
error: %+v", err) - } - defer zk.Close() - - path := "/gozk-test" - - if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - ops := []interface{}{ - &CreateRequest{Path: path, Data: []byte{1, 2, 3, 4}, Acl: WorldACL(PermAll)}, - &SetDataRequest{Path: path, Data: []byte{1, 2, 3, 4}, Version: -1}, - } - if res, err := zk.Multi(ops...); err != nil { - t.Fatalf("Multi returned error: %+v", err) - } else if len(res) != 2 { - t.Fatalf("Expected 2 responses got %d", len(res)) - } else { - t.Logf("%+v", res) - } - if data, stat, err := zk.Get(path); err != nil { - t.Fatalf("Get returned error: %+v", err) - } else if stat == nil { - t.Fatal("Get returned nil stat") - } else if len(data) < 4 { - t.Fatal("Get returned wrong size data") - } -} - -func TestGetSetACL(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - if err := zk.AddAuth("digest", []byte("blah")); err != nil { - t.Fatalf("AddAuth returned error %+v", err) - } - - path := "/gozk-test" - - if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - if path, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if path != "/gozk-test" { - t.Fatalf("Create returned different path '%s' != '/gozk-test'", path) - } - - expected := WorldACL(PermAll) - - if acl, stat, err := zk.GetACL(path); err != nil { - t.Fatalf("GetACL returned error %+v", err) - } else if stat == nil { - t.Fatalf("GetACL returned nil Stat") - } else if len(acl) != 1 || expected[0] != acl[0] { - t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl) - } - - expected = []ACL{{PermAll, "ip", "127.0.0.1"}} - - if stat, err := zk.SetACL(path, expected, -1); err != nil { - t.Fatalf("SetACL returned error %+v", err) - } else if stat == nil { - t.Fatalf("SetACL returned nil Stat") - } - - if acl, stat, err := zk.GetACL(path); err != nil { - t.Fatalf("GetACL returned error %+v", err) - } else if stat == nil { - t.Fatalf("GetACL returned nil Stat") - } else if len(acl) != 1 || expected[0] != acl[0] { - t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl) - } -} - -func TestAuth(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - path := "/gozk-digest-test" - if err := zk.Delete(path, -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - acl := DigestACL(PermAll, "user", "password") - - if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, acl); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if p != path { - t.Fatalf("Create returned different path '%s' != '%s'", p, path) - } - - if a, stat, err := zk.GetACL(path); err != nil { - t.Fatalf("GetACL returned error %+v", err) - } else if stat == nil { - t.Fatalf("GetACL returned nil Stat") - } else if len(a) != 1 || acl[0] != a[0] { - t.Fatalf("GetACL mismatch expected %+v instead of %+v", acl, a) - } - - if _, _, err := zk.Get(path); err != ErrNoAuth { - t.Fatalf("Get returned error %+v instead of ErrNoAuth", err) - } - - if err := 
zk.AddAuth("digest", []byte("user:password")); err != nil { - t.Fatalf("AddAuth returned error %+v", err) - } - - if data, stat, err := zk.Get(path); err != nil { - t.Fatalf("Get returned error %+v", err) - } else if stat == nil { - t.Fatalf("Get returned nil Stat") - } else if len(data) != 4 { - t.Fatalf("Get returned wrong data length") - } -} - -func TestChildWatch(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - children, stat, childCh, err := zk.ChildrenW("/") - if err != nil { - t.Fatalf("Children returned error: %+v", err) - } else if stat == nil { - t.Fatal("Children returned nil stat") - } else if len(children) < 1 { - t.Fatal("Children should return at least 1 child") - } - - if path, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if path != "/gozk-test" { - t.Fatalf("Create returned different path '%s' != '/gozk-test'", path) - } - - select { - case ev := <-childCh: - if ev.Err != nil { - t.Fatalf("Child watcher error %+v", ev.Err) - } - if ev.Path != "/" { - t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") - } - case _ = <-time.After(time.Second * 2): - t.Fatal("Child watcher timed out") - } - - // Delete of the watched node should trigger the watch - - children, stat, childCh, err = zk.ChildrenW("/gozk-test") - if err != nil { - t.Fatalf("Children returned error: %+v", err) - } else if stat == nil { - t.Fatal("Children returned nil stat") - } else if len(children) != 0 { - t.Fatal("Children should return 0 children") - } - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - select { - case ev := <-childCh: - if ev.Err != nil { - t.Fatalf("Child watcher error %+v", ev.Err) - } - if ev.Path != "/gozk-test" { - t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") - } - case _ = <-time.After(time.Second * 2): - t.Fatal("Child watcher timed out") - } -} - -func TestSetWatchers(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - zk.reconnectDelay = time.Second - - zk2, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk2.Close() - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - testPath, err := zk.Create("/gozk-test-2", []byte{}, 0, WorldACL(PermAll)) - if err != nil { - t.Fatalf("Create returned: %+v", err) - } - - _, _, testEvCh, err := zk.GetW(testPath) - if err != nil { - t.Fatalf("GetW returned: %+v", err) - } - - children, stat, childCh, err := zk.ChildrenW("/") - if err != nil { - t.Fatalf("Children returned error: %+v", err) - } else if stat == nil { - t.Fatal("Children returned nil stat") - } else if len(children) < 1 { - t.Fatal("Children should return at least 1 child") - } - - zk.conn.Close() - if err := zk2.Delete(testPath, -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned 
error: %+v", err) - } - time.Sleep(time.Millisecond * 100) - - if path, err := zk2.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil { - t.Fatalf("Create returned error: %+v", err) - } else if path != "/gozk-test" { - t.Fatalf("Create returned different path '%s' != '/gozk-test'", path) - } - - select { - case ev := <-testEvCh: - if ev.Err != nil { - t.Fatalf("GetW watcher error %+v", ev.Err) - } - if ev.Path != testPath { - t.Fatalf("GetW watcher wrong path %s instead of %s", ev.Path, testPath) - } - case <-time.After(2 * time.Second): - t.Fatal("GetW watcher timed out") - } - - select { - case ev := <-childCh: - if ev.Err != nil { - t.Fatalf("Child watcher error %+v", ev.Err) - } - if ev.Path != "/" { - t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") - } - case <-time.After(2 * time.Second): - t.Fatal("Child watcher timed out") - } -} - -func TestExpiringWatch(t *testing.T) { - ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "}) - if err != nil { - t.Fatal(err) - } - defer ts.Stop() - zk, _, err := ts.ConnectAll() - if err != nil { - t.Fatalf("Connect returned error: %+v", err) - } - defer zk.Close() - - if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode { - t.Fatalf("Delete returned error: %+v", err) - } - - children, stat, childCh, err := zk.ChildrenW("/") - if err != nil { - t.Fatalf("Children returned error: %+v", err) - } else if stat == nil { - t.Fatal("Children returned nil stat") - } else if len(children) < 1 { - t.Fatal("Children should return at least 1 child") - } - - zk.sessionID = 99999 - zk.conn.Close() - - select { - case ev := <-childCh: - if ev.Err != ErrSessionExpired { - t.Fatalf("Child watcher error %+v instead of expected ErrSessionExpired", ev.Err) - } - if ev.Path != "/" { - t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/") - } - case <-time.After(2 * time.Second): - t.Fatal("Child watcher timed out") - } -} - -func TestRequestFail(t *testing.T) { - // If connecting fails to all servers in the list then pending requests - // should be errored out so they don't hang forever. 
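- // The guard below runs the Get in its own goroutine and races it
- // against a two-second timer, so a hung request surfaces as a test
- // failure rather than a stuck test binary.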
-
- zk, _, err := Connect([]string{"127.0.0.1:32444"}, time.Second*15)
- if err != nil {
- t.Fatal(err)
- }
- defer zk.Close()
-
- ch := make(chan error)
- go func() {
- _, _, err := zk.Get("/blah")
- ch <- err
- }()
- select {
- case err := <-ch:
- if err == nil {
- t.Fatal("Expected non-nil error on failed request due to connection failure")
- }
- case <-time.After(time.Second * 2):
- t.Fatal("Get hung when connection could not be made")
- }
-}
-
-func TestSlowServer(t *testing.T) {
- ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
- if err != nil {
- t.Fatal(err)
- }
- defer ts.Stop()
-
- realAddr := fmt.Sprintf("127.0.0.1:%d", ts.Servers[0].Port)
- proxyAddr, stopCh, err := startSlowProxy(t,
- throttle.Rate{}, throttle.Rate{},
- realAddr, func(ln *throttle.Listener) {
- if ln.Up.Latency == 0 {
- ln.Up.Latency = time.Millisecond * 2000
- ln.Down.Latency = time.Millisecond * 2000
- } else {
- ln.Up.Latency = 0
- ln.Down.Latency = 0
- }
- })
- if err != nil {
- t.Fatal(err)
- }
- defer close(stopCh)
-
- zk, _, err := Connect([]string{proxyAddr}, time.Millisecond*500)
- if err != nil {
- t.Fatal(err)
- }
- defer zk.Close()
-
- _, _, wch, err := zk.ChildrenW("/")
- if err != nil {
- t.Fatal(err)
- }
-
- // Force a reconnect to get a throttled connection
- zk.conn.Close()
-
- time.Sleep(time.Millisecond * 100)
-
- if err := zk.Delete("/gozk-test", -1); err == nil {
- t.Fatal("Delete should have failed")
- }
-
- // The previous request should have timed out causing the server to be disconnected and reconnected
-
- if _, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
- t.Fatal(err)
- }
-
- // Make sure event is still returned because the session should not have been affected
- select {
- case ev := <-wch:
- t.Logf("Received event: %+v", ev)
- case <-time.After(time.Second):
- t.Fatal("Expected to receive a watch event")
- }
-}
-
-func startSlowProxy(t *testing.T, up, down throttle.Rate, upstream string, adj func(ln *throttle.Listener)) (string, chan bool, error) {
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- return "", nil, err
- }
- tln := &throttle.Listener{
- Listener: ln,
- Up: up,
- Down: down,
- }
- stopCh := make(chan bool)
- go func() {
- <-stopCh
- tln.Close()
- }()
- go func() {
- for {
- cn, err := tln.Accept()
- if err != nil {
- if !strings.Contains(err.Error(), "use of closed network connection") {
- t.Fatalf("Accept failed: %s", err.Error())
- }
- return
- }
- if adj != nil {
- adj(tln)
- }
- go func(cn net.Conn) {
- defer cn.Close()
- upcn, err := net.Dial("tcp", upstream)
- if err != nil {
- t.Log(err)
- return
- }
- // This will leave hanging goroutines until stopCh is closed
- // but it doesn't matter in the context of running tests.
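- // (A close on a channel acts as a broadcast: every goroutine
- // blocked on <-stopCh is released at once, which is why the single
- // close(stopCh) deferred in TestSlowServer tears down the listener
- // and all proxied connections together.)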
- go func() { - <-stopCh - upcn.Close() - }() - go func() { - if _, err := io.Copy(upcn, cn); err != nil { - if !strings.Contains(err.Error(), "use of closed network connection") { - // log.Printf("Upstream write failed: %s", err.Error()) - } - } - }() - if _, err := io.Copy(cn, upcn); err != nil { - if !strings.Contains(err.Error(), "use of closed network connection") { - // log.Printf("Upstream read failed: %s", err.Error()) - } - } - }(cn) - } - }() - return ln.Addr().String(), stopCh, nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/LICENSE b/Godeps/_workspace/src/github.com/syndtr/goleveldb/LICENSE deleted file mode 100644 index 4a772d1ab3..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2012 Suryandaru Triandana -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go deleted file mode 100644 index ccf390c9cf..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/memdb" -) - -type ErrBatchCorrupted struct { - Reason string -} - -func (e *ErrBatchCorrupted) Error() string { - return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) -} - -func newErrBatchCorrupted(reason string) error { - return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason}) -} - -const ( - batchHdrLen = 8 + 4 - batchGrowRec = 3000 -) - -type BatchReplay interface { - Put(key, value []byte) - Delete(key []byte) -} - -// Batch is a write batch. 
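-// A typical write path, as a minimal sketch (db is assumed to be an
-// opened *DB; the keys and values are illustrative):
-//
-//	b := new(Batch)
-//	b.Put([]byte("foo"), []byte("bar"))
-//	b.Delete([]byte("stale"))
-//	if err := db.Write(b, nil); err != nil {
-//		// handle the write error
-//	}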
-type Batch struct { - data []byte - rLen, bLen int - seq uint64 - sync bool -} - -func (b *Batch) grow(n int) { - off := len(b.data) - if off == 0 { - off = batchHdrLen - if b.data != nil { - b.data = b.data[:off] - } - } - if cap(b.data)-off < n { - if b.data == nil { - b.data = make([]byte, off, off+n) - } else { - odata := b.data - div := 1 - if b.rLen > batchGrowRec { - div = b.rLen / batchGrowRec - } - b.data = make([]byte, off, off+n+(off-batchHdrLen)/div) - copy(b.data, odata) - } - } -} - -func (b *Batch) appendRec(kt kType, key, value []byte) { - n := 1 + binary.MaxVarintLen32 + len(key) - if kt == ktVal { - n += binary.MaxVarintLen32 + len(value) - } - b.grow(n) - off := len(b.data) - data := b.data[:off+n] - data[off] = byte(kt) - off += 1 - off += binary.PutUvarint(data[off:], uint64(len(key))) - copy(data[off:], key) - off += len(key) - if kt == ktVal { - off += binary.PutUvarint(data[off:], uint64(len(value))) - copy(data[off:], value) - off += len(value) - } - b.data = data[:off] - b.rLen++ - // Include 8-byte ikey header - b.bLen += len(key) + len(value) + 8 -} - -// Put appends 'put operation' of the given key/value pair to the batch. -// It is safe to modify the contents of the argument after Put returns. -func (b *Batch) Put(key, value []byte) { - b.appendRec(ktVal, key, value) -} - -// Delete appends 'delete operation' of the given key to the batch. -// It is safe to modify the contents of the argument after Delete returns. -func (b *Batch) Delete(key []byte) { - b.appendRec(ktDel, key, nil) -} - -// Dump dumps batch contents. The returned slice can be loaded into the -// batch using Load method. -// The returned slice is not its own copy, so the contents should not be -// modified. -func (b *Batch) Dump() []byte { - return b.encode() -} - -// Load loads given slice into the batch. Previous contents of the batch -// will be discarded. -// The given slice will not be copied and will be used as batch buffer, so -// it is not safe to modify the contents of the slice. -func (b *Batch) Load(data []byte) error { - return b.decode(0, data) -} - -// Replay replays batch contents. -func (b *Batch) Replay(r BatchReplay) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - switch kt { - case ktVal: - r.Put(key, value) - case ktDel: - r.Delete(key) - } - }) -} - -// Len returns number of records in the batch. -func (b *Batch) Len() int { - return b.rLen -} - -// Reset resets the batch. -func (b *Batch) Reset() { - b.data = b.data[:0] - b.seq = 0 - b.rLen = 0 - b.bLen = 0 - b.sync = false -} - -func (b *Batch) init(sync bool) { - b.sync = sync -} - -func (b *Batch) append(p *Batch) { - if p.rLen > 0 { - b.grow(len(p.data) - batchHdrLen) - b.data = append(b.data, p.data[batchHdrLen:]...) - b.rLen += p.rLen - } - if p.sync { - b.sync = true - } -} - -// size returns sums of key/value pair length plus 8-bytes ikey. 
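-// For example, after Put([]byte("key1"), []byte("value1")) and
-// Delete([]byte("key2")) it reports (4+6+8) + (4+0+8) = 30 bytes,
-// matching what appendRec accumulates in bLen.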
-func (b *Batch) size() int { - return b.bLen -} - -func (b *Batch) encode() []byte { - b.grow(0) - binary.LittleEndian.PutUint64(b.data, b.seq) - binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen)) - - return b.data -} - -func (b *Batch) decode(prevSeq uint64, data []byte) error { - if len(data) < batchHdrLen { - return newErrBatchCorrupted("too short") - } - - b.seq = binary.LittleEndian.Uint64(data) - if b.seq < prevSeq { - return newErrBatchCorrupted("invalid sequence number") - } - b.rLen = int(binary.LittleEndian.Uint32(data[8:])) - if b.rLen < 0 { - return newErrBatchCorrupted("invalid records length") - } - // No need to be precise at this point, it won't be used anyway - b.bLen = len(data) - batchHdrLen - b.data = data - - return nil -} - -func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) { - off := batchHdrLen - for i := 0; i < b.rLen; i++ { - if off >= len(b.data) { - return newErrBatchCorrupted("invalid records length") - } - - kt := kType(b.data[off]) - if kt > ktVal { - return newErrBatchCorrupted("bad record: invalid type") - } - off += 1 - - x, n := binary.Uvarint(b.data[off:]) - off += n - if n <= 0 || off+int(x) > len(b.data) { - return newErrBatchCorrupted("bad record: invalid key length") - } - key := b.data[off : off+int(x)] - off += int(x) - var value []byte - if kt == ktVal { - x, n := binary.Uvarint(b.data[off:]) - off += n - if n <= 0 || off+int(x) > len(b.data) { - return newErrBatchCorrupted("bad record: invalid value length") - } - value = b.data[off : off+int(x)] - off += int(x) - } - - f(i, kt, key, value) - } - - return nil -} - -func (b *Batch) memReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Put(ikey, value) - }) -} - -func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error { - if err := b.decode(prevSeq, data); err != nil { - return err - } - return b.memReplay(to) -} - -func (b *Batch) revertMemReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Delete(ikey) - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go deleted file mode 100644 index 7fc842f4fe..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
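-
-// A minimal sketch of the exported round-trip that these tests drive
-// through the unexported encode/decode (keys and values illustrative):
-//
-//	b1 := new(Batch)
-//	b1.Put([]byte("key1"), []byte("value1"))
-//	b2 := new(Batch)
-//	if err := b2.Load(b1.Dump()); err != nil {
-//		// corrupted batch data
-//	}
-//	// b2.Replay(r) now feeds the same records to any BatchReplay.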
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/memdb" -) - -type tbRec struct { - kt kType - key, value []byte -} - -type testBatch struct { - rec []*tbRec -} - -func (p *testBatch) Put(key, value []byte) { - p.rec = append(p.rec, &tbRec{ktVal, key, value}) -} - -func (p *testBatch) Delete(key []byte) { - p.rec = append(p.rec, &tbRec{ktDel, key, nil}) -} - -func compareBatch(t *testing.T, b1, b2 *Batch) { - if b1.seq != b2.seq { - t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq) - } - if b1.Len() != b2.Len() { - t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len()) - } - p1, p2 := new(testBatch), new(testBatch) - err := b1.Replay(p1) - if err != nil { - t.Fatal("error when replaying batch 1: ", err) - } - err = b2.Replay(p2) - if err != nil { - t.Fatal("error when replaying batch 2: ", err) - } - for i := range p1.rec { - r1, r2 := p1.rec[i], p2.rec[i] - if r1.kt != r2.kt { - t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt) - } - if !bytes.Equal(r1.key, r2.key) { - t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) - } - if r1.kt == ktVal { - if !bytes.Equal(r1.value, r2.value) { - t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) - } - } - } -} - -func TestBatch_EncodeDecode(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("k"), []byte("")) - b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz")) - b1.Delete([]byte("key10000")) - b1.Delete([]byte("k")) - buf := b1.encode() - b2 := new(Batch) - err := b2.decode(0, buf) - if err != nil { - t.Error("error when decoding batch: ", err) - } - compareBatch(t, b1, b2) -} - -func TestBatch_Append(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("foo"), []byte("foovalue")) - b1.Put([]byte("bar"), []byte("barvalue")) - b2a := new(Batch) - b2a.seq = 10009 - b2a.Put([]byte("key1"), []byte("value1")) - b2a.Put([]byte("key2"), []byte("value2")) - b2a.Delete([]byte("key1")) - b2b := new(Batch) - b2b.Put([]byte("foo"), []byte("foovalue")) - b2b.Put([]byte("bar"), []byte("barvalue")) - b2a.append(b2b) - compareBatch(t, b1, b2a) -} - -func TestBatch_Size(t *testing.T) { - b := new(Batch) - for i := 0; i < 2; i++ { - b.Put([]byte("key1"), []byte("value1")) - b.Put([]byte("key2"), []byte("value2")) - b.Delete([]byte("key1")) - b.Put([]byte("foo"), []byte("foovalue")) - b.Put([]byte("bar"), []byte("barvalue")) - mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) - b.memReplay(mem) - if b.size() != mem.Size() { - t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) - } - b.Reset() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go deleted file mode 100644 index 0dd60fd829..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build !go1.2 - -package leveldb - -import ( - "sync/atomic" - "testing" -) - -func BenchmarkDBReadConcurrent(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - for pb.Next() && iter.Next() { - } - }) -} - -func BenchmarkDBReadConcurrent2(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - var dir uint32 - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - if atomic.AddUint32(&dir, 1)%2 == 0 { - for pb.Next() && iter.Next() { - } - } else { - if pb.Next() && iter.Last() { - for pb.Next() && iter.Prev() { - } - } - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go deleted file mode 100644 index 91b426709d..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -func randomString(r *rand.Rand, n int) []byte { - b := new(bytes.Buffer) - for i := 0; i < n; i++ { - b.WriteByte(' ' + byte(r.Intn(95))) - } - return b.Bytes() -} - -func compressibleStr(r *rand.Rand, frac float32, n int) []byte { - nn := int(float32(n) * frac) - rb := randomString(r, nn) - b := make([]byte, 0, n+nn) - for len(b) < n { - b = append(b, rb...) - } - return b[:n] -} - -type valueGen struct { - src []byte - pos int -} - -func newValueGen(frac float32) *valueGen { - v := new(valueGen) - r := rand.New(rand.NewSource(301)) - v.src = make([]byte, 0, 1048576+100) - for len(v.src) < 1048576 { - v.src = append(v.src, compressibleStr(r, frac, 100)...) 
- } - return v -} - -func (v *valueGen) get(n int) []byte { - if v.pos+n > len(v.src) { - v.pos = 0 - } - v.pos += n - return v.src[v.pos-n : v.pos] -} - -var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) - -type dbBench struct { - b *testing.B - stor storage.Storage - db *DB - - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions - - keys, values [][]byte -} - -func openDBBench(b *testing.B, noCompress bool) *dbBench { - _, err := os.Stat(benchDB) - if err == nil { - err = os.RemoveAll(benchDB) - if err != nil { - b.Fatal("cannot remove old db: ", err) - } - } - - p := &dbBench{ - b: b, - o: &opt.Options{}, - ro: &opt.ReadOptions{}, - wo: &opt.WriteOptions{}, - } - p.stor, err = storage.OpenFile(benchDB) - if err != nil { - b.Fatal("cannot open stor: ", err) - } - if noCompress { - p.o.Compression = opt.NoCompression - } - - p.db, err = Open(p.stor, p.o) - if err != nil { - b.Fatal("cannot open db: ", err) - } - - runtime.GOMAXPROCS(runtime.NumCPU()) - return p -} - -func (p *dbBench) reopen() { - p.db.Close() - var err error - p.db, err = Open(p.stor, p.o) - if err != nil { - p.b.Fatal("Reopen: got error: ", err) - } -} - -func (p *dbBench) populate(n int) { - p.keys, p.values = make([][]byte, n), make([][]byte, n) - v := newValueGen(0.5) - for i := range p.keys { - p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) - } -} - -func (p *dbBench) randomize() { - m := len(p.keys) - times := m * 2 - r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) - for n := 0; n < times; n++ { - i, j := r1.Int()%m, r2.Int()%m - if i == j { - continue - } - p.keys[i], p.keys[j] = p.keys[j], p.keys[i] - p.values[i], p.values[j] = p.values[j], p.values[i] - } -} - -func (p *dbBench) writes(perBatch int) { - b := p.b - db := p.db - - n := len(p.keys) - m := n / perBatch - if n%perBatch > 0 { - m++ - } - batches := make([]Batch, m) - j := 0 - for i := range batches { - first := true - for ; j < n && ((j+1)%perBatch != 0 || first); j++ { - first = false - batches[i].Put(p.keys[j], p.values[j]) - } - } - runtime.GC() - - b.ResetTimer() - b.StartTimer() - for i := range batches { - err := db.Write(&(batches[i]), p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) gc() { - p.keys, p.values = nil, nil - runtime.GC() -} - -func (p *dbBench) puts() { - b := p.b - db := p.db - - b.ResetTimer() - b.StartTimer() - for i := range p.keys { - err := db.Put(p.keys[i], p.values[i], p.wo) - if err != nil { - b.Fatal("put failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) fill() { - b := p.b - db := p.db - - perBatch := 10000 - batch := new(Batch) - for i, n := 0, len(p.keys); i < n; { - first := true - for ; i < n && ((i+1)%perBatch != 0 || first); i++ { - first = false - batch.Put(p.keys[i], p.values[i]) - } - err := db.Write(batch, p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - batch.Reset() - } -} - -func (p *dbBench) gets() { - b := p.b - db := p.db - - b.ResetTimer() - for i := range p.keys { - _, err := db.Get(p.keys[i], p.ro) - if err != nil { - b.Error("got error: ", err) - } - } - b.StopTimer() -} - -func (p *dbBench) seeks() { - b := p.b - - iter := p.newIter() - defer iter.Release() - b.ResetTimer() - for i := range p.keys { - if !iter.Seek(p.keys[i]) { - b.Error("value not found for: ", string(p.keys[i])) - } - } - b.StopTimer() -} - -func (p *dbBench) newIter() iterator.Iterator { - iter := 
p.db.NewIterator(nil, p.ro) - err := iter.Error() - if err != nil { - p.b.Fatal("cannot create iterator: ", err) - } - return iter -} - -func (p *dbBench) close() { - if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { - p.b.Log("Block pool stats: ", bp) - } - p.db.Close() - p.stor.Close() - os.RemoveAll(benchDB) - p.db = nil - p.keys = nil - p.values = nil - runtime.GC() - runtime.GOMAXPROCS(1) -} - -func BenchmarkDBWrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatch(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatchUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBWriteRandomSync(b *testing.B) { - p := openDBBench(b, false) - p.wo.Sync = true - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBPut(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.puts() - p.close() -} - -func BenchmarkDBRead(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadGC(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverse(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverseTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBSeek(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.seeks() - p.close() -} - -func BenchmarkDBSeekRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.seeks() - p.close() -} - -func BenchmarkDBGet(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gets() - p.close() -} - -func BenchmarkDBGetRandom(b 
*testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.gets() - p.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go deleted file mode 100644 index 175e222032..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.2 - -package cache - -import ( - "math/rand" - "testing" - "time" // required by time.Now below; missing from the original import block -) - -func BenchmarkLRUCache(b *testing.B) { - c := NewCache(NewLRU(10000)) - - b.SetParallelism(10) - b.RunParallel(func(pb *testing.PB) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for pb.Next() { - key := uint64(r.Intn(1000000)) - c.Get(0, key, func() (int, Value) { - return 1, key - }).Release() - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go deleted file mode 100644 index c9670de5de..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ /dev/null @@ -1,676 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package cache provides the interface and implementation of cache algorithms. -package cache - -import ( - "sync" - "sync/atomic" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Cacher provides the interface for implementing caching functionality. -// An implementation must be goroutine-safe. -type Cacher interface { - // Capacity returns cache capacity. - Capacity() int - - // SetCapacity sets cache capacity. - SetCapacity(capacity int) - - // Promote promotes the 'cache node'. - Promote(n *Node) - - // Ban evicts the 'cache node' and prevents a subsequent 'promote'. - Ban(n *Node) - - // Evict evicts the 'cache node'. - Evict(n *Node) - - // EvictNS evicts 'cache nodes' with the given namespace. - EvictNS(ns uint64) - - // EvictAll evicts all 'cache nodes'. - EvictAll() - - // Close closes the 'cache tree'. - Close() error -} - -// Value is a 'cacheable object'. It may implement util.Releaser; if -// so, the Release method will be called once the object is released. -type Value interface{} - -type CacheGetter struct { - Cache *Cache - NS uint64 -} - -func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { - return g.Cache.Get(g.NS, key, setFunc) -} - -// The hash tables implementation is based on: -// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014. - -const ( - mInitialSize = 1 << 4 - mOverflowThreshold = 1 << 5 - mOverflowGrowThreshold = 1 << 7 -) - -type mBucket struct { - mu sync.Mutex - node []*Node - frozen bool -} - -func (b *mBucket) freeze() []*Node { - b.mu.Lock() - defer b.mu.Unlock() - if !b.frozen { - b.frozen = true - } - return b.node -} - -func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node.
- for _, n := range b.node { - if n.hash == hash && n.ns == ns && n.key == key { - atomic.AddInt32(&n.ref, 1) - b.mu.Unlock() - return true, false, n - } - } - - // Get only. - if noset { - b.mu.Unlock() - return true, false, nil - } - - // Create node. - n = &Node{ - r: r, - hash: hash, - ns: ns, - key: key, - ref: 1, - } - // Add node to bucket. - b.node = append(b.node, n) - bLen := len(b.node) - b.mu.Unlock() - - // Update counter. - grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold - if bLen > mOverflowThreshold { - grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold - } - - // Grow. - if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) << 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - - return true, true, n -} - -func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. - var ( - n *Node - bLen int - ) - for i := range b.node { - n = b.node[i] - if n.ns == ns && n.key == key { - if atomic.LoadInt32(&n.ref) == 0 { - deleted = true - - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Remove node from bucket. - b.node = append(b.node[:i], b.node[i+1:]...) - bLen = len(b.node) - } - break - } - } - b.mu.Unlock() - - if deleted { - // Call OnDel. - for _, f := range n.onDel { - f() - } - - // Update counter. - atomic.AddInt32(&r.size, int32(n.size)*-1) - shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold - if bLen >= mOverflowThreshold { - atomic.AddInt32(&h.overflow, -1) - } - - // Shrink. - if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) >> 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - } - - return true, deleted -} - -type mNode struct { - buckets []unsafe.Pointer // []*mBucket - mask uint32 - pred unsafe.Pointer // *mNode - resizeInProgess int32 - - overflow int32 - growThreshold int32 - shrinkThreshold int32 -} - -func (n *mNode) initBucket(i uint32) *mBucket { - if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { - return b - } - - p := (*mNode)(atomic.LoadPointer(&n.pred)) - if p != nil { - var node []*Node - if n.mask > p.mask { - // Grow. - pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) - if pb == nil { - pb = p.initBucket(i & p.mask) - } - m := pb.freeze() - // Split nodes. - for _, x := range m { - if x.hash&n.mask == i { - node = append(node, x) - } - } - } else { - // Shrink. 
- pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) - if pb0 == nil { - pb0 = p.initBucket(i) - } - pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) - if pb1 == nil { - pb1 = p.initBucket(i + uint32(len(n.buckets))) - } - m0 := pb0.freeze() - m1 := pb1.freeze() - // Merge nodes. - node = make([]*Node, 0, len(m0)+len(m1)) - node = append(node, m0...) - node = append(node, m1...) - } - b := &mBucket{node: node} - if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { - if len(node) > mOverflowThreshold { - atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) - } - return b - } - } - - return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) -} - -func (n *mNode) initBuckets() { - for i := range n.buckets { - n.initBucket(uint32(i)) - } - atomic.StorePointer(&n.pred, nil) -} - -// Cache is a 'cache map'. -type Cache struct { - mu sync.RWMutex - mHead unsafe.Pointer // *mNode - nodes int32 - size int32 - cacher Cacher - closed bool -} - -// NewCache creates a new 'cache map'. The cacher is optional and -// may be nil. -func NewCache(cacher Cacher) *Cache { - h := &mNode{ - buckets: make([]unsafe.Pointer, mInitialSize), - mask: mInitialSize - 1, - growThreshold: int32(mInitialSize * mOverflowThreshold), - shrinkThreshold: 0, - } - for i := range h.buckets { - h.buckets[i] = unsafe.Pointer(&mBucket{}) - } - r := &Cache{ - mHead: unsafe.Pointer(h), - cacher: cacher, - } - return r -} - -func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { - h := (*mNode)(atomic.LoadPointer(&r.mHead)) - i := hash & h.mask - b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) - if b == nil { - b = h.initBucket(i) - } - return h, b -} - -func (r *Cache) delete(n *Node) bool { - for { - h, b := r.getBucket(n.hash) - done, deleted := b.delete(r, h, n.hash, n.ns, n.key) - if done { - return deleted - } - } - return false -} - -// Nodes returns the number of 'cache nodes' in the map. -func (r *Cache) Nodes() int { - return int(atomic.LoadInt32(&r.nodes)) -} - -// Size returns the sum of 'cache node' sizes in the map. -func (r *Cache) Size() int { - return int(atomic.LoadInt32(&r.size)) -} - -// Capacity returns cache capacity. -func (r *Cache) Capacity() int { - if r.cacher == nil { - return 0 - } - return r.cacher.Capacity() -} - -// SetCapacity sets cache capacity. -func (r *Cache) SetCapacity(capacity int) { - if r.cacher != nil { - r.cacher.SetCapacity(capacity) - } -} - -// Get gets the 'cache node' with the given namespace and key. -// If the cache node is not found and setFunc is not nil, Get will atomically create -// the 'cache node' by calling setFunc. Otherwise Get will return nil. -// -// The returned 'cache handle' should be released after use, by calling the Release -// method. -func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return nil - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, setFunc == nil) - if done { - if n != nil { - n.mu.Lock() - if n.value == nil { - if setFunc == nil { - n.mu.Unlock() - n.unref() - return nil - } - - n.size, n.value = setFunc() - if n.value == nil { - n.size = 0 - n.mu.Unlock() - n.unref() - return nil - } - atomic.AddInt32(&r.size, int32(n.size)) - } - n.mu.Unlock() - if r.cacher != nil { - r.cacher.Promote(n) - } - return &Handle{unsafe.Pointer(n)} - } - - break - } - } - return nil -} - -// Delete removes and bans the 'cache node' with the given namespace and key.
-// A banned 'cache node' will never be inserted into the 'cache tree'. The ban -// applies only to that particular 'cache node', so when a 'cache node' -// is recreated it will not be banned. -// -// If onDel is not nil, it will be executed if such a 'cache node' -// doesn't exist, or once the 'cache node' is released. -// -// Delete returns true if such a 'cache node' exists. -func (r *Cache) Delete(ns, key uint64, onDel func()) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if onDel != nil { - n.mu.Lock() - n.onDel = append(n.onDel, onDel) - n.mu.Unlock() - } - if r.cacher != nil { - r.cacher.Ban(n) - } - n.unref() - return true - } - - break - } - } - - if onDel != nil { - onDel() - } - - return false -} - -// Evict evicts the 'cache node' with the given namespace and key. This will -// simply call Cacher.Evict. -// -// Evict returns true if such a 'cache node' exists. -func (r *Cache) Evict(ns, key uint64) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if r.cacher != nil { - r.cacher.Evict(n) - } - n.unref() - return true - } - - break - } - } - - return false -} - -// EvictNS evicts 'cache nodes' with the given namespace. This will -// simply call Cacher.EvictNS. -func (r *Cache) EvictNS(ns uint64) { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictNS(ns) - } -} - -// EvictAll evicts all 'cache nodes'. This will simply call Cacher.EvictAll. -func (r *Cache) EvictAll() { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictAll() - } -} - -// Close closes the 'cache map' and releases all 'cache nodes'. -func (r *Cache) Close() error { - r.mu.Lock() - if !r.closed { - r.closed = true - - if r.cacher != nil { - if err := r.cacher.Close(); err != nil { - r.mu.Unlock() // don't leak the lock on the error path - return err - } - } - - h := (*mNode)(r.mHead) - h.initBuckets() - - for i := range h.buckets { - b := (*mBucket)(h.buckets[i]) - for _, n := range b.node { - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Call OnDel. - for _, f := range n.onDel { - f() - } - } - } - } - r.mu.Unlock() - return nil -} - -// Node is a 'cache node'. -type Node struct { - r *Cache - - hash uint32 - ns, key uint64 - - mu sync.Mutex - size int - value Value - - ref int32 - onDel []func() - - CacheData unsafe.Pointer -} - -// NS returns this 'cache node' namespace. -func (n *Node) NS() uint64 { - return n.ns -} - -// Key returns this 'cache node' key. -func (n *Node) Key() uint64 { - return n.key -} - -// Size returns this 'cache node' size. -func (n *Node) Size() int { - return n.size -} - -// Value returns this 'cache node' value. -func (n *Node) Value() Value { - return n.value -} - -// Ref returns this 'cache node' ref counter. -func (n *Node) Ref() int32 { - return atomic.LoadInt32(&n.ref) -} - -// GetHandle returns a handle for this 'cache node'.
-func (n *Node) GetHandle() *Handle { - if atomic.AddInt32(&n.ref, 1) <= 1 { - panic("BUG: Node.GetHandle on zero ref") - } - return &Handle{unsafe.Pointer(n)} -} - -func (n *Node) unref() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.delete(n) - } -} - -func (n *Node) unrefLocked() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.mu.RLock() - if !n.r.closed { - n.r.delete(n) - } - n.r.mu.RUnlock() - } -} - -type Handle struct { - n unsafe.Pointer // *Node -} - -func (h *Handle) Value() Value { - n := (*Node)(atomic.LoadPointer(&h.n)) - if n != nil { - return n.value - } - return nil -} - -func (h *Handle) Release() { - nPtr := atomic.LoadPointer(&h.n) - if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { - n := (*Node)(nPtr) - n.unrefLocked() - } -} - -func murmur32(ns, key uint64, seed uint32) uint32 { - const ( - m = uint32(0x5bd1e995) - r = 24 - ) - - k1 := uint32(ns >> 32) - k2 := uint32(ns) - k3 := uint32(key >> 32) - k4 := uint32(key) - - k1 *= m - k1 ^= k1 >> r - k1 *= m - - k2 *= m - k2 ^= k2 >> r - k2 *= m - - k3 *= m - k3 ^= k3 >> r - k3 *= m - - k4 *= m - k4 ^= k4 >> r - k4 *= m - - h := seed - - h *= m - h ^= k1 - h *= m - h ^= k2 - h *= m - h ^= k3 - h *= m - h ^= k4 - - h ^= h >> 13 - h *= m - h ^= h >> 15 - - return h -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go deleted file mode 100644 index c2a50156f0..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
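For the cache API that ends above, a minimal usage sketch may help; it assumes only the exported names shown in the deleted file (NewCache, NewLRU, Get, Delete, Handle), and the namespace/key values here are arbitrary:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// A 'cache map' backed by an LRU cacher; capacity is measured in the
	// same units as the size charged by each setFunc.
	c := cache.NewCache(cache.NewLRU(10))

	// Get returns the existing 'cache node' or atomically creates it by
	// calling setFunc; the returned handle pins the node until released.
	h := c.Get(1, 42, func() (size int, value cache.Value) {
		return 1, "hello"
	})
	fmt.Println(h.Value()) // hello
	h.Release()

	// Delete bans the node; onDel fires once the node is fully released.
	c.Delete(1, 42, func() { fmt.Println("deleted") })
}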
- -package cache - -import ( - "math/rand" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" -) - -type int32o int32 - -func (o *int32o) acquire() { - if atomic.AddInt32((*int32)(o), 1) != 1 { - panic("BUG: invalid ref") - } -} - -func (o *int32o) Release() { - if atomic.AddInt32((*int32)(o), -1) != 0 { - panic("BUG: invalid ref") - } -} - -type releaserFunc struct { - fn func() - value Value -} - -func (r releaserFunc) Release() { - if r.fn != nil { - r.fn() - } -} - -func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle { - return c.Get(ns, key, func() (int, Value) { - if relf != nil { - return charge, releaserFunc{relf, value} - } else { - return charge, value - } - }) -} - -func TestCacheMap(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - nsx := []struct { - nobjects, nhandles, concurrent, repeat int - }{ - {10000, 400, 50, 3}, - {100000, 1000, 100, 10}, - } - - var ( - objects [][]int32o - handles [][]unsafe.Pointer - ) - - for _, x := range nsx { - objects = append(objects, make([]int32o, x.nobjects)) - handles = append(handles, make([]unsafe.Pointer, x.nhandles)) - } - - c := NewCache(nil) - - wg := new(sync.WaitGroup) - var done int32 - - for ns, x := range nsx { - for i := 0; i < x.concurrent; i++ { - wg.Add(1) - go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for j := len(objects) * repeat; j >= 0; j-- { - key := uint64(r.Intn(len(objects))) - h := c.Get(uint64(ns), key, func() (int, Value) { - o := &objects[key] - o.acquire() - return 1, o - }) - if v := h.Value().(*int32o); v != &objects[key] { - t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v) - } - if objects[key] != 1 { - t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key]) - } - if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) { - h.Release() - } - } - }(ns, i, x.repeat, objects[ns], handles[ns]) - } - - go func(handles []unsafe.Pointer) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for atomic.LoadInt32(&done) == 0 { - i := r.Intn(len(handles)) - h := (*Handle)(atomic.LoadPointer(&handles[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) { - h.Release() - } - time.Sleep(time.Millisecond) - } - }(handles[ns]) - } - - go func() { - handles := make([]*Handle, 100000) - for atomic.LoadInt32(&done) == 0 { - for i := range handles { - handles[i] = c.Get(999999999, uint64(i), func() (int, Value) { - return 1, 1 - }) - } - for _, h := range handles { - h.Release() - } - } - }() - - wg.Wait() - - atomic.StoreInt32(&done, 1) - - for _, handles0 := range handles { - for i := range handles0 { - h := (*Handle)(atomic.LoadPointer(&handles0[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) { - h.Release() - } - } - } - - for ns, objects0 := range objects { - for i, o := range objects0 { - if o != 0 { - t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o) - } - } - } -} - -func TestCacheMap_NodesAndSize(t *testing.T) { - c := NewCache(nil) - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } - set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 2, nil) - set(c, 1, 1, 3, 3, nil) - set(c, 2, 1, 4, 1, nil) - if c.Nodes() != 4 { - t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes()) - } - if c.Size() != 7 
{ - t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size()) - } -} - -func TestLRUCache_Capacity(t *testing.T) { - c := NewCache(NewLRU(10)) - if c.Capacity() != 10 { - t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity()) - } - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 2, nil).Release() - set(c, 1, 1, 3, 3, nil).Release() - set(c, 2, 1, 4, 1, nil).Release() - set(c, 2, 2, 5, 1, nil).Release() - set(c, 2, 3, 6, 1, nil).Release() - set(c, 2, 4, 7, 1, nil).Release() - set(c, 2, 5, 8, 1, nil).Release() - if c.Nodes() != 7 { - t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes()) - } - if c.Size() != 10 { - t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size()) - } - c.SetCapacity(9) - if c.Capacity() != 9 { - t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity()) - } - if c.Nodes() != 6 { - t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes()) - } - if c.Size() != 8 { - t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size()) - } -} - -func TestCacheMap_NilValue(t *testing.T) { - c := NewCache(NewLRU(10)) - h := c.Get(0, 0, func() (size int, value Value) { - return 1, nil - }) - if h != nil { - t.Error("cache handle is non-nil") - } - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } -} - -func TestLRUCache_GetLatency(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - const ( - concurrentSet = 30 - concurrentGet = 3 - duration = 3 * time.Second - delay = 3 * time.Millisecond - maxkey = 100000 - ) - - var ( - set, getHit, getAll int32 - getMaxLatency, getDuration int64 - ) - - c := NewCache(NewLRU(5000)) - wg := &sync.WaitGroup{} - until := time.Now().Add(duration) - for i := 0; i < concurrentSet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for time.Now().Before(until) { - c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) { - time.Sleep(delay) - atomic.AddInt32(&set, 1) - return 1, 1 - }).Release() - } - }(i) - } - for i := 0; i < concurrentGet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for { - mark := time.Now() - if mark.Before(until) { - h := c.Get(0, uint64(r.Intn(maxkey)), nil) - latency := int64(time.Now().Sub(mark)) - m := atomic.LoadInt64(&getMaxLatency) - if latency > m { - atomic.CompareAndSwapInt64(&getMaxLatency, m, latency) - } - atomic.AddInt64(&getDuration, latency) - if h != nil { - atomic.AddInt32(&getHit, 1) - h.Release() - } - atomic.AddInt32(&getAll, 1) - } else { - break - } - } - }(i) - } - - wg.Wait() - getAvglatency := time.Duration(getDuration) / time.Duration(getAll) - t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v", - set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency) - - if getAvglatency > delay/3 { - t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency) - } -} - -func TestLRUCache_HitMiss(t *testing.T) { - cases := []struct { - key uint64 - value string - }{ - {1, "vvvvvvvvv"}, - {100, "v1"}, - {0, "v2"}, - {12346, "v3"}, - {777, "v4"}, - {999, "v5"}, - {7654, "v6"}, - {2, "v7"}, - {3, "v8"}, - {9, "v9"}, - } - - setfin := 0 - c := NewCache(NewLRU(1000)) - for i, x := range cases { - set(c, 0, x.key, x.value, len(x.value), func() { - setfin++ - }).Release() - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j <= i { - // should hit - if h == nil { - t.Errorf("case '%d' 
iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - for i, x := range cases { - finalizerOk := false - c.Delete(0, x.key, func() { - finalizerOk = true - }) - - if !finalizerOk { - t.Errorf("case %d delete finalizer not executed", i) - } - - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j > i { - // should hit - if h == nil { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - if setfin != len(cases) { - t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) - } -} - -func TestLRUCache_Eviction(t *testing.T) { - c := NewCache(NewLRU(12)) - o1 := set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 1, nil).Release() - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - set(c, 0, 5, 5, 1, nil).Release() - if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2 - h.Release() - } - set(c, 0, 9, 9, 10, nil).Release() // 5,2,9 - - for _, key := range []uint64{9, 2, 5, 1} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - o1.Release() - for _, key := range []uint64{1, 2, 5} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - for _, key := range []uint64{3, 4, 9} { - h := c.Get(0, key, nil) - if h != nil { - t.Errorf("hit for key '%d'", key) - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } -} - -func TestLRUCache_Evict(t *testing.T) { - c := NewCache(NewLRU(6)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - set(c, 1, 1, 4, 1, nil).Release() - set(c, 1, 2, 5, 1, nil).Release() - set(c, 2, 1, 6, 1, nil).Release() - set(c, 2, 2, 7, 1, nil).Release() - - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d.%d return nil", ns, key) - } - } - } - - if ok := c.Evict(0, 1); !ok { - t.Error("first Cache.Evict on #0.1 return false") - } - if ok := c.Evict(0, 1); ok { - t.Error("second Cache.Evict on #0.1 return true") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value()) - } - - c.EvictNS(1) - if h := c.Get(1, 1, nil); h != nil { - t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value()) - } - if h := c.Get(1, 2, nil); h != nil { - t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value()) - } - - c.EvictAll() - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - 
if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value()) - } - } - } -} - -func TestLRUCache_Delete(t *testing.T) { - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - - if ok := c.Delete(0, 1, delFunc); !ok { - t.Error("Cache.Delete on #1 return false") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value()) - } - if ok := c.Delete(0, 1, delFunc); ok { - t.Error("Cache.Delete on #1 return true") - } - - h2 := c.Get(0, 2, nil) - if h2 == nil { - t.Error("Cache.Get on #2 return nil") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(1) Cache.Delete on #2 return false") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(2) Cache.Delete on #2 return false") - } - - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - c.Get(0, 2, nil).Release() - - for key := 2; key <= 4; key++ { - if h := c.Get(0, uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d return nil", key) - } - } - - h2.Release() - if h := c.Get(0, 2, nil); h != nil { - t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value()) - } - - if delFuncCalled != 4 { - t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled) - } -} - -func TestLRUCache_Close(t *testing.T) { - relFuncCalled := 0 - relFunc := func() { - relFuncCalled++ - } - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, relFunc).Release() - set(c, 0, 2, 2, 1, relFunc).Release() - - h3 := set(c, 0, 3, 3, 1, relFunc) - if h3 == nil { - t.Error("Cache.Get on #3 return nil") - } - if ok := c.Delete(0, 3, delFunc); !ok { - t.Error("Cache.Delete on #3 return false") - } - - c.Close() - - if relFuncCalled != 3 { - t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled) - } - if delFuncCalled != 1 { - t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go deleted file mode 100644 index d9a84cde15..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
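The LRU cacher that follows is what the capacity tests above exercise; a rough sketch of its charge-based eviction semantics, under the same assumptions as the previous example:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(cache.NewLRU(3))

	// Insert four entries with a charge of 1 each; once the total charge
	// exceeds the capacity of 3, the least recently used entry is evicted.
	for key := uint64(1); key <= 4; key++ {
		k := key // capture the loop variable for the closure
		c.Get(0, k, func() (int, cache.Value) { return 1, k }).Release()
	}
	fmt.Println(c.Nodes(), c.Size()) // 3 3 (key 1 was evicted)

	// Shrinking the capacity evicts immediately until the charge fits.
	c.SetCapacity(1)
	fmt.Println(c.Nodes(), c.Size()) // 1 1
}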
- -package cache - -import ( - "sync" - "unsafe" -) - -type lruNode struct { - n *Node - h *Handle - ban bool - - next, prev *lruNode -} - -func (n *lruNode) insert(at *lruNode) { - x := at.next - at.next = n - n.prev = at - n.next = x - x.prev = n -} - -func (n *lruNode) remove() { - if n.prev != nil { - n.prev.next = n.next - n.next.prev = n.prev - n.prev = nil - n.next = nil - } else { - panic("BUG: removing removed node") - } -} - -type lru struct { - mu sync.Mutex - capacity int - used int - recent lruNode -} - -func (r *lru) reset() { - r.recent.next = &r.recent - r.recent.prev = &r.recent - r.used = 0 -} - -func (r *lru) Capacity() int { - r.mu.Lock() - defer r.mu.Unlock() - return r.capacity -} - -func (r *lru) SetCapacity(capacity int) { - var evicted []*lruNode - - r.mu.Lock() - r.capacity = capacity - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Promote(n *Node) { - var evicted []*lruNode - - r.mu.Lock() - if n.CacheData == nil { - if n.Size() <= r.capacity { - rn := &lruNode{n: n, h: n.GetHandle()} - rn.insert(&r.recent) - n.CacheData = unsafe.Pointer(rn) - r.used += n.Size() - - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.insert(&r.recent) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Ban(n *Node) { - r.mu.Lock() - if n.CacheData == nil { - n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true}) - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.ban = true - r.used -= rn.n.Size() - r.mu.Unlock() - - rn.h.Release() - rn.h = nil - return - } - } - r.mu.Unlock() -} - -func (r *lru) Evict(n *Node) { - r.mu.Lock() - rn := (*lruNode)(n.CacheData) - if rn == nil || rn.ban { - r.mu.Unlock() - return - } - n.CacheData = nil - r.mu.Unlock() - - rn.h.Release() -} - -func (r *lru) EvictNS(ns uint64) { - var evicted []*lruNode - - r.mu.Lock() - for e := r.recent.prev; e != &r.recent; { - rn := e - e = e.prev - if rn.n.NS() == ns { - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) EvictAll() { - r.mu.Lock() - back := r.recent.prev - for rn := back; rn != &r.recent; rn = rn.prev { - rn.n.CacheData = nil - } - r.reset() - r.mu.Unlock() - - for rn := back; rn != &r.recent; rn = rn.prev { - rn.h.Release() - } -} - -func (r *lru) Close() error { - return nil -} - -// NewLRU create a new LRU-cache. -func NewLRU(capacity int) Cacher { - r := &lru{capacity: capacity} - r.reset() - return r -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go deleted file mode 100644 index d33d5e9c78..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import "github.com/syndtr/goleveldb/leveldb/comparer" - -type iComparer struct { - ucmp comparer.Comparer -} - -func (icmp *iComparer) uName() string { - return icmp.ucmp.Name() -} - -func (icmp *iComparer) uCompare(a, b []byte) int { - return icmp.ucmp.Compare(a, b) -} - -func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { - return icmp.ucmp.Separator(dst, a, b) -} - -func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { - return icmp.ucmp.Successor(dst, b) -} - -func (icmp *iComparer) Name() string { - return icmp.uName() -} - -func (icmp *iComparer) Compare(a, b []byte) int { - x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey()) - if x == 0 { - if m, n := iKey(a).num(), iKey(b).num(); m > n { - x = -1 - } else if m < n { - x = 1 - } - } - return x -} - -func (icmp *iComparer) Separator(dst, a, b []byte) []byte { - ua, ub := iKey(a).ukey(), iKey(b).ukey() - dst = icmp.ucmp.Separator(dst, ua, ub) - if dst == nil { - return nil - } - if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close the possibility that n may be longer than len(ub). - dst = append(dst, a[len(a)-8:]...) - } - return dst -} - -func (icmp *iComparer) Successor(dst, b []byte) []byte { - ub := iKey(b).ukey() - dst = icmp.ucmp.Successor(dst, ub) - if dst == nil { - return nil - } - if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close the possibility that n may be longer than len(ub). - dst = append(dst, b[len(b)-8:]...) - } - return dst -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go deleted file mode 100644 index 14dddf88dd..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package comparer - -import "bytes" - -type bytesComparer struct{} - -func (bytesComparer) Compare(a, b []byte) int { - return bytes.Compare(a, b) -} - -func (bytesComparer) Name() string { - return "leveldb.BytewiseComparator" -} - -func (bytesComparer) Separator(dst, a, b []byte) []byte { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && a[i] == b[i]; i++ { - } - if i >= n { - // Do not shorten if one string is a prefix of the other - } else if c := a[i]; c < 0xff && c+1 < b[i] { - dst = append(dst, a[:i+1]...) - dst[i]++ - return dst - } - return nil -} - -func (bytesComparer) Successor(dst, b []byte) []byte { - for i, c := range b { - if c != 0xff { - dst = append(dst, b[:i+1]...) - dst[i]++ - return dst - } - } - return nil -} - -// DefaultComparer is the default implementation of the Comparer interface. -// It uses the natural ordering, consistent with bytes.Compare.
-var DefaultComparer = bytesComparer{} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go deleted file mode 100644 index 14a28f16fc..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package comparer provides the interface and implementations for ordering -// sets of data. -package comparer - -// BasicComparer is the interface that wraps the basic Compare method. -type BasicComparer interface { - // Compare returns -1, 0, or +1 depending on whether a is 'less than', - // 'equal to' or 'greater than' b. The two arguments can only be 'equal' - // if their contents are exactly equal. Furthermore, the empty slice - // must be 'less than' any non-empty slice. - Compare(a, b []byte) int -} - -// Comparer defines a total ordering over the space of []byte keys: a 'less -// than' relationship. -type Comparer interface { - BasicComparer - - // Name returns the name of the comparer. - // - // The Level-DB on-disk format stores the comparer name, and opening a - // database with a different comparer from the one it was created with - // will result in an error. - // - // An implementation should switch to a new name whenever the comparer - // implementation changes in a way that will cause the relative ordering - // of any two keys to change. - // - // Names starting with "leveldb." are reserved and should not be used - // by any users of this package. - Name() string - - // Below are advanced functions used to reduce the space requirements - // for internal data structures such as index blocks. - - // Separator appends a sequence of bytes x to dst such that a <= x && x < b, - // where 'less than' is consistent with Compare. An implementation should - // return nil if x is equal to a. - // - // The contents of a and b must not be modified in any way. Doing so - // may cause corruption of the internal state. - Separator(dst, a, b []byte) []byte - - // Successor appends a sequence of bytes x to dst such that x >= b, where - // 'less than' is consistent with Compare. An implementation should return - // nil if x is equal to b. - // - // The contents of b must not be modified in any way. Doing so may cause - // corruption of the internal state. - Successor(dst, b []byte) []byte -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go deleted file mode 100644 index a351874ed4..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file.
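The Separator/Successor contract documented above is easiest to see on the bytewise DefaultComparer; a small sketch (the byte literals are chosen arbitrarily):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
)

func main() {
	var cmp comparer.Comparer = comparer.DefaultComparer

	// Separator yields a short key x with "abcdef" <= x && x < "abzz",
	// which lets index blocks store shorter boundary keys.
	fmt.Printf("%q\n", cmp.Separator(nil, []byte("abcdef"), []byte("abzz"))) // "abd"

	// Successor yields a short key x with x >= "abc".
	fmt.Printf("%q\n", cmp.Successor(nil, []byte("abc"))) // "b"
}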
- -package leveldb - -import ( - "bytes" - "fmt" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "io" - "math/rand" - "testing" -) - -const ctValSize = 1000 - -type dbCorruptHarness struct { - dbHarness -} - -func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { - h := new(dbCorruptHarness) - h.init(t, o) - return h -} - -func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { - return newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - }) -} - -func (h *dbCorruptHarness) recover() { - p := &h.dbHarness - t := p.t - - var err error - p.db, err = Recover(h.stor, h.o) - if err != nil { - t.Fatal("Repair: got error: ", err) - } -} - -func (h *dbCorruptHarness) build(n int) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := range rnd.Perm(n) { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Delete(tkey(rnd.Intn(max))) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) { - p := &h.dbHarness - t := p.t - - ff, _ := p.stor.GetFiles(ft) - sff := files(ff) - sff.sort() - if fi < 0 { - fi = len(sff) - 1 - } - if fi >= len(sff) { - t.Fatalf("no such file with type %q with index %d", ft, fi) - } - - file := sff[fi] - - r, err := file.Open() - if err != nil { - t.Fatal("cannot open file: ", err) - } - x, err := r.Seek(0, 2) - if err != nil { - t.Fatal("cannot query file size: ", err) - } - m := int(x) - if _, err := r.Seek(0, 0); err != nil { - t.Fatal(err) - } - - if offset < 0 { - if -offset > m { - offset = 0 - } else { - offset = m + offset - } - } - if offset > m { - offset = m - } - if offset+n > m { - n = m - offset - } - - buf := make([]byte, m) - _, err = io.ReadFull(r, buf) - if err != nil { - t.Fatal("cannot read file: ", err) - } - r.Close() - - for i := 0; i < n; i++ { - buf[offset+i] ^= 0x80 - } - - err = file.Remove() - if err != nil { - t.Fatal("cannot remove old file: ", err) - } - w, err := file.Create() - if err != nil { - t.Fatal("cannot create new file: ", err) - } - _, err = w.Write(buf) - if err != nil { - t.Fatal("cannot write new file: ", err) - } - w.Close() -} - -func (h *dbCorruptHarness) removeAll(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - for _, f := range ff { - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } - } -} - -func (h *dbCorruptHarness) removeOne(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - f := ff[rand.Intn(len(ff))] - h.t.Logf("removing file @%d", f.Num()) - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } -} - -func (h *dbCorruptHarness) check(min, max int) { - p := 
&h.dbHarness - t := p.t - db := p.db - - var n, badk, badv, missed, good int - iter := db.NewIterator(nil, p.ro) - for iter.Next() { - k := 0 - fmt.Sscanf(string(iter.Key()), "%d", &k) - if k < n { - badk++ - continue - } - missed += k - n - n = k + 1 - if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { - badv++ - } else { - good++ - } - } - err := iter.Error() - iter.Release() - t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", - min, max, good, badk, badv, missed, err) - if good < min || good > max { - t.Errorf("good entries number not in range") - } -} - -func TestCorruptDB_Journal(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.check(100, 100) - h.closeDB() - h.corrupt(storage.TypeJournal, -1, 19, 1) - h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1) - - h.openDB() - h.check(36, 36) - - h.close() -} - -func TestCorruptDB_Table(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(99, 99) - - h.close() -} - -func TestCorruptDB_TableIndex(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10000) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, -2000, 500) - - h.openDB() - h.check(5000, 9999) - - h.close() -} - -func TestCorruptDB_MissingManifest(t *testing.T) { - rnd := rand.New(rand.NewSource(0x0badda7a)) - h := newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - WriteBuffer: 1000 * 60, - }) - - h.build(1000) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.closeDB() - - h.stor.SetIgnoreOpenErr(storage.TypeManifest) - h.removeAll(storage.TypeManifest) - h.openAssert(false) - h.stor.SetIgnoreOpenErr(0) - - h.recover() - h.check(1000, 1000) - h.build(1000) - h.compactMem() - h.compactRange("", "") - h.closeDB() - - h.recover() - h.check(1000, 1000) - - h.close() -} - -func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.put("foo", "v4") - h.put("foo", "v5") - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.compactMem() - h.put("foo", "v4") - h.put("foo", "v5") - h.compactMem() - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_CorruptedManifest(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "hello") - h.compactMem() - h.compactRange("", "") - h.closeDB() - h.corrupt(storage.TypeManifest, -1, 0, 1000) - h.openAssert(false) - - h.recover() - h.getVal("foo", "hello") - - h.close() -} - -func TestCorruptDB_CompactionInputError(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(9, 9) - - h.build(10000) - h.check(10000, 10000) - - h.close() -} - -func TestCorruptDB_UnrelatedKeys(t 
*testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.put(string(tkey(1000)), string(tval(1000, ctValSize))) - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - h.compactMem() - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - - h.close() -} - -func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(1, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(0, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_MissingTableFiles(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("c", "v2") - h.put("d", "v2") - h.compactMem() - h.put("e", "v3") - h.put("f", "v3") - h.closeDB() - - h.removeOne(storage.TypeTable) - h.openAssert(false) - - h.close() -} - -func TestCorruptDB_RecoverTable(t *testing.T) { - h := newDbCorruptHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - Filter: filter.NewBloomFilter(10), - }) - - h.build(1000) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - seq := h.db.seq - h.closeDB() - h.corrupt(storage.TypeTable, 0, 1000, 1) - h.corrupt(storage.TypeTable, 3, 10000, 1) - // Corrupted filter shouldn't affect recovery. - h.corrupt(storage.TypeTable, 3, 113888, 10) - h.corrupt(storage.TypeTable, -1, 20000, 1) - - h.recover() - if h.db.seq != seq { - t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq) - } - h.check(985, 985) - - h.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go deleted file mode 100644 index def86bc1aa..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go +++ /dev/null @@ -1,1068 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "container/list" - "fmt" - "io" - "os" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/table" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// DB is a LevelDB database. -type DB struct { - // Need 64-bit alignment. - seq uint64 - - // Session. - s *session - - // MemDB. 
- memMu sync.RWMutex - memPool chan *memdb.DB - mem, frozenMem *memDB - journal *journal.Writer - journalWriter storage.Writer - journalFile storage.File - frozenJournalFile storage.File - frozenSeq uint64 - - // Snapshot. - snapsMu sync.Mutex - snapsList *list.List - - // Stats. - aliveSnaps, aliveIters int32 - - // Write. - writeC chan *Batch - writeMergedC chan bool - writeLockC chan struct{} - writeAckC chan error - writeDelay time.Duration - writeDelayN int - journalC chan *Batch - journalAckC chan error - - // Compaction. - tcompCmdC chan cCmd - tcompPauseC chan chan<- struct{} - mcompCmdC chan cCmd - compErrC chan error - compPerErrC chan error - compErrSetC chan error - compWriteLocking bool - compStats []cStats - - // Close. - closeW sync.WaitGroup - closeC chan struct{} - closed uint32 - closer io.Closer -} - -func openDB(s *session) (*DB, error) { - s.log("db@open opening") - start := time.Now() - db := &DB{ - s: s, - // Initial sequence - seq: s.stSeqNum, - // MemDB - memPool: make(chan *memdb.DB, 1), - // Snapshot - snapsList: list.New(), - // Write - writeC: make(chan *Batch), - writeMergedC: make(chan bool), - writeLockC: make(chan struct{}, 1), - writeAckC: make(chan error), - journalC: make(chan *Batch), - journalAckC: make(chan error), - // Compaction - tcompCmdC: make(chan cCmd), - tcompPauseC: make(chan chan<- struct{}), - mcompCmdC: make(chan cCmd), - compErrC: make(chan error), - compPerErrC: make(chan error), - compErrSetC: make(chan error), - compStats: make([]cStats, s.o.GetNumLevel()), - // Close - closeC: make(chan struct{}), - } - - // Read-only mode. - readOnly := s.o.GetReadOnly() - - if readOnly { - // Recover journals (read-only mode). - if err := db.recoverJournalRO(); err != nil { - return nil, err - } - } else { - // Recover journals. - if err := db.recoverJournal(); err != nil { - return nil, err - } - - // Remove any obsolete files. - if err := db.checkAndCleanFiles(); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return nil, err - } - - } - - // Doesn't need to be included in the wait group. - go db.compactionError() - go db.mpoolDrain() - - if readOnly { - db.SetReadOnly() - } else { - db.closeW.Add(3) - go db.tCompaction() - go db.mCompaction() - go db.jWriter() - } - - s.logf("db@open done T·%v", time.Since(start)) - - runtime.SetFinalizer(db, (*DB).Close) - return db, nil -} - -// Open opens or creates a DB for the given storage. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist Open will returns -// os.ErrExist error. -// -// Open will return an error with type of ErrCorrupted if corruption -// detected in the DB. Corrupted DB can be recovered with Recover -// function. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = s.recover() - if err != nil { - if !os.IsNotExist(err) || s.o.GetErrorIfMissing() { - return - } - err = s.create() - if err != nil { - return - } - } else if s.o.GetErrorIfExist() { - err = os.ErrExist - return - } - - return openDB(s) -} - -// OpenFile opens or creates a DB for the given path. -// The DB will be created if not exist, unless ErrorIfMissing is true. 
-// Also, if ErrorIfExist is true and the DB exists, OpenFile will return -// an os.ErrExist error. -// -// OpenFile uses the standard file-system-backed storage implementation as -// described in the leveldb/storage package. -// -// OpenFile will return an error of type ErrCorrupted if corruption is -// detected in the DB. A corrupted DB can be recovered with the Recover -// function. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling the Close method. -func OpenFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Open(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -// Recover recovers and opens a DB with missing or corrupted manifest files -// for the given storage. It will ignore any manifest files, valid or not. -// The DB must already exist or it will return an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling the Close method. -func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = recoverTable(s, o) - if err != nil { - return - } - return openDB(s) -} - -// RecoverFile recovers and opens a DB with missing or corrupted manifest files -// for the given path. It will ignore any manifest files, valid or not. -// The DB must already exist or it will return an error. -// Also, RecoverFile will ignore ErrorIfMissing and ErrorIfExist options. -// -// RecoverFile uses the standard file-system-backed storage implementation as described -// in the leveldb/storage package. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling the Close method. -func RecoverFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Recover(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -func recoverTable(s *session, o *opt.Options) error { - o = dupOptions(o) - // Mask StrictReader; let StrictRecovery do its job. - o.Strict &= ^opt.StrictReader - - // Get all tables and sort them by file number. - tableFiles_, err := s.getFiles(storage.TypeTable) - if err != nil { - return err - } - tableFiles := files(tableFiles_) - tableFiles.sort() - - var ( - maxSeq uint64 - recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int - - // We will drop corrupted tables. - strict = o.GetStrict(opt.StrictRecovery) - - rec = &sessionRecord{} - bpool = util.NewBufferPool(o.GetBlockSize() + 5) - ) - buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { - tmp = s.newTemp() - writer, err := tmp.Create() - if err != nil { - return - } - defer func() { - writer.Close() - if err != nil { - tmp.Remove() - tmp = nil - } - }() - - // Copy entries.
- tw := table.NewWriter(writer, o) - for iter.Next() { - key := iter.Key() - if validIkey(key) { - err = tw.Append(key, iter.Value()) - if err != nil { - return - } - } - } - err = iter.Error() - if err != nil { - return - } - err = tw.Close() - if err != nil { - return - } - err = writer.Sync() - if err != nil { - return - } - size = int64(tw.BytesLen()) - return - } - recoverTable := func(file storage.File) error { - s.logf("table@recovery recovering @%d", file.Num()) - reader, err := file.Open() - if err != nil { - return err - } - var closed bool - defer func() { - if !closed { - reader.Close() - } - }() - - // Get file size. - size, err := reader.Seek(0, 2) - if err != nil { - return err - } - - var ( - tSeq uint64 - tgoodKey, tcorruptedKey, tcorruptedBlock int - imin, imax []byte - ) - tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o) - if err != nil { - return err - } - iter := tr.NewIterator(nil, nil) - if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { - itererr.SetErrorCallback(func(err error) { - if errors.IsCorrupted(err) { - s.logf("table@recovery block corruption @%d %q", file.Num(), err) - tcorruptedBlock++ - } - }) - } - - // Scan the table. - for iter.Next() { - key := iter.Key() - _, seq, _, kerr := parseIkey(key) - if kerr != nil { - tcorruptedKey++ - continue - } - tgoodKey++ - if seq > tSeq { - tSeq = seq - } - if imin == nil { - imin = append([]byte{}, key...) - } - imax = append(imax[:0], key...) - } - if err := iter.Error(); err != nil { - iter.Release() - return err - } - iter.Release() - - goodKey += tgoodKey - corruptedKey += tcorruptedKey - corruptedBlock += tcorruptedBlock - - if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { - droppedTable++ - s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - return nil - } - - if tgoodKey > 0 { - if tcorruptedKey > 0 || tcorruptedBlock > 0 { - // Rebuild the table. - s.logf("table@recovery rebuilding @%d", file.Num()) - iter := tr.NewIterator(nil, nil) - tmp, newSize, err := buildTable(iter) - iter.Release() - if err != nil { - return err - } - closed = true - reader.Close() - if err := file.Replace(tmp); err != nil { - return err - } - size = newSize - } - if tSeq > maxSeq { - maxSeq = tSeq - } - recoveredKey += tgoodKey - // Add table to level 0. - rec.addTable(0, file.Num(), uint64(size), imin, imax) - s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - } else { - droppedTable++ - s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size) - } - - return nil - } - - // Recover all tables. - if len(tableFiles) > 0 { - s.logf("table@recovery F·%d", len(tableFiles)) - - // Mark file number as used. - s.markFileNum(tableFiles[len(tableFiles)-1].Num()) - - for _, file := range tableFiles { - if err := recoverTable(file); err != nil { - return err - } - } - - s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq) - } - - // Set sequence number. - rec.setSeqNum(maxSeq) - - // Create new manifest. - if err := s.create(); err != nil { - return err - } - - // Commit. - return s.commit(rec) -} - -func (db *DB) recoverJournal() error { - // Get all journals and sort it by file number. 
- allJournalFiles, err := db.s.getFiles(storage.TypeJournal) - if err != nil { - return err - } - files(allJournalFiles).sort() - - // Journals that will be recovered. - var recJournalFiles []storage.File - for _, jf := range allJournalFiles { - if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum { - recJournalFiles = append(recJournalFiles, jf) - } - } - - var ( - of storage.File // Obsolete file. - rec = &sessionRecord{} - ) - - // Recover journals. - if len(recJournalFiles) > 0 { - db.logf("journal@recovery F·%d", len(recJournalFiles)) - - // Mark file number as used. - db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num()) - - var ( - // Options. - strict = db.s.o.GetStrict(opt.StrictJournal) - checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer = db.s.o.GetWriteBuffer() - - jr *journal.Reader - mdb = memdb.New(db.s.icmp, writeBuffer) - buf = &util.Buffer{} - batch = &Batch{} - ) - - for _, jf := range recJournalFiles { - db.logf("journal@recovery recovering @%d", jf.Num()) - - fr, err := jf.Open() - if err != nil { - return err - } - - // Create or reset journal reader instance. - if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum) - } else { - jr.Reset(fr, dropper{db.s, jf}, strict, checksum) - } - - // Flush memdb and remove obsolete journal file. - if of != nil { - if mdb.Len() > 0 { - if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil { - fr.Close() - return err - } - } - - rec.setJournalNum(jf.Num()) - rec.setSeqNum(db.seq) - if err := db.s.commit(rec); err != nil { - fr.Close() - return err - } - rec.resetAddedTables() - - of.Remove() - of = nil - } - - // Replay journal to memdb. - mdb.Reset() - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - - fr.Close() - return errors.SetFile(err, jf) - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - // This is error returned due to corruption, with strict == false. - continue - } - - fr.Close() - return errors.SetFile(err, jf) - } - if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil { - if !strict && errors.IsCorrupted(err) { - db.s.logf("journal error: %v (skipped)", err) - // We won't apply sequence number as it might be corrupted. - continue - } - - fr.Close() - return errors.SetFile(err, jf) - } - - // Save sequence number. - db.seq = batch.seq + uint64(batch.Len()) - - // Flush it if large enough. - if mdb.Size() >= writeBuffer { - if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { - fr.Close() - return err - } - - mdb.Reset() - } - } - - fr.Close() - of = jf - } - - // Flush the last memdb. - if mdb.Len() > 0 { - if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { - return err - } - } - } - - // Create a new journal. - if _, err := db.newMem(0); err != nil { - return err - } - - // Commit. - rec.setJournalNum(db.journalFile.Num()) - rec.setSeqNum(db.seq) - if err := db.s.commit(rec); err != nil { - // Close journal on error. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return err - } - - // Remove the last obsolete journal file. - if of != nil { - of.Remove() - } - - return nil -} - -func (db *DB) recoverJournalRO() error { - // Get all journals and sort it by file number. - allJournalFiles, err := db.s.getFiles(storage.TypeJournal) - if err != nil { - return err - } - files(allJournalFiles).sort() - - // Journals that will be recovered. 
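-	// (Read-only mode differs from recoverJournal above: all records are
-	// replayed into a single memdb that simply becomes the effective memdb;
-	// nothing is flushed to tables, no manifest is written, and no journal
-	// file is removed.)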
- var recJournalFiles []storage.File - for _, jf := range allJournalFiles { - if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum { - recJournalFiles = append(recJournalFiles, jf) - } - } - - var ( - // Options. - strict = db.s.o.GetStrict(opt.StrictJournal) - checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer = db.s.o.GetWriteBuffer() - - mdb = memdb.New(db.s.icmp, writeBuffer) - ) - - // Recover journals. - if len(recJournalFiles) > 0 { - db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles)) - - var ( - jr *journal.Reader - buf = &util.Buffer{} - batch = &Batch{} - ) - - for _, jf := range recJournalFiles { - db.logf("journal@recovery recovering @%d", jf.Num()) - - fr, err := jf.Open() - if err != nil { - return err - } - - // Create or reset journal reader instance. - if jr == nil { - jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum) - } else { - jr.Reset(fr, dropper{db.s, jf}, strict, checksum) - } - - // Replay journal to memdb. - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - - fr.Close() - return errors.SetFile(err, jf) - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - // This is error returned due to corruption, with strict == false. - continue - } - - fr.Close() - return errors.SetFile(err, jf) - } - if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil { - if !strict && errors.IsCorrupted(err) { - db.s.logf("journal error: %v (skipped)", err) - // We won't apply sequence number as it might be corrupted. - continue - } - - fr.Close() - return errors.SetFile(err, jf) - } - - // Save sequence number. - db.seq = batch.seq + uint64(batch.Len()) - } - - fr.Close() - } - } - - // Set memDB. - db.mem = &memDB{db: db, DB: mdb, ref: 1} - - return nil -} - -func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { - ikey := newIkey(key, seq, ktSeek) - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - mk, mv, me := m.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return nil, ErrNotFound - } - return append([]byte{}, mv...), nil - } - } else if me != ErrNotFound { - return nil, me - } - } - - v := db.s.version() - value, cSched, err := v.get(ikey, ro, false) - v.release() - if cSched { - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) - } - return -} - -func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { - ikey := newIkey(key, seq, ktSeek) - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - mk, _, me := m.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return false, nil - } - return true, nil - } - } else if me != ErrNotFound { - return false, me - } - } - - v := db.s.version() - _, cSched, err := v.get(ikey, ro, true) - v.release() - if cSched { - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) - } - if err == nil { - ret = true - } else if err == ErrNotFound { - err = nil - } - return -} - -// Get gets the value for the given key. It returns ErrNotFound if the -// DB does not contains the key. 
-//
-// The returned slice is its own copy; it is safe to modify the contents
-// of the returned slice.
-// It is safe to modify the contents of the argument after Get returns.
-func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
-	err = db.ok()
-	if err != nil {
-		return
-	}
-
-	se := db.acquireSnapshot()
-	defer db.releaseSnapshot(se)
-	return db.get(key, se.seq, ro)
-}
-
-// Has returns true if the DB contains the given key.
-//
-// It is safe to modify the contents of the argument after Has returns.
-func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
-	err = db.ok()
-	if err != nil {
-		return
-	}
-
-	se := db.acquireSnapshot()
-	defer db.releaseSnapshot(se)
-	return db.has(key, se.seq, ro)
-}
-
-// NewIterator returns an iterator for the latest snapshot of the
-// underlying DB.
-// The returned iterator is not goroutine-safe, but it is safe to use
-// multiple iterators concurrently, with each in a dedicated goroutine.
-// It is also safe to use an iterator concurrently while modifying its
-// underlying DB. The resultant key/value pairs are guaranteed to be
-// consistent.
-//
-// Slice allows slicing the iterator to contain only keys in the given
-// range. A nil Range.Start is treated as a key before all keys in the
-// DB, and a nil Range.Limit is treated as a key after all keys in
-// the DB.
-//
-// The iterator must be released after use, by calling the Release method.
-//
-// Also read the Iterator documentation of the leveldb/iterator package.
-func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
-	if err := db.ok(); err != nil {
-		return iterator.NewEmptyIterator(err)
-	}
-
-	se := db.acquireSnapshot()
-	defer db.releaseSnapshot(se)
-	// The iterator holds a 'version' ref; since 'version' is immutable,
-	// the snapshot can be released once the iterator is created.
-	return db.newIterator(se.seq, slice, ro)
-}
-
-// GetSnapshot returns the latest snapshot of the underlying DB. A snapshot
-// is a frozen view of the DB state at a particular point in time; its
-// contents are guaranteed to be consistent.
-//
-// The snapshot must be released after use, by calling the Release method.
-func (db *DB) GetSnapshot() (*Snapshot, error) {
-	if err := db.ok(); err != nil {
-		return nil, err
-	}
-
-	return db.newSnapshot(), nil
-}
-
-// GetProperty returns the value of the given property name.
-//
-// Property names:
-//	leveldb.num-files-at-level{n}
-//		Returns the number of files at level 'n'.
-//	leveldb.stats
-//		Returns statistics of the underlying DB.
-//	leveldb.sstables
-//		Returns the sstables list for each level.
-//	leveldb.blockpool
-//		Returns block pool stats.
-//	leveldb.cachedblock
-//		Returns the size of cached blocks.
-//	leveldb.openedtables
-//		Returns the number of opened tables.
-//	leveldb.alivesnaps
-//		Returns the number of alive snapshots.
-//	leveldb.aliveiters
-//		Returns the number of alive iterators.
-func (db *DB) GetProperty(name string) (value string, err error) {
-	err = db.ok()
-	if err != nil {
-		return
-	}
-
-	const prefix = "leveldb."
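-	// Every supported name carries the "leveldb." prefix checked below.
-	// A minimal usage sketch (illustrative only, not part of the original
-	// source):
-	//
-	//	nLevel0, err := db.GetProperty("leveldb.num-files-at-level0")
-	//	stats, err := db.GetProperty("leveldb.stats")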
- if !strings.HasPrefix(name, prefix) { - return "", ErrNotFound - } - p := name[len(prefix):] - - v := db.s.version() - defer v.release() - - numFilesPrefix := "num-files-at-level" - switch { - case strings.HasPrefix(p, numFilesPrefix): - var level uint - var rest string - n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) - if n != 1 || int(level) >= db.s.o.GetNumLevel() { - err = ErrNotFound - } else { - value = fmt.Sprint(v.tLen(int(level))) - } - case p == "stats": - value = "Compactions\n" + - " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + - "-------+------------+---------------+---------------+---------------+---------------\n" - for level, tables := range v.tables { - duration, read, write := db.compStats[level].get() - if len(tables) == 0 && duration == 0 { - continue - } - value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", - level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), - float64(read)/1048576.0, float64(write)/1048576.0) - } - case p == "sstables": - for level, tables := range v.tables { - value += fmt.Sprintf("--- level %d ---\n", level) - for _, t := range tables { - value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax) - } - } - case p == "blockpool": - value = fmt.Sprintf("%v", db.s.tops.bpool) - case p == "cachedblock": - if db.s.tops.bcache != nil { - value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) - } else { - value = "" - } - case p == "openedtables": - value = fmt.Sprintf("%d", db.s.tops.cache.Size()) - case p == "alivesnaps": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) - case p == "aliveiters": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) - default: - err = ErrNotFound - } - - return -} - -// SizeOf calculates approximate sizes of the given key ranges. -// The length of the returned sizes are equal with the length of the given -// ranges. The returned sizes measure storage space usage, so if the user -// data compresses by a factor of ten, the returned sizes will be one-tenth -// the size of the corresponding user data size. -// The results may not include the sizes of recently written data. -func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { - if err := db.ok(); err != nil { - return nil, err - } - - v := db.s.version() - defer v.release() - - sizes := make(Sizes, 0, len(ranges)) - for _, r := range ranges { - imin := newIkey(r.Start, kMaxSeq, ktSeek) - imax := newIkey(r.Limit, kMaxSeq, ktSeek) - start, err := v.offsetOf(imin) - if err != nil { - return nil, err - } - limit, err := v.offsetOf(imax) - if err != nil { - return nil, err - } - var size uint64 - if limit >= start { - size = limit - start - } - sizes = append(sizes, size) - } - - return sizes, nil -} - -// Close closes the DB. This will also releases any outstanding snapshot and -// abort any in-flight compaction. -// -// It is not safe to close a DB until all outstanding iterators are released. -// It is valid to call Close multiple times. Other methods should not be -// called after the DB has been closed. -func (db *DB) Close() error { - if !db.setClosed() { - return ErrClosed - } - - start := time.Now() - db.log("db@close closing") - - // Clear the finalizer. - runtime.SetFinalizer(db, nil) - - // Get compaction error. - var err error - select { - case err = <-db.compErrC: - if err == ErrReadOnly { - err = nil - } - default: - } - - // Signal all goroutines. - close(db.closeC) - - // Wait for all gorotines to exit. 
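-	// (closeC was closed above to signal the compaction and memdb-drain
-	// goroutines; closeW.Wait() blocks until they have all exited, after
-	// which the journal, the session, and the storage are shut down in
-	// order.)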
- db.closeW.Wait() - - // Lock writer and closes journal. - db.writeLockC <- struct{}{} - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - - if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - } - - // Close session. - db.s.close() - db.logf("db@close done T·%v", time.Since(start)) - db.s.release() - - if db.closer != nil { - if err1 := db.closer.Close(); err == nil { - err = err1 - } - } - - // NIL'ing pointers. - db.s = nil - db.mem = nil - db.frozenMem = nil - db.journal = nil - db.journalWriter = nil - db.journalFile = nil - db.frozenJournalFile = nil - db.closer = nil - - return err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go deleted file mode 100644 index 26003106ea..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ /dev/null @@ -1,791 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync" - "time" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -var ( - errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") -) - -type cStats struct { - sync.Mutex - duration time.Duration - read uint64 - write uint64 -} - -func (p *cStats) add(n *cStatsStaging) { - p.Lock() - p.duration += n.duration - p.read += n.read - p.write += n.write - p.Unlock() -} - -func (p *cStats) get() (duration time.Duration, read, write uint64) { - p.Lock() - defer p.Unlock() - return p.duration, p.read, p.write -} - -type cStatsStaging struct { - start time.Time - duration time.Duration - on bool - read uint64 - write uint64 -} - -func (p *cStatsStaging) startTimer() { - if !p.on { - p.start = time.Now() - p.on = true - } -} - -func (p *cStatsStaging) stopTimer() { - if p.on { - p.duration += time.Since(p.start) - p.on = false - } -} - -func (db *DB) compactionError() { - var err error -noerr: - // No error. - for { - select { - case err = <-db.compErrSetC: - switch { - case err == nil: - case err == ErrReadOnly, errors.IsCorrupted(err): - goto hasperr - default: - goto haserr - } - case _, _ = <-db.closeC: - return - } - } -haserr: - // Transient error. - for { - select { - case db.compErrC <- err: - case err = <-db.compErrSetC: - switch { - case err == nil: - goto noerr - case err == ErrReadOnly, errors.IsCorrupted(err): - goto hasperr - default: - } - case _, _ = <-db.closeC: - return - } - } -hasperr: - // Persistent error. - for { - select { - case db.compErrC <- err: - case db.compPerErrC <- err: - case db.writeLockC <- struct{}{}: - // Hold write lock, so that write won't pass-through. - db.compWriteLocking = true - case _, _ = <-db.closeC: - if db.compWriteLocking { - // We should release the lock or Close will hang. 
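-				// (compWriteLocking was set when this goroutine acquired
-				// the write lock above to stall writers during a persistent
-				// error; draining writeLockC here lets Close, which also
-				// sends on writeLockC, acquire the lock and proceed.)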
- <-db.writeLockC - } - return - } - } -} - -type compactionTransactCounter int - -func (cnt *compactionTransactCounter) incr() { - *cnt++ -} - -type compactionTransactInterface interface { - run(cnt *compactionTransactCounter) error - revert() error -} - -func (db *DB) compactionTransact(name string, t compactionTransactInterface) { - defer func() { - if x := recover(); x != nil { - if x == errCompactionTransactExiting { - if err := t.revert(); err != nil { - db.logf("%s revert error %q", name, err) - } - } - panic(x) - } - }() - - const ( - backoffMin = 1 * time.Second - backoffMax = 8 * time.Second - backoffMul = 2 * time.Second - ) - var ( - backoff = backoffMin - backoffT = time.NewTimer(backoff) - lastCnt = compactionTransactCounter(0) - - disableBackoff = db.s.o.GetDisableCompactionBackoff() - ) - for n := 0; ; n++ { - // Check wether the DB is closed. - if db.isClosed() { - db.logf("%s exiting", name) - db.compactionExitTransact() - } else if n > 0 { - db.logf("%s retrying N·%d", name, n) - } - - // Execute. - cnt := compactionTransactCounter(0) - err := t.run(&cnt) - if err != nil { - db.logf("%s error I·%d %q", name, cnt, err) - } - - // Set compaction error status. - select { - case db.compErrSetC <- err: - case perr := <-db.compPerErrC: - if err != nil { - db.logf("%s exiting (persistent error %q)", name, perr) - db.compactionExitTransact() - } - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - if err == nil { - return - } - if errors.IsCorrupted(err) { - db.logf("%s exiting (corruption detected)", name) - db.compactionExitTransact() - } - - if !disableBackoff { - // Reset backoff duration if counter is advancing. - if cnt > lastCnt { - backoff = backoffMin - lastCnt = cnt - } - - // Backoff. - backoffT.Reset(backoff) - if backoff < backoffMax { - backoff *= backoffMul - if backoff > backoffMax { - backoff = backoffMax - } - } - select { - case <-backoffT.C: - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - } - } -} - -type compactionTransactFunc struct { - runFunc func(cnt *compactionTransactCounter) error - revertFunc func() error -} - -func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error { - return t.runFunc(cnt) -} - -func (t *compactionTransactFunc) revert() error { - if t.revertFunc != nil { - return t.revertFunc() - } - return nil -} - -func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) { - db.compactionTransact(name, &compactionTransactFunc{run, revert}) -} - -func (db *DB) compactionExitTransact() { - panic(errCompactionTransactExiting) -} - -func (db *DB) memCompaction() { - mdb := db.getFrozenMem() - if mdb == nil { - return - } - defer mdb.decref() - - db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size())) - - // Don't compact empty memdb. - if mdb.Len() == 0 { - db.logf("memdb@flush skipping") - // drop frozen memdb - db.dropFrozenMem() - return - } - - // Pause table compaction. 
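-	// (The handshake below hands resumeC to the table-compaction goroutine
-	// via tcompPauseC; that goroutine then blocks sending on resumeC inside
-	// pauseCompaction until the memdb flush has been committed and the
-	// channel is drained at the end of this function.)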
- resumeC := make(chan struct{}) - select { - case db.tcompPauseC <- (chan<- struct{})(resumeC): - case <-db.compPerErrC: - close(resumeC) - resumeC = nil - case _, _ = <-db.closeC: - return - } - - var ( - rec = &sessionRecord{} - stats = &cStatsStaging{} - flushLevel int - ) - - db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1) - stats.stopTimer() - return - }, func() error { - for _, r := range rec.addedTables { - db.logf("memdb@flush revert @%d", r.num) - f := db.s.getTableFile(r.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil - }) - - db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - rec.setJournalNum(db.journalFile.Num()) - rec.setSeqNum(db.frozenSeq) - err = db.s.commit(rec) - stats.stopTimer() - return - }, nil) - - db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration) - - for _, r := range rec.addedTables { - stats.write += r.size - } - db.compStats[flushLevel].add(stats) - - // Drop frozen memdb. - db.dropFrozenMem() - - // Resume table compaction. - if resumeC != nil { - select { - case <-resumeC: - close(resumeC) - case _, _ = <-db.closeC: - return - } - } - - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) -} - -type tableCompactionBuilder struct { - db *DB - s *session - c *compaction - rec *sessionRecord - stat0, stat1 *cStatsStaging - - snapHasLastUkey bool - snapLastUkey []byte - snapLastSeq uint64 - snapIter int - snapKerrCnt int - snapDropCnt int - - kerrCnt int - dropCnt int - - minSeq uint64 - strict bool - tableSize int - - tw *tWriter -} - -func (b *tableCompactionBuilder) appendKV(key, value []byte) error { - // Create new table if not already. - if b.tw == nil { - // Check for pause event. - if b.db != nil { - select { - case ch := <-b.db.tcompPauseC: - b.db.pauseCompaction(ch) - case _, _ = <-b.db.closeC: - b.db.compactionExitTransact() - default: - } - } - - // Create new table. - var err error - b.tw, err = b.s.tops.create() - if err != nil { - return err - } - } - - // Write key/value into table. - return b.tw.append(key, value) -} - -func (b *tableCompactionBuilder) needFlush() bool { - return b.tw.tw.BytesLen() >= b.tableSize -} - -func (b *tableCompactionBuilder) flush() error { - t, err := b.tw.finish() - if err != nil { - return err - } - b.rec.addTableFile(b.c.level+1, t) - b.stat1.write += t.size - b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) - b.tw = nil - return nil -} - -func (b *tableCompactionBuilder) cleanup() { - if b.tw != nil { - b.tw.drop() - b.tw = nil - } -} - -func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { - snapResumed := b.snapIter > 0 - hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. - lastUkey := append([]byte{}, b.snapLastUkey...) - lastSeq := b.snapLastSeq - b.kerrCnt = b.snapKerrCnt - b.dropCnt = b.snapDropCnt - // Restore compaction state. - b.c.restore() - - defer b.cleanup() - - b.stat1.startTimer() - defer b.stat1.stopTimer() - - iter := b.c.newIterator() - defer iter.Release() - for i := 0; iter.Next(); i++ { - // Incr transact counter. - cnt.incr() - - // Skip until last state. 
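-		// (compactionTransact may retry run() after a transient error; the
-		// builder snapshots its progress into the snap* fields each time a
-		// table is flushed, and the check below fast-forwards the iterator
-		// past entries that were already processed before the retry.)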
- if i < b.snapIter { - continue - } - - resumed := false - if snapResumed { - resumed = true - snapResumed = false - } - - ikey := iter.Key() - ukey, seq, kt, kerr := parseIkey(ikey) - - if kerr == nil { - shouldStop := !resumed && b.c.shouldStopBefore(ikey) - - if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 { - // First occurrence of this user key. - - // Only rotate tables if ukey doesn't hop across. - if b.tw != nil && (shouldStop || b.needFlush()) { - if err := b.flush(); err != nil { - return err - } - - // Creates snapshot of the state. - b.c.save() - b.snapHasLastUkey = hasLastUkey - b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...) - b.snapLastSeq = lastSeq - b.snapIter = i - b.snapKerrCnt = b.kerrCnt - b.snapDropCnt = b.dropCnt - } - - hasLastUkey = true - lastUkey = append(lastUkey[:0], ukey...) - lastSeq = kMaxSeq - } - - switch { - case lastSeq <= b.minSeq: - // Dropped because newer entry for same user key exist - fallthrough // (A) - case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): - // For this user key: - // (1) there is no data in higher levels - // (2) data in lower levels will have larger seq numbers - // (3) data in layers that are being compacted here and have - // smaller seq numbers will be dropped in the next - // few iterations of this loop (by rule (A) above). - // Therefore this deletion marker is obsolete and can be dropped. - lastSeq = seq - b.dropCnt++ - continue - default: - lastSeq = seq - } - } else { - if b.strict { - return kerr - } - - // Don't drop corrupted keys. - hasLastUkey = false - lastUkey = lastUkey[:0] - lastSeq = kMaxSeq - b.kerrCnt++ - } - - if err := b.appendKV(ikey, iter.Value()); err != nil { - return err - } - } - - if err := iter.Error(); err != nil { - return err - } - - // Finish last table. 
- if b.tw != nil && !b.tw.empty() { - return b.flush() - } - return nil -} - -func (b *tableCompactionBuilder) revert() error { - for _, at := range b.rec.addedTables { - b.s.logf("table@build revert @%d", at.num) - f := b.s.getTableFile(at.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil -} - -func (db *DB) tableCompaction(c *compaction, noTrivial bool) { - defer c.release() - - rec := &sessionRecord{} - rec.addCompPtr(c.level, c.imax) - - if !noTrivial && c.trivial() { - t := c.tables[0][0] - db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1) - rec.delTable(c.level, t.file.Num()) - rec.addTableFile(c.level+1, t) - db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) { - return db.s.commit(rec) - }, nil) - return - } - - var stats [2]cStatsStaging - for i, tables := range c.tables { - for _, t := range tables { - stats[i].read += t.size - // Insert deleted tables into record - rec.delTable(c.level+i, t.file.Num()) - } - } - sourceSize := int(stats[0].read + stats[1].read) - minSeq := db.minSeq() - db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq) - - b := &tableCompactionBuilder{ - db: db, - s: db.s, - c: c, - rec: rec, - stat1: &stats[1], - minSeq: minSeq, - strict: db.s.o.GetStrict(opt.StrictCompaction), - tableSize: db.s.o.GetCompactionTableSize(c.level + 1), - } - db.compactionTransact("table@build", b) - - // Commit changes - db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) { - stats[1].startTimer() - defer stats[1].stopTimer() - return db.s.commit(rec) - }, nil) - - resultSize := int(stats[1].write) - db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) - - // Save compaction stats - for i := range stats { - db.compStats[c.level+1].add(&stats[i]) - } -} - -func (db *DB) tableRangeCompaction(level int, umin, umax []byte) { - db.logf("table@compaction range L%d %q:%q", level, umin, umax) - - if level >= 0 { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } else { - v := db.s.version() - m := 1 - for i, t := range v.tables[1:] { - if t.overlaps(db.s.icmp, umin, umax, false) { - m = i + 1 - } - } - v.release() - - for level := 0; level < m; level++ { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } - } -} - -func (db *DB) tableAutoCompaction() { - if c := db.s.pickCompaction(); c != nil { - db.tableCompaction(c, false) - } -} - -func (db *DB) tableNeedCompaction() bool { - v := db.s.version() - defer v.release() - return v.needCompaction() -} - -func (db *DB) pauseCompaction(ch chan<- struct{}) { - select { - case ch <- struct{}{}: - case _, _ = <-db.closeC: - db.compactionExitTransact() - } -} - -type cCmd interface { - ack(err error) -} - -type cIdle struct { - ackC chan<- error -} - -func (r cIdle) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -type cRange struct { - level int - min, max []byte - ackC chan<- error -} - -func (r cRange) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -// This will trigger auto compation and/or wait for all compaction to be done. 
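-// (The compaction goroutines are driven by cCmd values sent over mcompCmdC
-// and tcompCmdC: cIdle runs a compaction pass and is acknowledged once the
-// goroutine is idle, while cRange requests compaction of an explicit key
-// range; see mCompaction and tCompaction below.)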
-func (db *DB) compSendIdle(compC chan<- cCmd) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cIdle{ch}: - case err = <-db.compErrC: - return - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case _, _ = <-db.closeC: - return ErrClosed - } - return err -} - -// This will trigger auto compaction but will not wait for it. -func (db *DB) compSendTrigger(compC chan<- cCmd) { - select { - case compC <- cIdle{}: - default: - } -} - -// Send range compaction request. -func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cRange{level, min, max, ch}: - case err := <-db.compErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case _, _ = <-db.closeC: - return ErrClosed - } - return err -} - -func (db *DB) mCompaction() { - var x cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - select { - case x = <-db.mcompCmdC: - switch x.(type) { - case cIdle: - db.memCompaction() - x.ack(nil) - x = nil - default: - panic("leveldb: unknown command") - } - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) tCompaction() { - var x cCmd - var ackQ []cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - for i := range ackQ { - ackQ[i].ack(ErrClosed) - ackQ[i] = nil - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - if db.tableNeedCompaction() { - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - default: - } - } else { - for i := range ackQ { - ackQ[i].ack(nil) - ackQ[i] = nil - } - ackQ = ackQ[:0] - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - } - } - if x != nil { - switch cmd := x.(type) { - case cIdle: - ackQ = append(ackQ, x) - case cRange: - db.tableRangeCompaction(cmd.level, cmd.min, cmd.max) - x.ack(nil) - default: - panic("leveldb: unknown command") - } - x = nil - } - db.tableAutoCompaction() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go deleted file mode 100644 index 656ae98567..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "errors" - "math/rand" - "runtime" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key") -) - -type memdbReleaser struct { - once sync.Once - m *memDB -} - -func (mr *memdbReleaser) Release() { - mr.once.Do(func() { - mr.m.decref() - }) -} - -func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - em, fm := db.getMems() - v := db.s.version() - - ti := v.getIterators(slice, ro) - n := len(ti) + 2 - i := make([]iterator.Iterator, 0, n) - emi := em.NewIterator(slice) - emi.SetReleaser(&memdbReleaser{m: em}) - i = append(i, emi) - if fm != nil { - fmi := fm.NewIterator(slice) - fmi.SetReleaser(&memdbReleaser{m: fm}) - i = append(i, fmi) - } - i = append(i, ti...) - strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) - mi := iterator.NewMergedIterator(i, db.s.icmp, strict) - mi.SetReleaser(&versionReleaser{v: v}) - return mi -} - -func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { - var islice *util.Range - if slice != nil { - islice = &util.Range{} - if slice.Start != nil { - islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek) - } - if slice.Limit != nil { - islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek) - } - } - rawIter := db.newRawIterator(islice, ro) - iter := &dbIter{ - db: db, - icmp: db.s.icmp, - iter: rawIter, - seq: seq, - strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader), - key: make([]byte, 0), - value: make([]byte, 0), - } - atomic.AddInt32(&db.aliveIters, 1) - runtime.SetFinalizer(iter, (*dbIter).Release) - return iter -} - -func (db *DB) iterSamplingRate() int { - return rand.Intn(2 * db.s.o.GetIteratorSamplingRate()) -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -// dbIter represent an interator states over a database session. 
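-// (It wraps the merged raw iterator, filtering out entries whose sequence
-// number is newer than the iterator's snapshot, collapsing multiple
-// versions of the same user key, and hiding deletion markers.)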
-type dbIter struct { - db *DB - icmp *iComparer - iter iterator.Iterator - seq uint64 - strict bool - - smaplingGap int - dir dir - key []byte - value []byte - err error - releaser util.Releaser -} - -func (i *dbIter) sampleSeek() { - ikey := i.iter.Key() - i.smaplingGap -= len(ikey) + len(i.iter.Value()) - for i.smaplingGap < 0 { - i.smaplingGap += i.db.iterSamplingRate() - i.db.sampleSeek(ikey) - } -} - -func (i *dbIter) setErr(err error) { - i.err = err - i.key = nil - i.value = nil -} - -func (i *dbIter) iterErr() { - if err := i.iter.Error(); err != nil { - i.setErr(err) - } -} - -func (i *dbIter) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *dbIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.First() { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.Last() { - return i.prev() - } - i.dir = dirSOI - i.iterErr() - return false -} - -func (i *dbIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ikey := newIkey(key, i.seq, ktSeek) - if i.iter.Seek(ikey) { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) next() bool { - for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - switch kt { - case ktDel: - // Skip deleted key. - i.key = append(i.key[:0], ukey...) - i.dir = dirForward - case ktVal: - if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) - i.dir = dirForward - return true - } - } - } - } else if i.strict { - i.setErr(kerr) - break - } - if !i.iter.Next() { - i.dir = dirEOI - i.iterErr() - break - } - } - return false -} - -func (i *dbIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { - i.dir = dirEOI - i.iterErr() - return false - } - return i.next() -} - -func (i *dbIter) prev() bool { - i.dir = dirBackward - del := true - if i.iter.Valid() { - for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - if !del && i.icmp.uCompare(ukey, i.key) < 0 { - return true - } - del = (kt == ktDel) - if !del { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) 
- } - } - } else if i.strict { - i.setErr(kerr) - return false - } - if !i.iter.Prev() { - break - } - } - } - if del { - i.dir = dirSOI - i.iterErr() - return false - } - return true -} - -func (i *dbIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - for i.iter.Prev() { - if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if i.icmp.uCompare(ukey, i.key) < 0 { - goto cont - } - } else if i.strict { - i.setErr(kerr) - return false - } - } - i.dir = dirSOI - i.iterErr() - return false - } - -cont: - return i.prev() -} - -func (i *dbIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *dbIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *dbIter) Release() { - if i.dir != dirReleased { - // Clear the finalizer. - runtime.SetFinalizer(i, nil) - - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - - i.dir = dirReleased - i.key = nil - i.value = nil - i.iter.Release() - i.iter = nil - atomic.AddInt32(&i.db.aliveIters, -1) - i.db = nil - } -} - -func (i *dbIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *dbIter) Error() error { - return i.err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go deleted file mode 100644 index 0372848ff1..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "container/list" - "fmt" - "runtime" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type snapshotElement struct { - seq uint64 - ref int - e *list.Element -} - -// Acquires a snapshot, based on latest sequence. -func (db *DB) acquireSnapshot() *snapshotElement { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - seq := db.getSeq() - - if e := db.snapsList.Back(); e != nil { - se := e.Value.(*snapshotElement) - if se.seq == seq { - se.ref++ - return se - } else if seq < se.seq { - panic("leveldb: sequence number is not increasing") - } - } - se := &snapshotElement{seq: seq, ref: 1} - se.e = db.snapsList.PushBack(se) - return se -} - -// Releases given snapshot element. -func (db *DB) releaseSnapshot(se *snapshotElement) { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - se.ref-- - if se.ref == 0 { - db.snapsList.Remove(se.e) - se.e = nil - } else if se.ref < 0 { - panic("leveldb: Snapshot: negative element reference") - } -} - -// Gets minimum sequence that not being snapshoted. -func (db *DB) minSeq() uint64 { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - if e := db.snapsList.Front(); e != nil { - return e.Value.(*snapshotElement).seq - } - - return db.getSeq() -} - -// Snapshot is a DB snapshot. 
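-// A snapshot pins a sequence number through a ref-counted snapshotElement;
-// reads through it ignore any entry written with a newer sequence number.
-// A minimal usage sketch (illustrative only, not part of the original
-// source):
-//
-//	snap, err := db.GetSnapshot()
-//	if err != nil {
-//		return err
-//	}
-//	defer snap.Release()
-//	value, err := snap.Get([]byte("key"), nil)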
-type Snapshot struct {
-	db       *DB
-	elem     *snapshotElement
-	mu       sync.RWMutex
-	released bool
-}
-
-// Creates a new snapshot object.
-func (db *DB) newSnapshot() *Snapshot {
-	snap := &Snapshot{
-		db:   db,
-		elem: db.acquireSnapshot(),
-	}
-	atomic.AddInt32(&db.aliveSnaps, 1)
-	runtime.SetFinalizer(snap, (*Snapshot).Release)
-	return snap
-}
-
-func (snap *Snapshot) String() string {
-	return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
-}
-
-// Get gets the value for the given key. It returns ErrNotFound if
-// the DB does not contain the key.
-//
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Get returns.
-func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
-	err = snap.db.ok()
-	if err != nil {
-		return
-	}
-	snap.mu.RLock()
-	defer snap.mu.RUnlock()
-	if snap.released {
-		err = ErrSnapshotReleased
-		return
-	}
-	return snap.db.get(key, snap.elem.seq, ro)
-}
-
-// Has returns true if the DB contains the given key.
-//
-// It is safe to modify the contents of the argument after Has returns.
-func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
-	err = snap.db.ok()
-	if err != nil {
-		return
-	}
-	snap.mu.RLock()
-	defer snap.mu.RUnlock()
-	if snap.released {
-		err = ErrSnapshotReleased
-		return
-	}
-	return snap.db.has(key, snap.elem.seq, ro)
-}
-
-// NewIterator returns an iterator for the snapshot of the underlying DB.
-// The returned iterator is not goroutine-safe, but it is safe to use
-// multiple iterators concurrently, with each in a dedicated goroutine.
-// It is also safe to use an iterator concurrently while modifying its
-// underlying DB. The resultant key/value pairs are guaranteed to be
-// consistent.
-//
-// Slice allows slicing the iterator to contain only keys in the given
-// range. A nil Range.Start is treated as a key before all keys in the
-// DB, and a nil Range.Limit is treated as a key after all keys in
-// the DB.
-//
-// The iterator must be released after use, by calling the Release method.
-// Releasing the snapshot does not release the iterator; the iterator
-// remains valid until it is released.
-//
-// Also read the Iterator documentation of the leveldb/iterator package.
-func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
-	if err := snap.db.ok(); err != nil {
-		return iterator.NewEmptyIterator(err)
-	}
-	snap.mu.Lock()
-	defer snap.mu.Unlock()
-	if snap.released {
-		return iterator.NewEmptyIterator(ErrSnapshotReleased)
-	}
-	// Since the iterator already holds a version ref, it doesn't need to
-	// hold a snapshot ref.
-	return snap.db.newIterator(snap.elem.seq, slice, ro)
-}
-
-// Release releases the snapshot. This will not release any returned
-// iterators; the iterators remain valid until released or until the
-// underlying DB is closed.
-//
-// Other methods should not be called after the snapshot has been released.
-func (snap *Snapshot) Release() {
-	snap.mu.Lock()
-	defer snap.mu.Unlock()
-
-	if !snap.released {
-		// Clear the finalizer.
- runtime.SetFinalizer(snap, nil) - - snap.released = true - snap.db.releaseSnapshot(snap.elem) - atomic.AddInt32(&snap.db.aliveSnaps, -1) - snap.db = nil - snap.elem = nil - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go deleted file mode 100644 index 24671dd39e..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/memdb" -) - -type memDB struct { - db *DB - *memdb.DB - ref int32 -} - -func (m *memDB) incref() { - atomic.AddInt32(&m.ref, 1) -} - -func (m *memDB) decref() { - if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { - // Only put back memdb with std capacity. - if m.Capacity() == m.db.s.o.GetWriteBuffer() { - m.Reset() - m.db.mpoolPut(m.DB) - } - m.db = nil - m.DB = nil - } else if ref < 0 { - panic("negative memdb ref") - } -} - -// Get latest sequence number. -func (db *DB) getSeq() uint64 { - return atomic.LoadUint64(&db.seq) -} - -// Atomically adds delta to seq. -func (db *DB) addSeq(delta uint64) { - atomic.AddUint64(&db.seq, delta) -} - -func (db *DB) sampleSeek(ikey iKey) { - v := db.s.version() - if v.sampleSeek(ikey) { - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) - } - v.release() -} - -func (db *DB) mpoolPut(mem *memdb.DB) { - defer func() { - recover() - }() - select { - case db.memPool <- mem: - default: - } -} - -func (db *DB) mpoolGet() *memdb.DB { - select { - case mem := <-db.memPool: - return mem - default: - return nil - } -} - -func (db *DB) mpoolDrain() { - ticker := time.NewTicker(30 * time.Second) - for { - select { - case <-ticker.C: - select { - case <-db.memPool: - default: - } - case _, _ = <-db.closeC: - close(db.memPool) - return - } - } -} - -// Create new memdb and froze the old one; need external synchronization. -// newMem only called synchronously by the writer. -func (db *DB) newMem(n int) (mem *memDB, err error) { - num := db.s.allocFileNum() - file := db.s.getJournalFile(num) - w, err := file.Create() - if err != nil { - db.s.reuseFileNum(num) - return - } - - db.memMu.Lock() - defer db.memMu.Unlock() - - if db.frozenMem != nil { - panic("still has frozen mem") - } - - if db.journal == nil { - db.journal = journal.NewWriter(w) - } else { - db.journal.Reset(w) - db.journalWriter.Close() - db.frozenJournalFile = db.journalFile - } - db.journalWriter = w - db.journalFile = file - db.frozenMem = db.mem - mdb := db.mpoolGet() - if mdb == nil || mdb.Capacity() < n { - mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) - } - mem = &memDB{ - db: db, - DB: mdb, - ref: 2, - } - db.mem = mem - // The seq only incremented by the writer. And whoever called newMem - // should hold write lock, so no need additional synchronization here. - db.frozenSeq = db.seq - return -} - -// Get all memdbs. -func (db *DB) getMems() (e, f *memDB) { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.mem, db.frozenMem -} - -// Get frozen memdb. 
-func (db *DB) getEffectiveMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - return db.mem -} - -// Check whether we has frozen memdb. -func (db *DB) hasFrozenMem() bool { - db.memMu.RLock() - defer db.memMu.RUnlock() - return db.frozenMem != nil -} - -// Get frozen memdb. -func (db *DB) getFrozenMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.frozenMem -} - -// Drop frozen memdb; assume that frozen memdb isn't nil. -func (db *DB) dropFrozenMem() { - db.memMu.Lock() - if err := db.frozenJournalFile.Remove(); err != nil { - db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err) - } else { - db.logf("journal@remove removed @%d", db.frozenJournalFile.Num()) - } - db.frozenJournalFile = nil - db.frozenMem.decref() - db.frozenMem = nil - db.memMu.Unlock() -} - -// Set closed flag; return true if not already closed. -func (db *DB) setClosed() bool { - return atomic.CompareAndSwapUint32(&db.closed, 0, 1) -} - -// Check whether DB was closed. -func (db *DB) isClosed() bool { - return atomic.LoadUint32(&db.closed) != 0 -} - -// Check read ok status. -func (db *DB) ok() error { - if db.isClosed() { - return ErrClosed - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go deleted file mode 100644 index 9d91ebf1a8..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go +++ /dev/null @@ -1,2701 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "container/list" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func tkey(i int) []byte { - return []byte(fmt.Sprintf("%016d", i)) -} - -func tval(seed, n int) []byte { - r := rand.New(rand.NewSource(int64(seed))) - return randomString(r, n) -} - -type dbHarness struct { - t *testing.T - - stor *testStorage - db *DB - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions -} - -func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { - h := new(dbHarness) - h.init(t, o) - return h -} - -func newDbHarness(t *testing.T) *dbHarness { - return newDbHarnessWopt(t, &opt.Options{}) -} - -func (h *dbHarness) init(t *testing.T, o *opt.Options) { - h.t = t - h.stor = newTestStorage(t) - h.o = o - h.ro = nil - h.wo = nil - - if err := h.openDB0(); err != nil { - // So that it will come after fatal message. 
- defer h.stor.Close() - h.t.Fatal("Open (init): got error: ", err) - } -} - -func (h *dbHarness) openDB0() (err error) { - h.t.Log("opening DB") - h.db, err = Open(h.stor, h.o) - return -} - -func (h *dbHarness) openDB() { - if err := h.openDB0(); err != nil { - h.t.Fatal("Open: got error: ", err) - } -} - -func (h *dbHarness) closeDB0() error { - h.t.Log("closing DB") - return h.db.Close() -} - -func (h *dbHarness) closeDB() { - if err := h.closeDB0(); err != nil { - h.t.Error("Close: got error: ", err) - } - h.stor.CloseCheck() - runtime.GC() -} - -func (h *dbHarness) reopenDB() { - h.closeDB() - h.openDB() -} - -func (h *dbHarness) close() { - h.closeDB0() - h.db = nil - h.stor.Close() - h.stor = nil - runtime.GC() -} - -func (h *dbHarness) openAssert(want bool) { - db, err := Open(h.stor, h.o) - if err != nil { - if want { - h.t.Error("Open: assert: got error: ", err) - } else { - h.t.Log("Open: assert: got error (expected): ", err) - } - } else { - if !want { - h.t.Error("Open: assert: expect error") - } - db.Close() - } -} - -func (h *dbHarness) write(batch *Batch) { - if err := h.db.Write(batch, h.wo); err != nil { - h.t.Error("Write: got error: ", err) - } -} - -func (h *dbHarness) put(key, value string) { - if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { - h.t.Error("Put: got error: ", err) - } -} - -func (h *dbHarness) putMulti(n int, low, hi string) { - for i := 0; i < n; i++ { - h.put(low, "begin") - h.put(hi, "end") - h.compactMem() - } -} - -func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) { - t := h.t - db := h.db - - var ( - maxOverlaps uint64 - maxLevel int - ) - v := db.s.version() - for i, tt := range v.tables[1 : len(v.tables)-1] { - level := i + 1 - next := v.tables[level+1] - for _, t := range tt { - r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) - sum := r.size() - if sum > maxOverlaps { - maxOverlaps = sum - maxLevel = level - } - } - } - v.release() - - if maxOverlaps > want { - t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel) - } else { - t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want) - } -} - -func (h *dbHarness) delete(key string) { - t := h.t - db := h.db - - err := db.Delete([]byte(key), h.wo) - if err != nil { - t.Error("Delete: got error: ", err) - } -} - -func (h *dbHarness) assertNumKeys(want int) { - iter := h.db.NewIterator(nil, h.ro) - defer iter.Release() - got := 0 - for iter.Next() { - got++ - } - if err := iter.Error(); err != nil { - h.t.Error("assertNumKeys: ", err) - } - if want != got { - h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) - } -} - -func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { - t := h.t - v, err := db.Get([]byte(key), h.ro) - switch err { - case ErrNotFound: - if expectFound { - t.Errorf("Get: key '%s' not found, want found", key) - } - case nil: - found = true - if !expectFound { - t.Errorf("Get: key '%s' found, want not found", key) - } - default: - t.Error("Get: got error: ", err) - } - return -} - -func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { - return h.getr(h.db, key, expectFound) -} - -func (h *dbHarness) getValr(db Reader, key, value string) { - t := h.t - found, r := h.getr(db, key, true) - if !found { - return - } - rval := string(r) - if rval != value { - t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value) - } -} - -func (h *dbHarness) getVal(key, value 
string) { - h.getValr(h.db, key, value) -} - -func (h *dbHarness) allEntriesFor(key, want string) { - t := h.t - db := h.db - s := db.s - - ikey := newIkey([]byte(key), kMaxSeq, ktVal) - iter := db.newRawIterator(nil, nil) - if !iter.Seek(ikey) && iter.Error() != nil { - t.Error("AllEntries: error during seek, err: ", iter.Error()) - return - } - res := "[ " - first := true - for iter.Valid() { - if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil { - if s.icmp.uCompare(ikey.ukey(), ukey) != 0 { - break - } - if !first { - res += ", " - } - first = false - switch kt { - case ktVal: - res += string(iter.Value()) - case ktDel: - res += "DEL" - } - } else { - if !first { - res += ", " - } - first = false - res += "CORRUPTED" - } - iter.Next() - } - if !first { - res += " " - } - res += "]" - if res != want { - t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) - } -} - -// Return a string that contains all key,value pairs in order, -// formatted like "(k1->v1)(k2->v2)". -func (h *dbHarness) getKeyVal(want string) { - t := h.t - db := h.db - - s, err := db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - res := "" - iter := s.NewIterator(nil, nil) - for iter.Next() { - res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value())) - } - iter.Release() - - if res != want { - t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want) - } - s.Release() -} - -func (h *dbHarness) waitCompaction() { - t := h.t - db := h.db - if err := db.compSendIdle(db.tcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) waitMemCompaction() { - t := h.t - db := h.db - - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) compactMem() { - t := h.t - db := h.db - - t.Log("starting memdb compaction") - - db.writeLockC <- struct{}{} - defer func() { - <-db.writeLockC - }() - - if _, err := db.rotateMem(0); err != nil { - t.Error("compaction error: ", err) - } - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } - - if h.totalTables() == 0 { - t.Error("zero tables after mem compaction") - } - - t.Log("memdb compaction done") -} - -func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) { - t := h.t - db := h.db - - var _min, _max []byte - if min != "" { - _min = []byte(min) - } - if max != "" { - _max = []byte(max) - } - - t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max) - - if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil { - if wanterr { - t.Log("CompactRangeAt: got error (expected): ", err) - } else { - t.Error("CompactRangeAt: got error: ", err) - } - } else if wanterr { - t.Error("CompactRangeAt: expect error") - } - - t.Log("table range compaction done") -} - -func (h *dbHarness) compactRangeAt(level int, min, max string) { - h.compactRangeAtErr(level, min, max, false) -} - -func (h *dbHarness) compactRange(min, max string) { - t := h.t - db := h.db - - t.Logf("starting DB range compaction: min=%q, max=%q", min, max) - - var r util.Range - if min != "" { - r.Start = []byte(min) - } - if max != "" { - r.Limit = []byte(max) - } - if err := db.CompactRange(r); err != nil { - t.Error("CompactRange: got error: ", err) - } - - t.Log("DB range compaction done") -} - -func (h *dbHarness) sizeOf(start, limit string) uint64 { - sz, err := h.db.SizeOf([]util.Range{ - {[]byte(start), []byte(limit)}, - }) 
- if err != nil { - h.t.Error("SizeOf: got error: ", err) - } - return sz.Sum() -} - -func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) { - sz := h.sizeOf(start, limit) - if sz < low || sz > hi { - h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d", - shorten(start), shorten(limit), low, hi, sz) - } -} - -func (h *dbHarness) getSnapshot() (s *Snapshot) { - s, err := h.db.GetSnapshot() - if err != nil { - h.t.Fatal("GetSnapshot: got error: ", err) - } - return -} -func (h *dbHarness) tablesPerLevel(want string) { - res := "" - nz := 0 - v := h.db.s.version() - for level, tt := range v.tables { - if level > 0 { - res += "," - } - res += fmt.Sprint(len(tt)) - if len(tt) > 0 { - nz = len(res) - } - } - v.release() - res = res[:nz] - if res != want { - h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) - } -} - -func (h *dbHarness) totalTables() (n int) { - v := h.db.s.version() - for _, tt := range v.tables { - n += len(tt) - } - v.release() - return -} - -type keyValue interface { - Key() []byte - Value() []byte -} - -func testKeyVal(t *testing.T, kv keyValue, want string) { - res := string(kv.Key()) + "->" + string(kv.Value()) - if res != want { - t.Errorf("invalid key/value, want=%q, got=%q", want, res) - } -} - -func numKey(num int) string { - return fmt.Sprintf("key%06d", num) -} - -var _bloom_filter = filter.NewBloomFilter(10) - -func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { - for i := 0; i < 4; i++ { - func() { - switch i { - case 0: - case 1: - if o == nil { - o = &opt.Options{Filter: _bloom_filter} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Filter = _bloom_filter - } - case 2: - if o == nil { - o = &opt.Options{Compression: opt.NoCompression} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Compression = opt.NoCompression - } - } - h := newDbHarnessWopt(t, o) - defer h.close() - switch i { - case 3: - h.reopenDB() - } - f(h) - }() - } -} - -func trun(t *testing.T, f func(h *dbHarness)) { - truno(t, nil, f) -} - -func testAligned(t *testing.T, name string, offset uintptr) { - if offset%8 != 0 { - t.Errorf("field %s offset is not 64-bit aligned", name) - } -} - -func Test_FieldsAligned(t *testing.T) { - p1 := new(DB) - testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq)) - p2 := new(session) - testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum)) - testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum)) - testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum)) - testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum)) -} - -func TestDB_Locking(t *testing.T) { - h := newDbHarness(t) - defer h.stor.Close() - h.openAssert(false) - h.closeDB() - h.openAssert(true) -} - -func TestDB_Empty(t *testing.T) { - trun(t, func(h *dbHarness) { - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_ReadWrite(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("bar", "v2") - h.put("foo", "v3") - h.getVal("foo", "v3") - h.getVal("bar", "v2") - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "v2") - }) -} - -func TestDB_PutDeleteGet(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.getVal("foo", "v2") - h.delete("foo") - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_EmptyBatch(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.get("foo", false) - err := 
h.db.Write(new(Batch), h.wo) - if err != nil { - t.Error("writing empty batch yielded error: ", err) - } - h.get("foo", false) -} - -func TestDB_GetFromFrozen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100}) - defer h.close() - - h.put("foo", "v1") - h.getVal("foo", "v1") - - h.stor.DelaySync(storage.TypeTable) // Block sync calls - h.put("k1", strings.Repeat("x", 100000)) // Fill memtable - h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction - for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { - time.Sleep(10 * time.Microsecond) - } - if h.db.getFrozenMem() == nil { - h.stor.ReleaseSync(storage.TypeTable) - t.Fatal("No frozen mem") - } - h.getVal("foo", "v1") - h.stor.ReleaseSync(storage.TypeTable) // Release sync calls - - h.reopenDB() - h.getVal("foo", "v1") - h.get("k1", true) - h.get("k2", true) -} - -func TestDB_GetFromTable(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.getVal("foo", "v1") - }) -} - -func TestDB_GetSnapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - bar := strings.Repeat("b", 200) - h.put("foo", "v1") - h.put(bar, "v1") - - snap, err := h.db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.put("foo", "v2") - h.put(bar, "v2") - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - h.compactMem() - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - snap.Release() - - h.reopenDB() - h.getVal("foo", "v2") - h.getVal(bar, "v2") - }) -} - -func TestDB_GetLevel0Ordering(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 4; i++ { - h.put("bar", fmt.Sprintf("b%d", i)) - h.put("foo", fmt.Sprintf("v%d", i)) - h.compactMem() - } - h.getVal("foo", "v3") - h.getVal("bar", "b3") - - v := h.db.s.version() - t0len := v.tLen(0) - v.release() - if t0len < 2 { - t.Errorf("level-0 table count is less than 2, got %d", t0len) - } - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "b3") - }) -} - -func TestDB_GetOrderedByLevels(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.compactRange("a", "z") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.compactMem() - h.getVal("foo", "v2") - }) -} - -func TestDB_GetPicksCorrectFile(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange to have multiple files in a non-level-0 level. - h.put("a", "va") - h.compactMem() - h.compactRange("a", "b") - h.put("x", "vx") - h.compactMem() - h.compactRange("x", "y") - h.put("f", "vf") - h.compactMem() - h.compactRange("f", "g") - - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - - h.compactRange("", "") - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - }) -} - -func TestDB_GetEncountersEmptyLevel(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange for the following to happen: - // * sstable A in level 0 - // * nothing in level 1 - // * sstable B in level 2 - // Then do enough Get() calls to arrange for an automatic compaction - // of sstable A. A bug would cause the compaction to be marked as - // occurring at level 1 (instead of the correct level 0). 
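The automatic compaction this test provokes is seek-triggered. Below is a minimal sketch of that mechanism, assuming LevelDB's classic seek-budget design; the type and method names are illustrative stand-ins, not this package's API:

    package main

    import "fmt"

    // tFile is a hypothetical stand-in for a table-file descriptor; the real
    // bookkeeping lives inside the version/compaction machinery.
    type tFile struct {
    	level     int
    	seeksLeft int // seek budget, typically proportional to file size
    }

    // recordSeek is assumed to be called when a Get had to probe this table
    // first but found the key in a deeper level. Once the budget is spent the
    // table becomes a compaction candidate at its own level -- the bug above
    // was attributing that compaction to level 1 instead of level 0.
    func (t *tFile) recordSeek() (compact bool) {
    	t.seeksLeft--
    	return t.seeksLeft <= 0
    }

    func main() {
    	f := &tFile{level: 0, seeksLeft: 2}
    	f.recordSeek()              // budget 2 -> 1, no compaction yet
    	fmt.Println(f.recordSeek()) // true: schedule compaction at level 0
    }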
- - // Step 1: First place sstables in levels 0 and 2 - for i := 0; ; i++ { - if i >= 100 { - t.Fatal("could not fill levels-0 and level-2") - } - v := h.db.s.version() - if v.tLen(0) > 0 && v.tLen(2) > 0 { - v.release() - break - } - v.release() - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - - h.getVal("a", "begin") - h.getVal("z", "end") - } - - // Step 2: clear level 1 if necessary. - h.compactRangeAt(1, "", "") - h.tablesPerLevel("1,0,1") - - h.getVal("a", "begin") - h.getVal("z", "end") - - // Step 3: read a bunch of times - for i := 0; i < 200; i++ { - h.get("missing", false) - } - - // Step 4: Wait for compaction to finish - h.waitCompaction() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - h.getVal("a", "begin") - h.getVal("z", "end") - }) -} - -func TestDB_IterMultiWithDelete(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("a", "va") - h.put("b", "vb") - h.put("c", "vc") - h.delete("b") - h.get("b", false) - - iter := h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - - h.compactMem() - - iter = h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - }) -} - -func TestDB_IteratorPinsRef(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "hello") - - // Get iterator that will yield the current contents of the DB. - iter := h.db.NewIterator(nil, nil) - - // Write to force compactions - h.put("foo", "newvalue1") - for i := 0; i < 100; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - h.put("foo", "newvalue2") - - iter.First() - testKeyVal(t, iter, "foo->hello") - if iter.Next() { - t.Errorf("expect eof") - } - iter.Release() -} - -func TestDB_Recover(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("baz", "v5") - - h.reopenDB() - h.getVal("foo", "v1") - - h.getVal("foo", "v1") - h.getVal("baz", "v5") - h.put("bar", "v2") - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - h.put("foo", "v4") - h.getVal("foo", "v4") - h.getVal("bar", "v2") - h.getVal("baz", "v5") - }) -} - -func TestDB_RecoverWithEmptyJournal(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("foo", "v2") - - h.reopenDB() - h.reopenDB() - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - }) -} - -func TestDB_RecoverDuringMemtableCompaction(t *testing.T) { - truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) { - - h.stor.DelaySync(storage.TypeTable) - h.put("big1", strings.Repeat("x", 10000000)) - h.put("big2", strings.Repeat("y", 1000)) - h.put("bar", "v2") - h.stor.ReleaseSync(storage.TypeTable) - - h.reopenDB() - h.getVal("bar", "v2") - h.getVal("big1", strings.Repeat("x", 10000000)) - h.getVal("big2", strings.Repeat("y", 1000)) - }) -} - -func TestDB_MinorCompactionsHappen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000}) - defer h.close() - - n := 500 - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - for i := 0; i < n; i++ { - h.put(key(i), key(i)+strings.Repeat("v", 1000)) - } - - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } - - h.reopenDB() - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } -} - -func TestDB_RecoverWithLargeJournal(t *testing.T) { - h := 
newDbHarness(t) - defer h.close() - - h.put("big1", strings.Repeat("1", 200000)) - h.put("big2", strings.Repeat("2", 200000)) - h.put("small3", strings.Repeat("3", 10)) - h.put("small4", strings.Repeat("4", 10)) - h.tablesPerLevel("") - - // Make sure that if we re-open with a small write buffer size that - // we flush table files in the middle of a large journal file. - h.o.WriteBuffer = 100000 - h.reopenDB() - h.getVal("big1", strings.Repeat("1", 200000)) - h.getVal("big2", strings.Repeat("2", 200000)) - h.getVal("small3", strings.Repeat("3", 10)) - h.getVal("small4", strings.Repeat("4", 10)) - v := h.db.s.version() - if v.tLen(0) <= 1 { - t.Errorf("tables-0 less than one") - } - v.release() -} - -func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 10000000, - Compression: opt.NoCompression, - }) - defer h.close() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - n := 80 - - // Write 8MB (80 values, each 100K) - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - - // Reopening moves updates to level-0 - h.reopenDB() - h.compactRangeAt(0, "", "") - - v = h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - if v.tLen(1) <= 1 { - t.Errorf("level-1 tables less than 1, got %d", v.tLen(1)) - } - v.release() - - for i := 0; i < n; i++ { - h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } -} - -func TestDB_RepeatedWritesToSameKey(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - h.reopenDB() - - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_SparseMerge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - h.putMulti(h.o.GetNumLevel(), "A", "Z") - - // Suppose there is: - // small amount of data with prefix A - // large amount of data with prefix B - // small amount of data with prefix C - // and that recent updates have made small changes to all three prefixes. - // Check that we do not do a compaction that merges all of B in one shot. 
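The maxNextLevelOverlappingBytes assertions that follow check, roughly, that no single table drags more than ~20 MiB of next-level data into one compaction. A self-contained sketch of that bound under assumed, simplified table types (not the package's real ones):

    package main

    import "fmt"

    // table is a hypothetical flattened view of an sstable: its user-key range
    // and its size in bytes.
    type table struct {
    	min, max string
    	size     int64
    }

    func overlaps(a, b table) bool {
    	return a.min <= b.max && b.min <= a.max
    }

    // maxNextLevelOverlapping returns the largest total size of level-(L+1)
    // data that any single level-L table (L >= 1) would pull into a compaction.
    func maxNextLevelOverlapping(levels [][]table) (worst int64) {
    	for l := 1; l+1 < len(levels); l++ {
    		for _, t := range levels[l] {
    			var sum int64
    			for _, u := range levels[l+1] {
    				if overlaps(t, u) {
    					sum += u.size
    				}
    			}
    			if sum > worst {
    				worst = sum
    			}
    		}
    	}
    	return worst
    }

    func main() {
    	levels := [][]table{
    		{}, // level 0 ignored here
    		{{"B0000000000", "B0000099999", 1 << 20}},
    		{{"A", "B0000049999", 8 << 20}, {"B0000050000", "C", 8 << 20}},
    	}
    	fmt.Println(maxNextLevelOverlapping(levels)) // 16777216 (16 MiB), under the 20 MiB bound
    }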
- h.put("A", "va") - value := strings.Repeat("x", 1000) - for i := 0; i < 100000; i++ { - h.put(fmt.Sprintf("B%010d", i), value) - } - h.put("C", "vc") - h.compactMem() - h.compactRangeAt(0, "", "") - h.waitCompaction() - - // Make sparse update - h.put("A", "va2") - h.put("B100", "bvalue2") - h.put("C", "vc2") - h.compactMem() - - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(0, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(1, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) -} - -func TestDB_SizeOf(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - WriteBuffer: 10000000, - }) - defer h.close() - - h.sizeAssert("", "xyz", 0, 0) - h.reopenDB() - h.sizeAssert("", "xyz", 0, 0) - - // Write 8MB (80 values, each 100K) - n := 80 - s1 := 100000 - s2 := 105000 - - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) - } - - // 0 because SizeOf() does not account for memtable space - h.sizeAssert("", numKey(50), 0, 0) - - for r := 0; r < 3; r++ { - h.reopenDB() - - for cs := 0; cs < n; cs += 10 { - for i := 0; i < n; i += 10 { - h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i)) - h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1))) - h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10)) - } - - h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50)) - h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50)) - - h.compactRangeAt(0, numKey(cs), numKey(cs+9)) - } - - v := h.db.s.version() - if v.tLen(0) != 0 { - t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) - } - if v.tLen(1) == 0 { - t.Error("level-1 tables was zero") - } - v.release() - } -} - -func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - sizes := []uint64{ - 10000, - 10000, - 100000, - 10000, - 100000, - 10000, - 300000, - 10000, - } - - for i, n := range sizes { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) - } - - for r := 0; r < 3; r++ { - h.reopenDB() - - var x uint64 - for i, n := range sizes { - y := x - if i > 0 { - y += 1000 - } - h.sizeAssert("", numKey(i), x, y) - x += n - } - - h.sizeAssert(numKey(3), numKey(5), 110000, 111000) - - h.compactRangeAt(0, "", "") - } -} - -func TestDB_Snapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - s1 := h.getSnapshot() - h.put("foo", "v2") - s2 := h.getSnapshot() - h.put("foo", "v3") - s3 := h.getSnapshot() - h.put("foo", "v4") - - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getValr(s3, "foo", "v3") - h.getVal("foo", "v4") - - s3.Release() - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s1.Release() - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s2.Release() - h.getVal("foo", "v4") - }) -} - -func TestDB_SnapshotList(t *testing.T) { - db := &DB{snapsList: list.New()} - e0a := db.acquireSnapshot() - e0b := db.acquireSnapshot() - db.seq = 1 - e1 := db.acquireSnapshot() - db.seq = 2 - e2 := db.acquireSnapshot() - - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e0a) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - 
db.releaseSnapshot(e0b) - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - e2 = db.acquireSnapshot() - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e1) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } -} - -func TestDB_HiddenValuesAreRemoved(t *testing.T) { - trun(t, func(h *dbHarness) { - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.put("foo", "v2") - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactMem() - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactRangeAt(m-2, "", "z") - // DEL eliminated, but v1 remains because we aren't compacting that level - // (DEL can be eliminated because v2 hides v1). - h.allEntriesFor("foo", "[ v2, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). - h.allEntriesFor("foo", "[ v2 ]") - }) -} - -func TestDB_DeletionMarkers2(t *testing.T) { - h := newDbHarness(t) - defer h.close() - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactMem() // Moves to level last-2 - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-2, "", "") - // DEL kept: "last" file overlaps - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). 
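Both deletion-marker tests turn on the same rule, which the final assertion just below exercises: a DEL may be dropped during compaction only once no deeper level can still hold an older value for the same user key. A hedged sketch with hypothetical types:

    package main

    import "fmt"

    // keyRange is a hypothetical stand-in for a table's user-key span.
    type keyRange struct{ min, max string }

    func (r keyRange) contains(ukey string) bool {
    	return r.min <= ukey && ukey <= r.max
    }

    // isBaseLevelForKey reports whether no level below the compaction's output
    // level could still hold an entry for ukey. Only then may a deletion marker
    // (and the older values it shadows) be dropped instead of copied forward.
    func isBaseLevelForKey(levels [][]keyRange, outputLevel int, ukey string) bool {
    	for lvl := outputLevel + 1; lvl < len(levels); lvl++ {
    		for _, r := range levels[lvl] {
    			if r.contains(ukey) {
    				return false
    			}
    		}
    	}
    	return true
    }

    func main() {
    	levels := [][]keyRange{
    		{},           // level 0: empty
    		{{"a", "z"}}, // level 1: compaction output
    		{{"a", "m"}}, // level 2: may still hold an older "foo"
    	}
    	fmt.Println(isBaseLevelForKey(levels, 1, "foo")) // false: keep the DEL
    	fmt.Println(isBaseLevelForKey(levels, 2, "foo")) // true: DEL can go
    }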
- h.allEntriesFor("foo", "[ ]") -} - -func TestDB_CompactionTableOpenError(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1}) - defer h.close() - - im := 10 - jm := 10 - for r := 0; r < 2; r++ { - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - h.compactMem() - } - } - - if n := h.totalTables(); n != im*2 { - t.Errorf("total tables is %d, want %d", n, im) - } - - h.stor.SetEmuErr(storage.TypeTable, tsOpOpen) - go h.db.CompactRange(util.Range{}) - if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil { - t.Log("compaction error: ", err) - } - h.closeDB0() - h.openDB() - h.stor.SetEmuErr(0, tsOpOpen) - - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - } -} - -func TestDB_OverlapInLevel0(t *testing.T) { - trun(t, func(h *dbHarness) { - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } - - // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. - h.put("100", "v100") - h.put("999", "v999") - h.compactMem() - h.delete("100") - h.delete("999") - h.compactMem() - h.tablesPerLevel("0,1,1") - - // Make files spanning the following ranges in level-0: - // files[0] 200 .. 900 - // files[1] 300 .. 500 - // Note that files are sorted by min key. - h.put("300", "v300") - h.put("500", "v500") - h.compactMem() - h.put("200", "v200") - h.put("600", "v600") - h.put("900", "v900") - h.compactMem() - h.tablesPerLevel("2,1,1") - - // Compact away the placeholder files we created initially - h.compactRangeAt(1, "", "") - h.compactRangeAt(2, "", "") - h.tablesPerLevel("2") - - // Do a memtable compaction. Before bug-fix, the compaction would - // not detect the overlap with level-0 files and would incorrectly place - // the deletion in a deeper level. 
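The placement rule this test protects, sketched under stated assumptions (goleveldb's real logic also caps grandparent overlap, which is omitted here): a flushed memtable may be pushed below level 0 only while its key range overlaps nothing in the next level.

    package main

    import "fmt"

    type span struct{ min, max string }

    func overlap(a, b span) bool {
    	return a.min <= b.max && b.min <= a.max
    }

    // pickFlushLevel pushes a freshly flushed table down only while the next
    // level is clear of its key range, capped at maxLevel (compare the
    // GetMaxMemCompationLevel() == 2 check above).
    func pickFlushLevel(levels [][]span, newTable span, maxLevel int) int {
    	level := 0
    	for level < maxLevel {
    		free := true
    		for _, s := range levels[level+1] {
    			if overlap(newTable, s) {
    				free = false
    				break
    			}
    		}
    		if !free {
    			break
    		}
    		level++
    	}
    	return level
    }

    func main() {
    	levels := [][]span{{}, {{"100", "999"}}, {}}
    	fmt.Println(pickFlushLevel(levels, span{"600", "600"}, 2)) // 0: overlaps level 1
    	fmt.Println(pickFlushLevel(levels, span{"a", "b"}, 2))     // 2: nothing overlaps
    }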
- h.delete("600") - h.compactMem() - h.tablesPerLevel("3") - h.get("600", false) - }) -} - -func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("b", "v") - h.reopenDB() - h.delete("b") - h.delete("a") - h.reopenDB() - h.delete("a") - h.reopenDB() - h.put("a", "v") - h.reopenDB() - h.reopenDB() - h.getKeyVal("(a->v)") - h.waitCompaction() - h.getKeyVal("(a->v)") -} - -func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("e") - h.put("", "") - h.reopenDB() - h.put("c", "cv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.put("", "") - h.waitCompaction() - h.reopenDB() - h.put("d", "dv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("d") - h.delete("b") - h.reopenDB() - h.getKeyVal("(->)(c->cv)") - h.waitCompaction() - h.getKeyVal("(->)(c->cv)") -} - -func TestDB_SingleEntryMemCompaction(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 10; i++ { - h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) - h.compactMem() - h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) - h.compactMem() - h.put("k", "v") - h.compactMem() - h.put("", "") - h.compactMem() - h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) - h.compactMem() - } - }) -} - -func TestDB_ManifestWriteError(t *testing.T) { - for i := 0; i < 2; i++ { - func() { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "bar") - h.getVal("foo", "bar") - - // Mem compaction (will succeed) - h.compactMem() - h.getVal("foo", "bar") - v := h.db.s.version() - if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 { - t.Errorf("invalid total tables, want=1 got=%d", n) - } - v.release() - - if i == 0 { - h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite) - } else { - h.stor.SetEmuErr(storage.TypeManifest, tsOpSync) - } - - // Merging compaction (will fail) - h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true) - - h.db.Close() - h.stor.SetEmuErr(0, tsOpWrite) - h.stor.SetEmuErr(0, tsOpSync) - - // Should not lose data - h.openDB() - h.getVal("foo", "bar") - }() - } -} - -func assertErr(t *testing.T, err error, wanterr bool) { - if err != nil { - if wanterr { - t.Log("AssertErr: got error (expected): ", err) - } else { - t.Error("AssertErr: got error: ", err) - } - } else if wanterr { - t.Error("AssertErr: expect error") - } -} - -func TestDB_ClosedIsClosed(t *testing.T) { - h := newDbHarness(t) - db := h.db - - var iter, iter2 iterator.Iterator - var snap *Snapshot - func() { - defer h.close() - - h.put("k", "v") - h.getVal("k", "v") - - iter = db.NewIterator(nil, h.ro) - iter.Seek([]byte("k")) - testKeyVal(t, iter, "k->v") - - var err error - snap, err = db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.getValr(snap, "k", "v") - - iter2 = snap.NewIterator(nil, h.ro) - iter2.Seek([]byte("k")) - testKeyVal(t, iter2, "k->v") - - h.put("foo", "v2") - h.delete("foo") - - // closing DB - iter.Release() - iter2.Release() - }() - - assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) - _, err := db.Get([]byte("k"), h.ro) - assertErr(t, err, true) - - if iter.Valid() { - t.Errorf("iter.Valid should false") - } - assertErr(t, iter.Error(), false) - testKeyVal(t, iter, "->") - if iter.Seek([]byte("k")) { - t.Errorf("iter.Seek should false") - } - assertErr(t, iter.Error(), true) - - assertErr(t, iter2.Error(), false) - - _, err = snap.Get([]byte("k"), h.ro) - assertErr(t, err, 
true) - - _, err = db.GetSnapshot() - assertErr(t, err, true) - - iter3 := db.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - iter3 = snap.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - assertErr(t, db.Delete([]byte("k"), h.wo), true) - - _, err = db.GetProperty("leveldb.stats") - assertErr(t, err, true) - - _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) - assertErr(t, err, true) - - assertErr(t, db.CompactRange(util.Range{}), true) - - assertErr(t, db.Close(), true) -} - -type numberComparer struct{} - -func (numberComparer) num(x []byte) (n int) { - fmt.Sscan(string(x[1:len(x)-1]), &n) - return -} - -func (numberComparer) Name() string { - return "test.NumberComparer" -} - -func (p numberComparer) Compare(a, b []byte) int { - return p.num(a) - p.num(b) -} - -func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } -func (numberComparer) Successor(dst, b []byte) []byte { return nil } - -func TestDB_CustomComparer(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Comparer: numberComparer{}, - WriteBuffer: 1000, - }) - defer h.close() - - h.put("[10]", "ten") - h.put("[0x14]", "twenty") - for i := 0; i < 2; i++ { - h.getVal("[10]", "ten") - h.getVal("[0xa]", "ten") - h.getVal("[20]", "twenty") - h.getVal("[0x14]", "twenty") - h.get("[15]", false) - h.get("[0xf]", false) - h.compactMem() - h.compactRange("[0]", "[9999]") - } - - for n := 0; n < 2; n++ { - for i := 0; i < 100; i++ { - v := fmt.Sprintf("[%d]", i*10) - h.put(v, v) - } - h.compactMem() - h.compactRange("[0]", "[1000000]") - } -} - -func TestDB_ManualCompaction(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } - - h.putMulti(3, "p", "q") - h.tablesPerLevel("1,1,1") - - // Compaction range falls before files - h.compactRange("", "c") - h.tablesPerLevel("1,1,1") - - // Compaction range falls after files - h.compactRange("r", "z") - h.tablesPerLevel("1,1,1") - - // Compaction range overlaps files - h.compactRange("p1", "p9") - h.tablesPerLevel("0,0,1") - - // Populate a different range - h.putMulti(3, "c", "e") - h.tablesPerLevel("1,1,2") - - // Compact just the new range - h.compactRange("b", "f") - h.tablesPerLevel("0,0,2") - - // Compact all - h.putMulti(1, "a", "z") - h.tablesPerLevel("0,1,2") - h.compactRange("", "") - h.tablesPerLevel("0,0,1") -} - -func TestDB_BloomFilter(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableBlockCache: true, - Filter: filter.NewBloomFilter(10), - }) - defer h.close() - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - const n = 10000 - - // Populate multiple layers - for i := 0; i < n; i++ { - h.put(key(i), key(i)) - } - h.compactMem() - h.compactRange("a", "z") - for i := 0; i < n; i += 100 { - h.put(key(i), key(i)) - } - h.compactMem() - - // Prevent auto compactions triggered by seeks - h.stor.DelaySync(storage.TypeTable) - - // Lookup present keys. Should rarely read from small sstable. - h.stor.SetReadCounter(storage.TypeTable) - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)) - } - cnt := int(h.stor.ReadCounter()) - t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) - - if min, max := n, n+2*n/100; cnt < min || cnt > max { - t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) - } - - // Lookup missing keys. Should rarely read from either sstable. 
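For context on the thresholds: with 10 bits per key, the standard bloom-filter model predicts a false-positive rate under 1%, so the 3% budget used below is generous. A quick back-of-the-envelope check (textbook formula, not code from this package):

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	bitsPerKey := 10.0
    	k := math.Round(bitsPerKey * math.Ln2) // optimal hash count, ~7
    	// False-positive rate of a well-sized bloom filter: (1 - e^(-k/bitsPerKey))^k.
    	fp := math.Pow(1-math.Exp(-k/bitsPerKey), k)
    	fmt.Printf("k=%.0f fp=%.4f\n", k, fp) // k=7 fp=0.0082, well under the 3% budget below
    }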
- h.stor.ResetReadCounter() - for i := 0; i < n; i++ { - h.get(key(i)+".missing", false) - } - cnt = int(h.stor.ReadCounter()) - t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) - if max := 3 * n / 100; cnt > max { - t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) - } - - h.stor.ReleaseSync(storage.TypeTable) -} - -func TestDB_Concurrent(t *testing.T) { - const n, secs, maxkey = 4, 2, 1000 - - runtime.GOMAXPROCS(n) - trun(t, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - var cnt [n]uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - var put, get, found uint - defer func() { - t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", - i, cnt[i], put, get, found, get-found) - closeWg.Done() - }() - - rnd := rand.New(rand.NewSource(int64(1000 + i))) - for atomic.LoadUint32(&stop) == 0 { - x := cnt[i] - - k := rnd.Intn(maxkey) - kstr := fmt.Sprintf("%016d", k) - - if (rnd.Int() % 2) > 0 { - put++ - h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) - } else { - get++ - v, err := h.db.Get([]byte(kstr), h.ro) - if err == nil { - found++ - rk, ri, rx := 0, -1, uint32(0) - fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) - if rk != k { - t.Errorf("invalid key want=%d got=%d", k, rk) - } - if ri < 0 || ri >= n { - t.Error("invalid goroutine number: ", ri) - } else { - tx := atomic.LoadUint32(&(cnt[ri])) - if rx > tx { - t.Errorf("invalid seq number, %d > %d ", rx, tx) - } - } - } else if err != ErrNotFound { - t.Error("Get: got error: ", err) - return - } - } - atomic.AddUint32(&cnt[i], 1) - } - }(i) - } - - time.Sleep(secs * time.Second) - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDB_Concurrent2(t *testing.T) { - const n, n2 = 4, 4000 - - runtime.GOMAXPROCS(n*2 + 2) - truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 0; atomic.LoadUint32(&stop) == 0; k++ { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - cmp := comparer.DefaultComparer - for i := 0; i < n2; i++ { - closeWg.Add(1) - go func(i int) { - it := h.db.NewIterator(nil, nil) - var pk []byte - for it.Next() { - kk := it.Key() - if cmp.Compare(kk, pk) <= 0 { - t.Errorf("iter %d: %q is successor of %q", i, pk, kk) - } - pk = append(pk[:0], kk...) 
- var k, vk, vi int - if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { - t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) - } else if n < 1 { - t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) - } - if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { - t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) - } else if n < 2 { - t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) - } - - if vk != k { - t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) - } - } - if err := it.Error(); err != nil { - t.Errorf("iter %d: Got error: %v", i, err) - } - it.Release() - closeWg.Done() - }(i) - } - - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDB_CreateReopenDbOnFile(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - stor, err := storage.OpenFile(dbpath) - if err != nil { - t.Fatalf("(%d) cannot open storage: %s", i, err) - } - db, err := Open(stor, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - if err := stor.Close(); err != nil { - t.Fatalf("(%d) cannot close storage: %s", i, err) - } - } -} - -func TestDB_CreateReopenDbOnFile2(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - db, err := OpenFile(dbpath, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - } -} - -func TestDB_DeletionMarkersOnMemdb(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "v1") - h.compactMem() - h.delete("foo") - h.get("foo", false) - h.getKeyVal("") -} - -func TestDB_LeveldbIssue178(t *testing.T) { - nKeys := (opt.DefaultCompactionTableSize / 30) * 5 - key1 := func(i int) string { - return fmt.Sprintf("my_key_%d", i) - } - key2 := func(i int) string { - return fmt.Sprintf("my_key_%d_xxx", i) - } - - // Disable compression since it affects the creation of layers and the - // code below is trying to test against a very specific scenario. - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - // Create first key range. - batch := new(Batch) - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key1(i)), []byte("value for range 1 key")) - } - h.write(batch) - - // Create second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key2(i)), []byte("value for range 2 key")) - } - h.write(batch) - - // Delete second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Delete([]byte(key2(i))) - } - h.write(batch) - h.waitMemCompaction() - - // Run manual compaction. - h.compactRange(key1(0), key1(nKeys-1)) - - // Checking the keys. 
- h.assertNumKeys(nKeys) -} - -func TestDB_LeveldbIssue200(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("1", "b") - h.put("2", "c") - h.put("3", "d") - h.put("4", "e") - h.put("5", "f") - - iter := h.db.NewIterator(nil, h.ro) - - // Add an element that should not be reflected in the iterator. - h.put("25", "cd") - - iter.Seek([]byte("5")) - assertBytes(t, []byte("5"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("4"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("3"), iter.Key()) - iter.Next() - assertBytes(t, []byte("4"), iter.Key()) - iter.Next() - assertBytes(t, []byte("5"), iter.Key()) -} - -func TestDB_GoleveldbIssue74(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, - }) - defer h.close() - - const n, dur = 10000, 5 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(2) - var done uint32 - go func() { - var i int - defer func() { - t.Logf("WRITER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - - b := new(Batch) - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iv := fmt.Sprintf("VAL%010d", i) - for k := 0; k < n; k++ { - key := fmt.Sprintf("KEY%06d", k) - b.Put([]byte(key), []byte(key+iv)) - b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key)) - } - h.write(b) - - b.Reset() - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err) - } - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err) - } else if string(value) != string(key)+iv { - t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value) - } - - b.Delete(key) - b.Delete(ptrKey) - } - h.write(b) - iter.Release() - snap.Release() - if k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var prevValue string - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err) - } - - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err) - } else if prevValue != "" && string(value) != string(key)+prevValue { - t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value) - } else { - prevValue = string(value[len(key):]) - } - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - wg.Wait() -} - -func TestDB_GetProperties(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - _, err := h.db.GetProperty("leveldb.num-files-at-level") - if err == nil { - t.Error("GetProperty() failed to detect missing level") - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0") - if err != nil { - t.Error("got unexpected error", err) - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0x") - if err == nil { - t.Error("GetProperty() failed to detect 
invalid level") - } -} - -func TestDB_GoleveldbIssue72and83(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, - OpenFilesCacheCapacity: 3, - }) - defer h.close() - - const n, wn, dur = 10000, 100, 30 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - randomData := func(prefix byte, i int) []byte { - data := make([]byte, 1+4+32+64+32) - _, err := crand.Reader.Read(data[1 : len(data)-8]) - if err != nil { - panic(err) - } - data[0] = prefix - binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i)) - binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value()) - return data - } - - keys := make([][]byte, n) - for i := range keys { - keys[i] = randomData(1, 0) - } - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(3) - var done uint32 - go func() { - i := 0 - defer func() { - t.Logf("WRITER DONE #%d", i) - wg.Done() - }() - - b := new(Batch) - for ; i < wn && atomic.LoadUint32(&done) == 0; i++ { - b.Reset() - for _, k1 := range keys { - k2 := randomData(2, i) - b.Put(k2, randomData(42, i)) - b.Put(k1, k2) - } - if err := h.db.Write(b, h.wo); err != nil { - atomic.StoreUint32(&done, 1) - t.Fatalf("WRITER #%d db.Write: %v", i, err) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER0 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - seq := snap.elem.seq - if seq == 0 { - snap.Release() - continue - } - iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil) - writei := int(seq/(n*2) - 1) - var k int - for ; iter.Next(); k++ { - k1 := iter.Key() - k2 := iter.Value() - k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:]) - k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value() - if k1checksum0 != k1checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, writei, k1checksum0, k1checksum1) - } - k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:]) - k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value() - if k2checksum0 != k2checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, writei, k2checksum0, k2checksum1) - } - kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:])) - if writei != kwritei { - t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei) - } - if _, err := snap.Get(k2, nil); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2) - } - } - if err := iter.Error(); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err) - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER1 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iter := h.db.NewIterator(nil, nil) - seq := iter.(*dbIter).seq - if seq == 0 { - iter.Release() - continue - } - writei := int(seq/(n*2) - 1) - var k int - for ok := iter.Last(); ok; ok = iter.Prev() { - k++ - } - if err := iter.Error(); err != nil { - t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err) - } - iter.Release() - if m := (writei+1)*n + n; k != m { - t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m) - } - } - }() - - wg.Wait() -} - -func TestDB_TransientError(t *testing.T) { - h := 
newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 128 * opt.KiB, - OpenFilesCacheCapacity: 3, - DisableCompactionBackoff: true, - }) - defer h.close() - - const ( - nSnap = 20 - nKey = 10000 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Delete([]byte(key)) - } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - } - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) - - runtime.GOMAXPROCS(runtime.NumCPU()) - - rnd := rand.New(rand.NewSource(0xecafdaed)) - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(2) - - go func(i int, snap *Snapshot, sk []int) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for _, k := range sk { - key := fmt.Sprintf("KEY%8d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap, rnd.Perm(nKey)) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - iter := snap.NewIterator(nil, nil) - defer iter.Release() - for k := 0; k < nKey; k++ { - if !iter.Next() { - if err := iter.Error(); err != nil { - t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err) - } else { - t.Fatalf("READER_ITER #%d K%d eoi", i, k) - } - } - key := fmt.Sprintf("KEY%8d", k) - xkey := iter.Key() - if !bytes.Equal([]byte(key), xkey) { - t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey) - } - value := key + vtail - xvalue := iter.Value() - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - CompactionExpandLimitFactor: 1, - }) - defer h.close() - - const ( - nSnap = 190 - nKey = 140 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Delete([]byte(key)) - 
} - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - } - - h.compactMem() - - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - - h.compactRangeAt(0, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - h.compactRangeAt(1, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - runtime.GOMAXPROCS(runtime.NumCPU()) - - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(1) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_TableCompactionBuilder(t *testing.T) { - stor := newTestStorage(t) - defer stor.Close() - - const nSeq = 99 - - o := &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 43 * opt.KiB, - CompactionExpandLimitFactor: 1, - CompactionGPOverlapsFactor: 1, - DisableBlockCache: true, - } - s, err := newSession(stor, o) - if err != nil { - t.Fatal(err) - } - if err := s.create(); err != nil { - t.Fatal(err) - } - defer s.close() - var ( - seq uint64 - targetSize = 5 * o.CompactionTableSize - value = bytes.Repeat([]byte{'0'}, 100) - ) - for i := 0; i < 2; i++ { - tw, err := s.tops.create() - if err != nil { - t.Fatal(err) - } - for k := 0; tw.tw.BytesLen() < targetSize; k++ { - key := []byte(fmt.Sprintf("%09d", k)) - seq += nSeq - 1 - for x := uint64(0); x < nSeq; x++ { - if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil { - t.Fatal(err) - } - } - } - tf, err := tw.finish() - if err != nil { - t.Fatal(err) - } - rec := &sessionRecord{} - rec.addTableFile(i, tf) - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - } - - // Build grandparent. - v := s.version() - c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) - rec := &sessionRecord{} - b := &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize/3 + 961, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - // Build level-1. 
- v = s.version() - c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...)) - rec = &sessionRecord{} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) - } - // Move grandparent to level-3 - for _, t := range v.tables[2] { - rec.delTable(2, t.file.Num()) - rec.addTableFile(3, t) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - v = s.version() - for level, want := range []bool{false, true, false, true, false} { - got := len(v.tables[level]) > 0 - if want != got { - t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got) - } - } - for i, f := range v.tables[1][:len(v.tables[1])-1] { - nf := v.tables[1][i+1] - if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) { - t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num()) - } - } - v.release() - - // Compaction with transient error. - v = s.version() - c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) - rec = &sessionRecord{} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - stor.SetEmuErrOnce(storage.TypeTable, tsOpSync) - stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite) - stor.SetEmuRandErrProb(0xf0) - for { - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Logf("(expected) b.run: %v", err) - } else { - break - } - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - stor.SetEmuErrOnce(0, tsOpSync) - stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite) - - v = s.version() - if len(v.tables[1]) != len(v.tables[2]) { - t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2])) - } - for i, f0 := range v.tables[1] { - f1 := v.tables[2][i] - iter0 := s.tops.newIterator(f0, nil, nil) - iter1 := s.tops.newIterator(f1, nil, nil) - for j := 0; true; j++ { - next0 := iter0.Next() - next1 := iter1.Next() - if next0 != next1 { - t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1) - } - key0 := iter0.Key() - key1 := iter1.Key() - if !bytes.Equal(key0, key1) { - t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1) - } - if next0 == false { - break - } - } - iter0.Release() - iter1.Release() - } - v.release() -} - -func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) { - const ( - vSize = 200 * opt.KiB - tSize = 100 * opt.MiB - mIter = 100 - n = tSize / vSize - ) - - h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - DisableBlockCache: true, - }) - defer h.close() - - key := func(x int) string { - return fmt.Sprintf("v%06d", x) - } - - // Fill. - value := strings.Repeat("x", vSize) - for i := 0; i < n; i++ { - h.put(key(i), value) - } - h.compactMem() - - // Delete all. 
- for i := 0; i < n; i++ { - h.delete(key(i)) - } - h.compactMem() - - var ( - limit = n / limitDiv - - startKey = key(0) - limitKey = key(limit) - maxKey = key(n) - slice = &util.Range{Limit: []byte(limitKey)} - - initialSize0 = h.sizeOf(startKey, limitKey) - initialSize1 = h.sizeOf(limitKey, maxKey) - ) - - t.Logf("initial size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1))) - - for r := 0; true; r++ { - if r >= mIter { - t.Fatal("taking too long to compact") - } - - // Iterates. - iter := h.db.NewIterator(slice, h.ro) - for iter.Next() { - } - if err := iter.Error(); err != nil { - t.Fatalf("Iter err: %v", err) - } - iter.Release() - - // Wait compaction. - h.waitCompaction() - - // Check size. - size0 := h.sizeOf(startKey, limitKey) - size1 := h.sizeOf(limitKey, maxKey) - t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1))) - if size0 < initialSize0/10 { - break - } - } - - if initialSize1 > 0 { - h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB) - } -} - -func TestDB_IterTriggeredCompaction(t *testing.T) { - testDB_IterTriggeredCompaction(t, 1) -} - -func TestDB_IterTriggeredCompactionHalf(t *testing.T) { - testDB_IterTriggeredCompaction(t, 2) -} - -func TestDB_ReadOnly(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "v1") - h.put("bar", "v2") - h.compactMem() - - h.put("xfoo", "v1") - h.put("xbar", "v2") - - t.Log("Trigger read-only") - if err := h.db.SetReadOnly(); err != nil { - h.close() - t.Fatalf("SetReadOnly error: %v", err) - } - - h.stor.SetEmuErr(storage.TypeAll, tsOpCreate, tsOpReplace, tsOpRemove, tsOpWrite, tsOpWrite, tsOpSync) - - ro := func(key, value, wantValue string) { - if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly { - t.Fatalf("unexpected error: %v", err) - } - h.getVal(key, wantValue) - } - - ro("foo", "vx", "v1") - - h.o.ReadOnly = true - h.reopenDB() - - ro("foo", "vx", "v1") - ro("bar", "vx", "v2") - h.assertNumKeys(4) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go deleted file mode 100644 index a8a2bdf72e..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Reader is the interface that wraps basic Get and NewIterator methods. -// This interface is implemented by both DB and Snapshot. -type Reader interface { - Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) - NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator -} - -type Sizes []uint64 - -// Sum returns the sum of the sizes. -func (p Sizes) Sum() (n uint64) { - for _, s := range p { - n += s - } - return n -} - -// Logging. -func (db *DB) log(v ...interface{}) { db.s.log(v...) } -func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } - -// Check and clean files. 
-func (db *DB) checkAndCleanFiles() error { - v := db.s.version() - defer v.release() - - tablesMap := make(map[uint64]bool) - for _, tables := range v.tables { - for _, t := range tables { - tablesMap[t.file.Num()] = false - } - } - - files, err := db.s.getFiles(storage.TypeAll) - if err != nil { - return err - } - - var nTables int - var rem []storage.File - for _, f := range files { - keep := true - switch f.Type() { - case storage.TypeManifest: - keep = f.Num() >= db.s.manifestFile.Num() - case storage.TypeJournal: - if db.frozenJournalFile != nil { - keep = f.Num() >= db.frozenJournalFile.Num() - } else { - keep = f.Num() >= db.journalFile.Num() - } - case storage.TypeTable: - _, keep = tablesMap[f.Num()] - if keep { - tablesMap[f.Num()] = true - nTables++ - } - } - - if !keep { - rem = append(rem, f) - } - } - - if nTables != len(tablesMap) { - var missing []*storage.FileInfo - for num, present := range tablesMap { - if !present { - missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num}) - db.logf("db@janitor table missing @%d", num) - } - } - return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing}) - } - - db.logf("db@janitor F·%d G·%d", len(files), len(rem)) - for _, f := range rem { - db.logf("db@janitor removing %s-%d", f.Type(), f.Num()) - if err := f.Remove(); err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go deleted file mode 100644 index 176ee893f1..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "time" - - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func (db *DB) writeJournal(b *Batch) error { - w, err := db.journal.Next() - if err != nil { - return err - } - if _, err := w.Write(b.encode()); err != nil { - return err - } - if err := db.journal.Flush(); err != nil { - return err - } - if b.sync { - return db.journalWriter.Sync() - } - return nil -} - -func (db *DB) jWriter() { - defer db.closeW.Done() - for { - select { - case b := <-db.journalC: - if b != nil { - db.journalAckC <- db.writeJournal(b) - } - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) rotateMem(n int) (mem *memDB, err error) { - // Wait for pending memdb compaction. - err = db.compSendIdle(db.mcompCmdC) - if err != nil { - return - } - - // Create new memdb and journal. - mem, err = db.newMem(n) - if err != nil { - return - } - - // Schedule memdb compaction. 
db.compSendTrigger(db.mcompCmdC) - return -} - -func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { - delayed := false - flush := func() (retry bool) { - v := db.s.version() - defer v.release() - mdb = db.getEffectiveMem() - defer func() { - if retry { - mdb.decref() - mdb = nil - } - }() - mdbFree = mdb.Free() - switch { - case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed: - delayed = true - time.Sleep(time.Millisecond) - case mdbFree >= n: - return false - case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger(): - delayed = true - err = db.compSendIdle(db.tcompCmdC) - if err != nil { - return false - } - default: - // Allow memdb to grow if it has no entry. - if mdb.Len() == 0 { - mdbFree = n - } else { - mdb.decref() - mdb, err = db.rotateMem(n) - if err == nil { - mdbFree = mdb.Free() - } else { - mdbFree = 0 - } - } - return false - } - return true - } - start := time.Now() - for flush() { - } - if delayed { - db.writeDelay += time.Since(start) - db.writeDelayN++ - } else if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - db.writeDelay = 0 - db.writeDelayN = 0 - } - return -} - -// Write applies the given batch to the DB. The batch will be applied -// sequentially. -// -// It is safe to modify the contents of the arguments after Write returns. -func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) { - err = db.ok() - if err != nil || b == nil || b.Len() == 0 { - return - } - - b.init(wo.GetSync()) - - // The write happens synchronously. - select { - case db.writeC <- b: - if <-db.writeMergedC { - return <-db.writeAckC - } - case db.writeLockC <- struct{}{}: - case err = <-db.compPerErrC: - return - case _, _ = <-db.closeC: - return ErrClosed - } - - merged := 0 - danglingMerge := false - defer func() { - if danglingMerge { - db.writeMergedC <- false - } else { - <-db.writeLockC - } - for i := 0; i < merged; i++ { - db.writeAckC <- err - } - }() - - mdb, mdbFree, err := db.flush(b.size()) - if err != nil { - return - } - defer mdb.decref() - - // Calculate maximum size of the batch. - m := 1 << 20 - if x := b.size(); x <= 128<<10 { - m = x + (128 << 10) - } - m = minInt(m, mdbFree) - - // Merge with other batches. -drain: - for b.size() < m && !b.sync { - select { - case nb := <-db.writeC: - if b.size()+nb.size() <= m { - b.append(nb) - db.writeMergedC <- true - merged++ - } else { - danglingMerge = true - break drain - } - default: - break drain - } - } - - // Set the batch's first seq number relative to the last seq. - b.seq = db.seq + 1 - - // Write journal concurrently if it is large enough. - if b.size() >= (128 << 10) { - // Push the write batch to the journal writer - select { - case db.journalC <- b: - // Write into memdb - if berr := b.memReplay(mdb.DB); berr != nil { - panic(berr) - } - case err = <-db.compPerErrC: - return - case _, _ = <-db.closeC: - err = ErrClosed - return - } - // Wait for journal writer - select { - case err = <-db.journalAckC: - if err != nil { - // Revert memdb if error detected - if berr := b.revertMemReplay(mdb.DB); berr != nil { - panic(berr) - } - return - } - case _, _ = <-db.closeC: - err = ErrClosed - return - } - } else { - err = db.writeJournal(b) - if err != nil { - return - } - if berr := b.memReplay(mdb.DB); berr != nil { - panic(berr) - } - } - - // Set last seq number. - db.addSeq(uint64(b.Len())) - - if b.size() >= mdbFree { - db.rotateMem(0) - } - return -} - -// Put sets the value for the given key. 
It overwrites any previous value -// for that key; a DB is not a multi-map. -// -// It is safe to modify the contents of the arguments after Put returns. -func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Put(key, value) - return db.Write(b, wo) -} - -// Delete deletes the value for the given key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Delete(key) - return db.Write(b, wo) -} - -func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { - iter := mem.NewIterator(nil) - defer iter.Release() - return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) && - (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0)) -} - -// CompactRange compacts the underlying DB for the given key range. -// In particular, deleted and overwritten versions are discarded, -// and the data is rearranged to reduce the cost of operations -// needed to access the data. This operation should typically only -// be invoked by users who understand the underlying implementation. -// -// A nil Range.Start is treated as a key before all keys in the DB. -// And a nil Range.Limit is treated as a key after all keys in the DB. -// Therefore if both is nil then it will compact entire DB. -func (db *DB) CompactRange(r util.Range) error { - if err := db.ok(); err != nil { - return err - } - - // Lock writer. - select { - case db.writeLockC <- struct{}{}: - case err := <-db.compPerErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - - // Check for overlaps in memdb. - mdb := db.getEffectiveMem() - defer mdb.decref() - if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) { - // Memdb compaction. - if _, err := db.rotateMem(0); err != nil { - <-db.writeLockC - return err - } - <-db.writeLockC - if err := db.compSendIdle(db.mcompCmdC); err != nil { - return err - } - } else { - <-db.writeLockC - } - - // Table compaction. - return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit) -} - -// SetReadOnly makes DB read-only. It will stay read-only until reopened. -func (db *DB) SetReadOnly() error { - if err := db.ok(); err != nil { - return err - } - - // Lock writer. - select { - case db.writeLockC <- struct{}{}: - db.compWriteLocking = true - case err := <-db.compPerErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - - // Set compaction read-only. - select { - case db.compErrSetC <- ErrReadOnly: - case perr := <-db.compPerErrC: - return perr - case _, _ = <-db.closeC: - return ErrClosed - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go deleted file mode 100644 index 53f13bb24c..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package leveldb provides implementation of LevelDB key/value database. -// -// Create or open a database: -// -// db, err := leveldb.OpenFile("path/to/db", nil) -// ... -// defer db.Close() -// ... -// -// Read or modify the database content: -// -// // Remember that the contents of the returned slice should not be modified. -// data, err := db.Get([]byte("key"), nil) -// ... 
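-// A key's presence can also be checked without copying its value; a hedged
-// aside, assuming the Has method goleveldb exposes alongside Get:
-//
-// exists, err := db.Has([]byte("key"), nil)
-// ...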
-// err = db.Put([]byte("key"), []byte("value"), nil) -// ... -// err = db.Delete([]byte("key"), nil) -// ... -// -// Iterate over database content: -// -// iter := db.NewIterator(nil, nil) -// for iter.Next() { -// // Remember that the contents of the returned slice should not be modified, and -// // only valid until the next call to Next. -// key := iter.Key() -// value := iter.Value() -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over subset of database content with a particular prefix: -// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Seek-then-Iterate: -// -// iter := db.NewIterator(nil, nil) -// for ok := iter.Seek(key); ok; ok = iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over subset of database content: -// -// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Batch writes: -// -// batch := new(leveldb.Batch) -// batch.Put([]byte("foo"), []byte("value")) -// batch.Put([]byte("bar"), []byte("another value")) -// batch.Delete([]byte("baz")) -// err = db.Write(batch, nil) -// ... -// -// Use bloom filter: -// -// o := &opt.Options{ -// Filter: filter.NewBloomFilter(10), -// } -// db, err := leveldb.OpenFile("path/to/db", o) -// ... -// defer db.Close() -// ... -package leveldb diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go deleted file mode 100644 index c8bd66a5aa..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrReadOnly = errors.New("leveldb: read-only mode") - ErrSnapshotReleased = errors.New("leveldb: snapshot released") - ErrIterReleased = errors.New("leveldb: iterator released") - ErrClosed = errors.New("leveldb: closed") -) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go deleted file mode 100644 index 84b5d6b7b2..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package errors provides common error types used throughout leveldb. -package errors - -import ( - "errors" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = New("leveldb: not found") - ErrReleased = util.ErrReleased - ErrHasReleaser = util.ErrHasReleaser -) - -// New returns an error that formats as the given text. -func New(text string) error { - return errors.New(text) -} - -// ErrCorrupted is the type that wraps errors that indicate corruption in -// the database. 
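// A minimal caller-side sketch (db and key are placeholders; IsCorrupted is
// defined later in this file): corruption is usually detected through the
// helper rather than a type assertion:
//
// if _, err := db.Get(key, nil); errors.IsCorrupted(err) {
//     // the store is damaged; recovery (e.g. leveldb.RecoverFile) is needed
// }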
-type ErrCorrupted struct { - File *storage.FileInfo - Err error -} - -func (e *ErrCorrupted) Error() string { - if e.File != nil { - return fmt.Sprintf("%v [file=%v]", e.Err, e.File) - } else { - return e.Err.Error() - } -} - -// NewErrCorrupted creates new ErrCorrupted error. -func NewErrCorrupted(f storage.File, err error) error { - return &ErrCorrupted{storage.NewFileInfo(f), err} -} - -// IsCorrupted returns a boolean indicating whether the error is indicating -// a corruption. -func IsCorrupted(err error) bool { - switch err.(type) { - case *ErrCorrupted: - return true - } - return false -} - -// ErrMissingFiles is the type that indicating a corruption due to missing -// files. -type ErrMissingFiles struct { - Files []*storage.FileInfo -} - -func (e *ErrMissingFiles) Error() string { return "file missing" } - -// SetFile sets 'file info' of the given error with the given file. -// Currently only ErrCorrupted is supported, otherwise will do nothing. -func SetFile(err error, f storage.File) error { - switch x := err.(type) { - case *ErrCorrupted: - x.File = storage.NewFileInfo(f) - return x - } - return err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go deleted file mode 100644 index b328ece4e2..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Leveldb external", func() { - o := &opt.Options{ - DisableBlockCache: true, - BlockRestartInterval: 5, - BlockSize: 80, - Compression: opt.NoCompression, - OpenFilesCacheCapacity: -1, - Strict: opt.StrictAll, - WriteBuffer: 1000, - CompactionTableSize: 2000, - } - - Describe("write test", func() { - It("should do write correctly", func(done Done) { - db := newTestingDB(o, nil, nil) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(), - } - testutil.DoDBTesting(&t) - db.TestClose() - done <- true - }, 20.0) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := newTestingDB(o, nil, nil) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - err := db.TestPut(key, value) - Expect(err).NotTo(HaveOccurred()) - }) - - return db - }, func(db testutil.DB) { - db.(*testingDB).TestClose() - }) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go deleted file mode 100644 index 37c1e146bc..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/filter" -) - -type iFilter struct { - filter.Filter -} - -func (f iFilter) Contains(filter, key []byte) bool { - return f.Filter.Contains(filter, iKey(key).ukey()) -} - -func (f iFilter) NewGenerator() filter.FilterGenerator { - return iFilterGenerator{f.Filter.NewGenerator()} -} - -type iFilterGenerator struct { - filter.FilterGenerator -} - -func (g iFilterGenerator) Add(key []byte) { - g.FilterGenerator.Add(iKey(key).ukey()) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go deleted file mode 100644 index bab0e99705..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "github.com/syndtr/goleveldb/leveldb/util" -) - -func bloomHash(key []byte) uint32 { - return util.Hash(key, 0xbc9f1d34) -} - -type bloomFilter int - -// The bloom filter serializes its parameters and is backward compatible -// with respect to them. Therefor, its parameters are not added to its -// name. -func (bloomFilter) Name() string { - return "leveldb.BuiltinBloomFilter" -} - -func (f bloomFilter) Contains(filter, key []byte) bool { - nBytes := len(filter) - 1 - if nBytes < 1 { - return false - } - nBits := uint32(nBytes * 8) - - // Use the encoded k so that we can read filters generated by - // bloom filters created using different parameters. - k := filter[nBytes] - if k > 30 { - // Reserved for potentially new encodings for short bloom filters. - // Consider it a match. - return true - } - - kh := bloomHash(key) - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < k; j++ { - bitpos := kh % nBits - if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { - return false - } - kh += delta - } - return true -} - -func (f bloomFilter) NewGenerator() FilterGenerator { - // Round down to reduce probing cost a little bit. - k := uint8(f * 69 / 100) // 0.69 =~ ln(2) - if k < 1 { - k = 1 - } else if k > 30 { - k = 30 - } - return &bloomFilterGenerator{ - n: int(f), - k: k, - } -} - -type bloomFilterGenerator struct { - n int - k uint8 - - keyHashes []uint32 -} - -func (g *bloomFilterGenerator) Add(key []byte) { - // Use double-hashing to generate a sequence of hash values. - // See analysis in [Kirsch,Mitzenmacher 2006]. - g.keyHashes = append(g.keyHashes, bloomHash(key)) -} - -func (g *bloomFilterGenerator) Generate(b Buffer) { - // Compute bloom filter size (in both bits and bytes) - nBits := uint32(len(g.keyHashes) * g.n) - // For small n, we can see a very high false positive rate. Fix it - // by enforcing a minimum bloom filter length. - if nBits < 64 { - nBits = 64 - } - nBytes := (nBits + 7) / 8 - nBits = nBytes * 8 - - dest := b.Alloc(int(nBytes) + 1) - dest[nBytes] = g.k - for _, kh := range g.keyHashes { - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < g.k; j++ { - bitpos := kh % nBits - dest[bitpos/8] |= (1 << (bitpos % 8)) - kh += delta - } - } - - g.keyHashes = g.keyHashes[:0] -} - -// NewBloomFilter creates a new initialized bloom filter for given -// bitsPerKey. 
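// As a worked example of the parameters above: bitsPerKey = 10 gives
// k = 10*69/100 = 6 hash probes per key (integer truncation of 10·ln 2 ≈ 6.9),
// and in practice a false positive rate on the order of 1%.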
-// -// Since bitsPerKey is persisted individually for each bloom filter -// serialization, bloom filters are backwards compatible with respect to -// changing bitsPerKey. This means that no big performance penalty will -// be experienced when changing the parameter. See documentation for -// opt.Options.Filter for more information. -func NewBloomFilter(bitsPerKey int) Filter { - return bloomFilter(bitsPerKey) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go deleted file mode 100644 index 1fb56f0713..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "encoding/binary" - "github.com/syndtr/goleveldb/leveldb/util" - "testing" -) - -type harness struct { - t *testing.T - - bloom Filter - generator FilterGenerator - filter []byte -} - -func newHarness(t *testing.T) *harness { - bloom := NewBloomFilter(10) - return &harness{ - t: t, - bloom: bloom, - generator: bloom.NewGenerator(), - } -} - -func (h *harness) add(key []byte) { - h.generator.Add(key) -} - -func (h *harness) addNum(key uint32) { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - h.add(b[:]) -} - -func (h *harness) build() { - b := &util.Buffer{} - h.generator.Generate(b) - h.filter = b.Bytes() -} - -func (h *harness) reset() { - h.filter = nil -} - -func (h *harness) filterLen() int { - return len(h.filter) -} - -func (h *harness) assert(key []byte, want, silent bool) bool { - got := h.bloom.Contains(h.filter, key) - if !silent && got != want { - h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want) - } - return got -} - -func (h *harness) assertNum(key uint32, want, silent bool) bool { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - return h.assert(b[:], want, silent) -} - -func TestBloomFilter_Empty(t *testing.T) { - h := newHarness(t) - h.build() - h.assert([]byte("hello"), false, false) - h.assert([]byte("world"), false, false) -} - -func TestBloomFilter_Small(t *testing.T) { - h := newHarness(t) - h.add([]byte("hello")) - h.add([]byte("world")) - h.build() - h.assert([]byte("hello"), true, false) - h.assert([]byte("world"), true, false) - h.assert([]byte("x"), false, false) - h.assert([]byte("foo"), false, false) -} - -func nextN(n int) int { - switch { - case n < 10: - n += 1 - case n < 100: - n += 10 - case n < 1000: - n += 100 - default: - n += 1000 - } - return n -} - -func TestBloomFilter_VaryingLengths(t *testing.T) { - h := newHarness(t) - var mediocre, good int - for n := 1; n < 10000; n = nextN(n) { - h.reset() - for i := 0; i < n; i++ { - h.addNum(uint32(i)) - } - h.build() - - got := h.filterLen() - want := (n * 10 / 8) + 40 - if got > want { - t.Errorf("filter len test failed, '%d' > '%d'", got, want) - } - - for i := 0; i < n; i++ { - h.assertNum(uint32(i), true, false) - } - - var rate float32 - for i := 0; i < 10000; i++ { - if h.assertNum(uint32(i+1000000000), true, true) { - rate++ - } - } - rate /= 10000 - if rate > 0.02 { - t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n) - } - if rate > 0.0125 { - mediocre++ - } else { - good++ - } - } - t.Logf("false positive rate: %d good, %d mediocre", good, mediocre) - if mediocre > good/5 { 
- t.Error("mediocre false positive rate is more than expected") - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go deleted file mode 100644 index 7a925c5a86..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package filter provides interface and implementation of probabilistic -// data structure. -// -// The filter is resposible for creating small filter from a set of keys. -// These filter will then used to test whether a key is a member of the set. -// In many cases, a filter can cut down the number of disk seeks from a -// handful to a single disk seek per DB.Get call. -package filter - -// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. -type Buffer interface { - // Alloc allocs n bytes of slice from the buffer. This also advancing - // write offset. - Alloc(n int) []byte - - // Write appends the contents of p to the buffer. - Write(p []byte) (n int, err error) - - // WriteByte appends the byte c to the buffer. - WriteByte(c byte) error -} - -// Filter is the filter. -type Filter interface { - // Name returns the name of this policy. - // - // Note that if the filter encoding changes in an incompatible way, - // the name returned by this method must be changed. Otherwise, old - // incompatible filters may be passed to methods of this type. - Name() string - - // NewGenerator creates a new filter generator. - NewGenerator() FilterGenerator - - // Contains returns true if the filter contains the given key. - // - // The filter are filters generated by the filter generator. - Contains(filter, key []byte) bool -} - -// FilterGenerator is the filter generator. -type FilterGenerator interface { - // Add adds a key to the filter generator. - // - // The key may become invalid after call to this method end, therefor - // key must be copied if implementation require keeping key for later - // use. The key should not modified directly, doing so may cause - // undefined results. - Add(key []byte) - - // Generate generates filters based on keys passed so far. After call - // to Generate the filter generator maybe resetted, depends on implementation. - Generate(b Buffer) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go deleted file mode 100644 index a23ab05f70..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/util" -) - -// BasicArray is the interface that wraps basic Len and Search method. -type BasicArray interface { - // Len returns length of the array. - Len() int - - // Search finds smallest index that point to a key that is greater - // than or equal to the given key. - Search(key []byte) int -} - -// Array is the interface that wraps BasicArray and basic Index method. 
-type Array interface {
- BasicArray
-
- // Index returns the key/value pair at index i.
- Index(i int) (key, value []byte)
-}
-
-// ArrayIndexer is the interface that wraps BasicArray and basic Get method.
-type ArrayIndexer interface {
- BasicArray
-
- // Get returns a new data iterator at index i.
- Get(i int) Iterator
-}
-
-type basicArrayIterator struct {
- util.BasicReleaser
- array BasicArray
- pos int
- err error
-}
-
-func (i *basicArrayIterator) Valid() bool {
- return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
-}
-
-func (i *basicArrayIterator) First() bool {
- if i.Released() {
- i.err = ErrIterReleased
- return false
- }
-
- if i.array.Len() == 0 {
- i.pos = -1
- return false
- }
- i.pos = 0
- return true
-}
-
-func (i *basicArrayIterator) Last() bool {
- if i.Released() {
- i.err = ErrIterReleased
- return false
- }
-
- n := i.array.Len()
- if n == 0 {
- i.pos = 0
- return false
- }
- i.pos = n - 1
- return true
-}
-
-func (i *basicArrayIterator) Seek(key []byte) bool {
- if i.Released() {
- i.err = ErrIterReleased
- return false
- }
-
- n := i.array.Len()
- if n == 0 {
- i.pos = 0
- return false
- }
- i.pos = i.array.Search(key)
- if i.pos >= n {
- return false
- }
- return true
-}
-
-func (i *basicArrayIterator) Next() bool {
- if i.Released() {
- i.err = ErrIterReleased
- return false
- }
-
- i.pos++
- if n := i.array.Len(); i.pos >= n {
- i.pos = n
- return false
- }
- return true
-}
-
-func (i *basicArrayIterator) Prev() bool {
- if i.Released() {
- i.err = ErrIterReleased
- return false
- }
-
- i.pos--
- if i.pos < 0 {
- i.pos = -1
- return false
- }
- return true
-}
-
-func (i *basicArrayIterator) Error() error { return i.err }
-
-type arrayIterator struct {
- basicArrayIterator
- array Array
- pos int
- key, value []byte
-}
-
-func (i *arrayIterator) updateKV() {
- if i.pos == i.basicArrayIterator.pos {
- return
- }
- i.pos = i.basicArrayIterator.pos
- if i.Valid() {
- i.key, i.value = i.array.Index(i.pos)
- } else {
- i.key = nil
- i.value = nil
- }
-}
-
-func (i *arrayIterator) Key() []byte {
- i.updateKV()
- return i.key
-}
-
-func (i *arrayIterator) Value() []byte {
- i.updateKV()
- return i.value
-}
-
-type arrayIteratorIndexer struct {
- basicArrayIterator
- array ArrayIndexer
-}
-
-func (i *arrayIteratorIndexer) Get() Iterator {
- if i.Valid() {
- return i.array.Get(i.basicArrayIterator.pos)
- }
- return nil
-}
-
-// NewArrayIterator returns an iterator from the given array.
-func NewArrayIterator(array Array) Iterator {
- return &arrayIterator{
- basicArrayIterator: basicArrayIterator{array: array, pos: -1},
- array: array,
- pos: -1,
- }
-}
-
-// NewArrayIndexer returns an index iterator from the given array.
-func NewArrayIndexer(array ArrayIndexer) IteratorIndexer {
- return &arrayIteratorIndexer{
- basicArrayIterator: basicArrayIterator{array: array, pos: -1},
- array: array,
- }
-} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go deleted file mode 100644 index 1ed6d07cbb..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go +++ /dev/null @@ -1,30 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package iterator_test
-
-import (
- . "github.com/onsi/ginkgo"
-
- .
"github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Array iterator", func() { - It("Should iterates and seeks correctly", func() { - // Build key/value. - kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3) - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewArrayIterator(kv), - } - testutil.DoIteratorTesting(&t) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go deleted file mode 100644 index 939adbb933..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// IteratorIndexer is the interface that wraps CommonIterator and basic Get -// method. IteratorIndexer provides index for indexed iterator. -type IteratorIndexer interface { - CommonIterator - - // Get returns a new data iterator for the current position, or nil if - // done. - Get() Iterator -} - -type indexedIterator struct { - util.BasicReleaser - index IteratorIndexer - strict bool - - data Iterator - err error - errf func(err error) - closed bool -} - -func (i *indexedIterator) setData() { - if i.data != nil { - i.data.Release() - } - i.data = i.index.Get() -} - -func (i *indexedIterator) clearData() { - if i.data != nil { - i.data.Release() - } - i.data = nil -} - -func (i *indexedIterator) indexErr() { - if err := i.index.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - i.err = err - } -} - -func (i *indexedIterator) dataErr() bool { - if err := i.data.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *indexedIterator) Valid() bool { - return i.data != nil && i.data.Valid() -} - -func (i *indexedIterator) First() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.First() { - i.indexErr() - i.clearData() - return false - } - i.setData() - return i.Next() -} - -func (i *indexedIterator) Last() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Last() { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - return true -} - -func (i *indexedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Seek(key) { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Seek(key) { - if i.dataErr() { - return false - } - i.clearData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Next() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Next(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if 
!i.index.Next() { - i.indexErr() - return false - } - i.setData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Prev() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Prev(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Prev() { - i.indexErr() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - } - return true -} - -func (i *indexedIterator) Key() []byte { - if i.data == nil { - return nil - } - return i.data.Key() -} - -func (i *indexedIterator) Value() []byte { - if i.data == nil { - return nil - } - return i.data.Value() -} - -func (i *indexedIterator) Release() { - i.clearData() - i.index.Release() - i.BasicReleaser.Release() -} - -func (i *indexedIterator) Error() error { - if i.err != nil { - return i.err - } - if err := i.index.Error(); err != nil { - return err - } - return nil -} - -func (i *indexedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewIndexedIterator returns an 'indexed iterator'. An index is iterator -// that returns another iterator, a 'data iterator'. A 'data iterator' is the -// iterator that contains actual key/value pairs. -// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'indexed iterator', otherwise the iterator will -// continue to the next 'data iterator'. Corruption on 'index iterator' will not be -// ignored and will halt the iterator. -func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator { - return &indexedIterator{index: index, strict: strict} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go deleted file mode 100644 index 72a7978924..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - "sort" - - . "github.com/onsi/ginkgo" - - "github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -type keyValue struct { - key []byte - testutil.KeyValue -} - -type keyValueIndex []keyValue - -func (x keyValueIndex) Search(key []byte) int { - return sort.Search(x.Len(), func(i int) bool { - return comparer.DefaultComparer.Compare(x[i].key, key) >= 0 - }) -} - -func (x keyValueIndex) Len() int { return len(x) } -func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil } -func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) } - -var _ = testutil.Defer(func() { - Describe("Indexed iterator", func() { - Test := func(n ...int) func() { - if len(n) == 0 { - rnd := testutil.NewRand() - n = make([]int, rnd.Intn(17)+3) - for i := range n { - n[i] = rnd.Intn(19) + 1 - } - } - - return func() { - It("Should iterates and seeks correctly", func(done Done) { - // Build key/value. 
- index := make(keyValueIndex, len(n))
- sum := 0
- for _, x := range n {
- sum += x
- }
- kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4)
- for i, j := 0, 0; i < len(n); i++ {
- for x := n[i]; x > 0; x-- {
- key, value := kv.Index(j)
- index[i].key = key
- index[i].Put(key, value)
- j++
- }
- }
-
- // Test the iterator.
- t := testutil.IteratorTesting{
- KeyValue: kv.Clone(),
- Iter: NewIndexedIterator(NewArrayIndexer(index), true),
- }
- testutil.DoIteratorTesting(&t)
- done <- true
- }, 1.5)
- }
- }
-
- Describe("with 100 keys", Test(100))
- Describe("with 50-50 keys", Test(50, 50))
- Describe("with 50-1 keys", Test(50, 1))
- Describe("with 50-1-50 keys", Test(50, 1, 50))
- Describe("with 1-50 keys", Test(1, 50))
- Describe("with random N-keys", Test())
- })
-}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go deleted file mode 100644 index c2522860b0..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go +++ /dev/null @@ -1,131 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Package iterator provides the interface and implementations to traverse
-// over the contents of a database.
-package iterator
-
-import (
- "errors"
-
- "github.com/syndtr/goleveldb/leveldb/util"
-)
-
-var (
- ErrIterReleased = errors.New("leveldb/iterator: iterator released")
-)
-
-// IteratorSeeker is the interface that wraps the 'seeks method'.
-type IteratorSeeker interface {
- // First moves the iterator to the first key/value pair. If the iterator
- // only contains one key/value pair then First and Last would move
- // to the same key/value pair.
- // It returns whether such a pair exists.
- First() bool
-
- // Last moves the iterator to the last key/value pair. If the iterator
- // only contains one key/value pair then First and Last would move
- // to the same key/value pair.
- // It returns whether such a pair exists.
- Last() bool
-
- // Seek moves the iterator to the first key/value pair whose key is greater
- // than or equal to the given key.
- // It returns whether such a pair exists.
- //
- // It is safe to modify the contents of the argument after Seek returns.
- Seek(key []byte) bool
-
- // Next moves the iterator to the next key/value pair.
- // It returns false when the iterator is exhausted.
- Next() bool
-
- // Prev moves the iterator to the previous key/value pair.
- // It returns false when the iterator is exhausted.
- Prev() bool
-}
-
-// CommonIterator is the interface that wraps common iterator methods.
-type CommonIterator interface {
- IteratorSeeker
-
- // util.Releaser is the interface that wraps basic Release method.
- // When called, Release releases any resources associated with the
- // iterator.
- util.Releaser
-
- // util.ReleaseSetter is the interface that wraps the basic SetReleaser
- // method.
- util.ReleaseSetter
-
- // TODO: Remove this when ready.
- Valid() bool
-
- // Error returns any accumulated error. Exhausting all the key/value pairs
- // is not considered to be an error.
- Error() error
-}
-
-// Iterator iterates over a DB's key/value pairs in key order.
-//
-// When it encounters an error, any 'seeks method' will return false and will
-// yield no key/value pairs. The error can be queried by calling the Error
-// method. Calling Release is still necessary.
-//
-// An iterator must be released after use, but it is not necessary to read
-// an iterator until exhaustion.
-// Also, an iterator is not necessarily goroutine-safe, but it is safe to use
-// multiple iterators concurrently, with each in a dedicated goroutine.
-type Iterator interface {
- CommonIterator
-
- // Key returns the key of the current key/value pair, or nil if done.
- // The caller should not modify the contents of the returned slice, and
- // its contents may change on the next call to any 'seeks method'.
- Key() []byte
-
- // Value returns the value of the current key/value pair, or nil if done.
- // The caller should not modify the contents of the returned slice, and
- // its contents may change on the next call to any 'seeks method'.
- Value() []byte
-}
-
-// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback
-// method.
-//
-// ErrorCallbackSetter is implemented by indexed and merged iterators.
type ErrorCallbackSetter interface {
- // SetErrorCallback sets an error callback for the corresponding
- // iterator. Use nil to clear the callback.
- SetErrorCallback(f func(err error))
-}
-
-type emptyIterator struct {
- util.BasicReleaser
- err error
-}
-
-func (i *emptyIterator) rErr() {
- if i.err == nil && i.Released() {
- i.err = ErrIterReleased
- }
-}
-
-func (*emptyIterator) Valid() bool { return false }
-func (i *emptyIterator) First() bool { i.rErr(); return false }
-func (i *emptyIterator) Last() bool { i.rErr(); return false }
-func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
-func (i *emptyIterator) Next() bool { i.rErr(); return false }
-func (i *emptyIterator) Prev() bool { i.rErr(); return false }
-func (*emptyIterator) Key() []byte { return nil }
-func (*emptyIterator) Value() []byte { return nil }
-func (i *emptyIterator) Error() error { return i.err }
-
-// NewEmptyIterator creates an empty iterator. The err parameter can be
-// nil, but if not nil the given err will be returned by the Error method.
-func NewEmptyIterator(err error) Iterator {
- return &emptyIterator{err: err}
-} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go deleted file mode 100644 index 5ef8d5bafb..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@
-package iterator_test
-
-import (
- "testing"
-
- "github.com/syndtr/goleveldb/leveldb/testutil"
-)
-
-func TestIterator(t *testing.T) {
- testutil.RunSuite(t, "Iterator Suite")
-} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go deleted file mode 100644 index 1a7e29df8f..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go +++ /dev/null @@ -1,304 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
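// For orientation, a minimal hedged sketch of using the merged iterator
// defined in this file (a and b are hypothetical Iterator values over
// sorted, disjoint key sets; comparer is this tree's comparer package):
//
// mi := NewMergedIterator([]Iterator{a, b}, comparer.DefaultComparer, true)
// for mi.Next() {
//     // Keys arrive in strictly increasing order across both inputs.
// }
// mi.Release()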
- -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type mergedIterator struct { - cmp comparer.Comparer - iters []Iterator - strict bool - - keys [][]byte - index int - dir dir - err error - errf func(err error) - releaser util.Releaser -} - -func assertKey(key []byte) []byte { - if key == nil { - panic("leveldb/iterator: nil key") - } - return key -} - -func (i *mergedIterator) iterErr(iter Iterator) bool { - if err := iter.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *mergedIterator) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *mergedIterator) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.First(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirEOI - return i.prev() -} - -func (i *mergedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Seek(key): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) next() bool { - var key []byte - if i.dir == dirForward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirEOI - return false - } - i.dir = dirForward - return true -} - -func (i *mergedIterator) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirSOI: - return i.First() - case dirBackward: - key := append([]byte{}, i.keys[i.index]...) 
- if !i.Seek(key) { - return false - } - return i.Next() - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Next(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.next() -} - -func (i *mergedIterator) prev() bool { - var key []byte - if i.dir == dirBackward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirSOI - return false - } - i.dir = dirBackward - return true -} - -func (i *mergedIterator) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - key := append([]byte{}, i.keys[i.index]...) - for x, iter := range i.iters { - if x == i.index { - continue - } - seek := iter.Seek(key) - switch { - case seek && iter.Prev(), !seek && iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Prev(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.prev() -} - -func (i *mergedIterator) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.keys[i.index] -} - -func (i *mergedIterator) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.iters[i.index].Value() -} - -func (i *mergedIterator) Release() { - if i.dir != dirReleased { - i.dir = dirReleased - for _, iter := range i.iters { - iter.Release() - } - i.iters = nil - i.keys = nil - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *mergedIterator) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *mergedIterator) Error() error { - return i.err -} - -func (i *mergedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewMergedIterator returns an iterator that merges its input. Walking the -// resultant iterator will return all key/value pairs of all input iterators -// in strictly increasing key order, as defined by cmp. -// The input's key ranges may overlap, but there are assumed to be no duplicate -// keys: if iters[i] contains a key k then iters[j] will not contain that key k. -// None of the iters may be nil. -// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'merged iterator', otherwise the iterator will -// continue to the next 'input iterator'. -func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { - return &mergedIterator{ - iters: iters, - cmp: cmp, - strict: strict, - keys: make([][]byte, len(iters)), - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go deleted file mode 100644 index e523b63e4b..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. 
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package iterator_test
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "github.com/syndtr/goleveldb/leveldb/comparer"
- . "github.com/syndtr/goleveldb/leveldb/iterator"
- "github.com/syndtr/goleveldb/leveldb/testutil"
-)
-
-var _ = testutil.Defer(func() {
- Describe("Merged iterator", func() {
- Test := func(filled int, empty int) func() {
- return func() {
- It("Should iterates and seeks correctly", func(done Done) {
- rnd := testutil.NewRand()
-
- // Build key/value.
- filledKV := make([]testutil.KeyValue, filled)
- kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4)
- kv.Iterate(func(i int, key, value []byte) {
- filledKV[rnd.Intn(filled)].Put(key, value)
- })
-
- // Create iterators.
- iters := make([]Iterator, filled+empty)
- for i := range iters {
- if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) {
- filled--
- Expect(filledKV[filled].Len()).ShouldNot(BeZero())
- iters[i] = NewArrayIterator(filledKV[filled])
- } else {
- empty--
- iters[i] = NewEmptyIterator(nil)
- }
- }
-
- // Test the iterator.
- t := testutil.IteratorTesting{
- KeyValue: kv.Clone(),
- Iter: NewMergedIterator(iters, comparer.DefaultComparer, true),
- }
- testutil.DoIteratorTesting(&t)
- done <- true
- }, 1.5)
- }
- }
-
- Describe("with three, all filled iterators", Test(3, 0))
- Describe("with one filled, one empty iterators", Test(1, 1))
- Describe("with one filled, two empty iterators", Test(1, 2))
- })
-}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go deleted file mode 100644 index 6519ec660e..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go +++ /dev/null @@ -1,520 +0,0 @@
-// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
-// License, authors and contributors information can be found at the URLs below, respectively:
-// https://code.google.com/p/leveldb-go/source/browse/LICENSE
-// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
-// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
-
-// Package journal reads and writes sequences of journals. Each journal is a stream
-// of bytes that completes before the next journal starts.
-//
-// When reading, call Next to obtain an io.Reader for the next journal. Next will
-// return io.EOF when there are no more journals. It is valid to call Next
-// without reading the current journal to exhaustion.
-//
-// When writing, call Next to obtain an io.Writer for the next journal. Calling
-// Next finishes the current journal. Call Close to finish the final journal.
-//
-// Optionally, call Flush to finish the current journal and flush the underlying
-// writer without starting a new journal. To start a new journal after flushing,
-// call Next.
-//
-// Neither Readers nor Writers are safe to use concurrently.
-//
-// Example code:
-// func read(r io.Reader) ([]string, error) {
-// var ss []string
-// journals := journal.NewReader(r, nil, true, true)
-// for {
-// j, err := journals.Next()
-// if err == io.EOF {
-// break
-// }
-// if err != nil {
-// return nil, err
-// }
-// s, err := ioutil.ReadAll(j)
-// if err != nil {
-// return nil, err
-// }
-// ss = append(ss, string(s))
-// }
-// return ss, nil
-// }
-//
-// func write(w io.Writer, ss []string) error {
-// journals := journal.NewWriter(w)
-// for _, s := range ss {
-// j, err := journals.Next()
-// if err != nil {
-// return err
-// }
-// if _, err := j.Write([]byte(s)); err != nil {
-// return err
-// }
-// }
-// return journals.Close()
-// }
-//
-// The wire format is that the stream is divided into 32 KiB blocks, and each
-// block contains a number of tightly packed chunks. Chunks cannot cross block
-// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
-// block must be zero.
-//
-// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4
-// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type)
-// followed by a payload. The checksum is over the chunk type and the payload.
-//
-// There are four chunk types: whether the chunk is the full journal, or the
-// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
-// has one first chunk, zero or more middle chunks, and one last chunk.
-//
-// The wire format allows for limited recovery in the face of data corruption:
-// on a format error (such as a checksum mismatch), the reader moves to the
-// next block and looks for the next full or first chunk.
-package journal
-
-import (
- "encoding/binary"
- "fmt"
- "io"
-
- "github.com/syndtr/goleveldb/leveldb/errors"
- "github.com/syndtr/goleveldb/leveldb/util"
-)
-
-// These constants are part of the wire format and should not be changed.
-const (
- fullChunkType = 1
- firstChunkType = 2
- middleChunkType = 3
- lastChunkType = 4
-)
-
-const (
- blockSize = 32 * 1024
- headerSize = 7
-)
-
-type flusher interface {
- Flush() error
-}
-
-// ErrCorrupted is the error type generated by a corrupted block or chunk.
-type ErrCorrupted struct {
- Size int
- Reason string
-}
-
-func (e *ErrCorrupted) Error() string {
- return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
-}
-
-// Dropper is the interface that wraps the simple Drop method. The Drop
-// method is called when the journal reader drops a block or chunk.
-type Dropper interface {
- Drop(err error)
-}
-
-// Reader reads journals from an underlying io.Reader.
-type Reader struct {
- // r is the underlying reader.
- r io.Reader
- // the dropper.
- dropper Dropper
- // strict flag.
- strict bool
- // checksum flag.
- checksum bool
- // seq is the sequence number of the current journal.
- seq int
- // buf[i:j] is the unread portion of the current chunk's payload.
- // The low bound, i, excludes the chunk header.
- i, j int
- // n is the number of bytes of buf that are valid. Once reading has started,
- // only the final block can have n < blockSize.
- n int
- // last is whether the current chunk is the last chunk of the journal.
- last bool
- // err is any accumulated error.
- err error
- // buf is the buffer.
- buf [blockSize]byte
-}
-
-// NewReader returns a new reader. The dropper may be nil, and if
-// strict is true then a corrupted or invalid chunk will halt the journal
-// reader entirely.
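// As a concrete reading of the wire format described in the package comment:
// a single 3-byte journal written at the start of a block occupies 10 bytes,
// namely a 4-byte checksum (computed over the type byte and the payload), a
// 2-byte little-endian length of 3, the fullChunkType byte, and the 3
// payload bytes.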
-func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { - return &Reader{ - r: r, - dropper: dropper, - strict: strict, - checksum: checksum, - last: true, - } -} - -var errSkip = errors.New("leveldb/journal: skipped") - -func (r *Reader) corrupt(n int, reason string, skip bool) error { - if r.dropper != nil { - r.dropper.Drop(&ErrCorrupted{n, reason}) - } - if r.strict && !skip { - r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason}) - return r.err - } - return errSkip -} - -// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the -// next block into the buffer if necessary. -func (r *Reader) nextChunk(first bool) error { - for { - if r.j+headerSize <= r.n { - checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) - length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) - chunkType := r.buf[r.j+6] - - if checksum == 0 && length == 0 && chunkType == 0 { - // Drop entire block. - m := r.n - r.j - r.i = r.n - r.j = r.n - return r.corrupt(m, "zero header", false) - } else { - m := r.n - r.j - r.i = r.j + headerSize - r.j = r.j + headerSize + int(length) - if r.j > r.n { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(m, "chunk length overflows block", false) - } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(m, "checksum mismatch", false) - } - } - if first && chunkType != fullChunkType && chunkType != firstChunkType { - m := r.j - r.i - r.i = r.j - // Report the error, but skip it. - return r.corrupt(m+headerSize, "orphan chunk", true) - } - r.last = chunkType == fullChunkType || chunkType == lastChunkType - return nil - } - - // The last block. - if r.n < blockSize && r.n > 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - - // Read block. - n, err := io.ReadFull(r.r, r.buf[:]) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return err - } - if n == 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - r.i, r.j, r.n = 0, 0, n - } -} - -// Next returns a reader for the next journal. It returns io.EOF if there are no -// more journals. The reader returned becomes stale after the next Next call, -// and should no longer be used. If strict is false, the reader will returns -// io.ErrUnexpectedEOF error when found corrupted journal. -func (r *Reader) Next() (io.Reader, error) { - r.seq++ - if r.err != nil { - return nil, r.err - } - r.i = r.j - for { - if err := r.nextChunk(true); err == nil { - break - } else if err != errSkip { - return nil, err - } - } - return &singleReader{r, r.seq, nil}, nil -} - -// Reset resets the journal reader, allows reuse of the journal reader. Reset returns -// last accumulated error. 
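// A minimal reuse sketch (r is an existing *Reader, next is the next journal
// source; both hypothetical):
//
// if err := r.Reset(next, nil, true, true); err != nil {
//     // err is the error accumulated while reading the previous source
// }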
-func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { - r.seq++ - err := r.err - r.r = reader - r.dropper = dropper - r.strict = strict - r.checksum = checksum - r.i = 0 - r.j = 0 - r.n = 0 - r.last = true - r.err = nil - return err -} - -type singleReader struct { - r *Reader - seq int - err error -} - -func (x *singleReader) Read(p []byte) (int, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - n := copy(p, r.buf[r.i:r.j]) - r.i += n - return n, nil -} - -func (x *singleReader) ReadByte() (byte, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - c := r.buf[r.i] - r.i++ - return c, nil -} - -// Writer writes journals to an underlying io.Writer. -type Writer struct { - // w is the underlying writer. - w io.Writer - // seq is the sequence number of the current journal. - seq int - // f is w as a flusher. - f flusher - // buf[i:j] is the bytes that will become the current chunk. - // The low bound, i, includes the chunk header. - i, j int - // buf[:written] has already been written to w. - // written is zero unless Flush has been called. - written int - // first is whether the current chunk is the first chunk of the journal. - first bool - // pending is whether a chunk is buffered but not yet written. - pending bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewWriter returns a new Writer. -func NewWriter(w io.Writer) *Writer { - f, _ := w.(flusher) - return &Writer{ - w: w, - f: f, - } -} - -// fillHeader fills in the header for the pending chunk. -func (w *Writer) fillHeader(last bool) { - if w.i+headerSize > w.j || w.j > blockSize { - panic("leveldb/journal: bad writer state") - } - if last { - if w.first { - w.buf[w.i+6] = fullChunkType - } else { - w.buf[w.i+6] = lastChunkType - } - } else { - if w.first { - w.buf[w.i+6] = firstChunkType - } else { - w.buf[w.i+6] = middleChunkType - } - } - binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) - binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) -} - -// writeBlock writes the buffered block to the underlying writer, and reserves -// space for the next chunk's header. -func (w *Writer) writeBlock() { - _, w.err = w.w.Write(w.buf[w.written:]) - w.i = 0 - w.j = headerSize - w.written = 0 -} - -// writePending finishes the current journal and writes the buffer to the -// underlying writer. -func (w *Writer) writePending() { - if w.err != nil { - return - } - if w.pending { - w.fillHeader(true) - w.pending = false - } - _, w.err = w.w.Write(w.buf[w.written:w.j]) - w.written = w.j -} - -// Close finishes the current journal and closes the writer. 
-func (w *Writer) Close() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - w.err = errors.New("leveldb/journal: closed Writer") - return nil -} - -// Flush finishes the current journal, writes to the underlying writer, and -// flushes it if that writer implements interface{ Flush() error }. -func (w *Writer) Flush() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - if w.f != nil { - w.err = w.f.Flush() - return w.err - } - return nil -} - -// Reset resets the journal writer, allows reuse of the journal writer. Reset -// will also closes the journal writer if not already. -func (w *Writer) Reset(writer io.Writer) (err error) { - w.seq++ - if w.err == nil { - w.writePending() - err = w.err - } - w.w = writer - w.f, _ = writer.(flusher) - w.i = 0 - w.j = 0 - w.written = 0 - w.first = false - w.pending = false - w.err = nil - return -} - -// Next returns a writer for the next journal. The writer returned becomes stale -// after the next Close, Flush or Next call, and should no longer be used. -func (w *Writer) Next() (io.Writer, error) { - w.seq++ - if w.err != nil { - return nil, w.err - } - if w.pending { - w.fillHeader(true) - } - w.i = w.j - w.j = w.j + headerSize - // Check if there is room in the block for the header. - if w.j > blockSize { - // Fill in the rest of the block with zeroes. - for k := w.i; k < blockSize; k++ { - w.buf[k] = 0 - } - w.writeBlock() - if w.err != nil { - return nil, w.err - } - } - w.first = true - w.pending = true - return singleWriter{w, w.seq}, nil -} - -type singleWriter struct { - w *Writer - seq int -} - -func (x singleWriter) Write(p []byte) (int, error) { - w := x.w - if w.seq != x.seq { - return 0, errors.New("leveldb/journal: stale writer") - } - if w.err != nil { - return 0, w.err - } - n0 := len(p) - for len(p) > 0 { - // Write a block, if it is full. - if w.j == blockSize { - w.fillHeader(false) - w.writeBlock() - if w.err != nil { - return 0, w.err - } - w.first = false - } - // Copy bytes into the buffer. - n := copy(w.buf[w.j:], p) - w.j += n - p = p[n:] - } - return n0, nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go deleted file mode 100644 index 0fcf22599f..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go +++ /dev/null @@ -1,818 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135 -// License, authors and contributors informations can be found at bellow URLs respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -package journal - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math/rand" - "strings" - "testing" -) - -type dropper struct { - t *testing.T -} - -func (d dropper) Drop(err error) { - d.t.Log(err) -} - -func short(s string) string { - if len(s) < 64 { - return s - } - return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) -} - -// big returns a string of length n, composed of repetitions of partial. 
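The Writer API above is symmetric with the Reader: Next opens a journal, writes accumulate in the block buffer, and Flush or Close seals the pending chunk. A minimal sketch against an in-memory buffer:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

func main() {
	var buf bytes.Buffer
	w := journal.NewWriter(&buf)

	for _, rec := range []string{"first record", "second record"} {
		jw, err := w.Next()
		if err != nil {
			log.Fatal(err)
		}
		if _, err := jw.Write([]byte(rec)); err != nil {
			log.Fatal(err)
		}
	}
	// Close fills in the last pending header and writes the buffer out.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wrote %d bytes (payloads plus 7-byte chunk headers)\n", buf.Len())
}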
-func big(partial string, n int) string { - return strings.Repeat(partial, n/len(partial)+1)[:n] -} - -func TestEmpty(t *testing.T) { - buf := new(bytes.Buffer) - r := NewReader(buf, dropper{t}, true, true) - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { - buf := new(bytes.Buffer) - - reset() - w := NewWriter(buf) - for { - s, ok := gen() - if !ok { - break - } - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write([]byte(s)); err != nil { - t.Fatal(err) - } - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - reset() - r := NewReader(buf, dropper{t}, true, true) - for { - s, ok := gen() - if !ok { - break - } - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - x, err := ioutil.ReadAll(rr) - if err != nil { - t.Fatal(err) - } - if string(x) != s { - t.Fatalf("got %q, want %q", short(string(x)), short(s)) - } - } - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testLiterals(t *testing.T, s []string) { - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == len(s) { - return "", false - } - i++ - return s[i-1], true - } - testGenerator(t, reset, gen) -} - -func TestMany(t *testing.T) { - const n = 1e5 - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return fmt.Sprintf("%d.", i-1), true - } - testGenerator(t, reset, gen) -} - -func TestRandom(t *testing.T) { - const n = 1e2 - var ( - i int - r *rand.Rand - ) - reset := func() { - i, r = 0, rand.New(rand.NewSource(0)) - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true - } - testGenerator(t, reset, gen) -} - -func TestBasic(t *testing.T) { - testLiterals(t, []string{ - strings.Repeat("a", 1000), - strings.Repeat("b", 97270), - strings.Repeat("c", 8000), - }) -} - -func TestBoundary(t *testing.T) { - for i := blockSize - 16; i < blockSize+16; i++ { - s0 := big("abcd", i) - for j := blockSize - 16; j < blockSize+16; j++ { - s1 := big("ABCDE", j) - testLiterals(t, []string{s0, s1}) - testLiterals(t, []string{s0, "", s1}) - testLiterals(t, []string{s0, "x", s1}) - } - } -} - -func TestFlush(t *testing.T) { - buf := new(bytes.Buffer) - w := NewWriter(buf) - // Write a couple of records. Everything should still be held - // in the record.Writer buffer, so that buf.Len should be 0. - w0, _ := w.Next() - w0.Write([]byte("0")) - w1, _ := w.Next() - w1.Write([]byte("11")) - if got, want := buf.Len(), 0; got != want { - t.Fatalf("buffer length #0: got %d want %d", got, want) - } - // Flush the record.Writer buffer, which should yield 17 bytes. - // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #1: got %d want %d", got, want) - } - // Do another write, one that isn't large enough to complete the block. - // The write should not have flowed through to buf. - w2, _ := w.Next() - w2.Write(bytes.Repeat([]byte("2"), 10000)) - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #2: got %d want %d", got, want) - } - // Flushing should get us up to 10024 bytes written. - // 10024 = 17 + 7 + 10000. 
- if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 10024; got != want { - t.Fatalf("buffer length #3: got %d want %d", got, want) - } - // Do a bigger write, one that completes the current block. - // We should now have 32768 bytes (a complete block), without - // an explicit flush. - w3, _ := w.Next() - w3.Write(bytes.Repeat([]byte("3"), 40000)) - if got, want := buf.Len(), 32768; got != want { - t.Fatalf("buffer length #4: got %d want %d", got, want) - } - // Flushing should get us up to 50038 bytes written. - // 50038 = 10024 + 2*7 + 40000. There are two headers because - // the one record was split into two chunks. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 50038; got != want { - t.Fatalf("buffer length #5: got %d want %d", got, want) - } - // Check that reading those records give the right lengths. - r := NewReader(buf, dropper{t}, true, true) - wants := []int64{1, 2, 10000, 40000} - for i, want := range wants { - rr, _ := r.Next() - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #%d: %v", i, err) - } - if n != want { - t.Fatalf("read #%d: got %d bytes want %d", i, n, want) - } - } -} - -func TestNonExhaustiveRead(t *testing.T) { - const n = 100 - buf := new(bytes.Buffer) - p := make([]byte, 10) - rnd := rand.New(rand.NewSource(1)) - - w := NewWriter(buf) - for i := 0; i < n; i++ { - length := len(p) + rnd.Intn(3*blockSize) - s := string(uint8(i)) + "123456789abcdefgh" - ww, _ := w.Next() - ww.Write([]byte(big(s, length))) - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - for i := 0; i < n; i++ { - rr, _ := r.Next() - _, err := io.ReadFull(rr, p) - if err != nil { - t.Fatal(err) - } - want := string(uint8(i)) + "123456789" - if got := string(p); got != want { - t.Fatalf("read #%d: got %q want %q", i, got, want) - } - } -} - -func TestStaleReader(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w0.Write([]byte("0")) - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1.Write([]byte("11")) - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - r0, err := r.Next() - if err != nil { - t.Fatal(err) - } - r1, err := r.Next() - if err != nil { - t.Fatal(err) - } - p := make([]byte, 1) - if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale read #0: unexpected error: %v", err) - } - if _, err := r1.Read(p); err != nil { - t.Fatalf("fresh read #1: got %v want nil error", err) - } - if p[0] != '1' { - t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) - } -} - -func TestStaleWriter(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #0: unexpected error: %v", err) - } - if _, err := w1.Write([]byte("11")); err != nil { - t.Fatalf("fresh write #1: got %v want nil error", err) - } - if err := w.Flush(); err != nil { - t.Fatalf("flush: %v", err) - } - if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #1: unexpected error: %v", err) - } -} - -func TestCorrupt_MissingLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := 
NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Cut the last block. - b := buf.Bytes()[:blockSize] - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read. - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if n != blockSize-1024 { - t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) - } - - // Second read. - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedFirstBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #0. - for i := 0; i < 1024; i++ { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (third record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. 
- ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #1. - for i := 0; i < 1024; i++ { - b[blockSize+i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - // Third read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #3. - for i := len(b) - 1; i > len(b)-1024; i-- { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). 
- rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize - headerSize); n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - // Third read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - // Fourth read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #3: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize/2 + headerSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). 
- rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go deleted file mode 100644 index 572ae8150c..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/errors" -) - -type ErrIkeyCorrupted struct { - Ikey []byte - Reason string -} - -func (e *ErrIkeyCorrupted) Error() string { - return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason) -} - -func newErrIkeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason}) -} - -type kType int - -func (kt kType) String() string { - switch kt { - case ktDel: - return "d" - case ktVal: - return "v" - } - return "x" -} - -// Value types encoded as the last component of internal keys. -// Don't modify; this value are saved to disk. -const ( - ktDel kType = iota - ktVal -) - -// ktSeek defines the kType that should be passed when constructing an -// internal key for seeking to a particular sequence number (since we -// sort sequence numbers in decreasing order and the value type is -// embedded as the low 8 bits in the sequence number in internal keys, -// we need to use the highest-numbered ValueType, not the lowest). -const ktSeek = ktVal - -const ( - // Maximum value possible for sequence number; the 8-bits are - // used by value type, so its can packed together in single - // 64-bit integer. - kMaxSeq uint64 = (uint64(1) << 56) - 1 - // Maximum value possible for packed sequence number and type. - kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek) -) - -// Maximum number encoded in bytes. 
-var kMaxNumBytes = make([]byte, 8) - -func init() { - binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) -} - -type iKey []byte - -func newIkey(ukey []byte, seq uint64, kt kType) iKey { - if seq > kMaxSeq { - panic("leveldb: invalid sequence number") - } else if kt > ktVal { - panic("leveldb: invalid type") - } - - ik := make(iKey, len(ukey)+8) - copy(ik, ukey) - binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt)) - return ik -} - -func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) { - if len(ik) < 8 { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length") - } - num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type") - } - ukey = ik[:len(ik)-8] - return -} - -func validIkey(ik []byte) bool { - _, _, _, err := parseIkey(ik) - return err == nil -} - -func (ik iKey) assert() { - if ik == nil { - panic("leveldb: nil iKey") - } - if len(ik) < 8 { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik))) - } -} - -func (ik iKey) ukey() []byte { - ik.assert() - return ik[:len(ik)-8] -} - -func (ik iKey) num() uint64 { - ik.assert() - return binary.LittleEndian.Uint64(ik[len(ik)-8:]) -} - -func (ik iKey) parseNum() (seq uint64, kt kType) { - num := ik.num() - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) - } - return -} - -func (ik iKey) String() string { - if ik == nil { - return "" - } - - if ukey, seq, kt, err := parseIkey(ik); err == nil { - return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) - } else { - return "" - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go deleted file mode 100644 index 30eadf7847..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
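newIkey and parseIkey above are unexported, but the encoding itself is simple: an internal key is the user key followed by eight little-endian bytes packing (seq<<8)|kType, which is why kMaxSeq tops out at 2^56-1. A standalone sketch of the round trip, mirroring those helpers rather than calling them:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	ktDel = 0 // deletion marker
	ktVal = 1 // regular value
)

// makeIkey mirrors newIkey from the removed code above.
func makeIkey(ukey []byte, seq, kt uint64) []byte {
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|kt)
	return ik
}

// parse mirrors parseIkey: the trailing 8 bytes unpack to (seq, type).
func parse(ik []byte) (ukey []byte, seq, kt uint64) {
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	return ik[:len(ik)-8], num >> 8, num & 0xff
}

func main() {
	ik := makeIkey([]byte("foo"), 100, ktVal)
	ukey, seq, kt := parse(ik)
	fmt.Printf("ukey=%s seq=%d type=%d\n", ukey, seq, kt) // ukey=foo seq=100 type=1
}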
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -var defaultIComparer = &iComparer{comparer.DefaultComparer} - -func ikey(key string, seq uint64, kt kType) iKey { - return newIkey([]byte(key), uint64(seq), kt) -} - -func shortSep(a, b []byte) []byte { - dst := make([]byte, len(a)) - dst = defaultIComparer.Separator(dst[:0], a, b) - if dst == nil { - return a - } - return dst -} - -func shortSuccessor(b []byte) []byte { - dst := make([]byte, len(b)) - dst = defaultIComparer.Successor(dst[:0], b) - if dst == nil { - return b - } - return dst -} - -func testSingleKey(t *testing.T, key string, seq uint64, kt kType) { - ik := ikey(key, seq, kt) - - if !bytes.Equal(ik.ukey(), []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - - rseq, rt := ik.parseNum() - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - - if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil { - if !bytes.Equal(rukey, []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - } else { - t.Errorf("key error: %v", kerr) - } -} - -func TestIkey_EncodeDecode(t *testing.T) { - keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} - seqs := []uint64{ - 1, 2, 3, - (1 << 8) - 1, 1 << 8, (1 << 8) + 1, - (1 << 16) - 1, 1 << 16, (1 << 16) + 1, - (1 << 32) - 1, 1 << 32, (1 << 32) + 1, - } - for _, key := range keys { - for _, seq := range seqs { - testSingleKey(t, key, seq, ktVal) - testSingleKey(t, "hello", 1, ktDel) - } - } -} - -func assertBytes(t *testing.T, want, got []byte) { - if !bytes.Equal(got, want) { - t.Errorf("assert failed, got %v, want %v", got, want) - } -} - -func TestIkeyShortSeparator(t *testing.T) { - // When user keys are same - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 99, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 101, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktDel))) - - // When user keys are misordered - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("bar", 99, ktVal))) - - // When user keys are different, but correctly ordered - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSep(ikey("foo", 100, ktVal), - ikey("hello", 200, ktVal))) - - // When start user key is prefix of limit user key - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foobar", 200, ktVal))) - - // When limit user key is prefix of start user key - assertBytes(t, ikey("foobar", 100, ktVal), - shortSep(ikey("foobar", 100, ktVal), - ikey("foo", 200, ktVal))) -} - -func TestIkeyShortestSuccessor(t *testing.T) { - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSuccessor(ikey("foo", 100, ktVal))) - assertBytes(t, ikey("\xff\xff", 100, ktVal), - shortSuccessor(ikey("\xff\xff", 100, ktVal))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go 
b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go deleted file mode 100644 index fefa007a70..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package leveldb - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestLevelDB(t *testing.T) { - testutil.RunSuite(t, "LevelDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go deleted file mode 100644 index b05084caa6..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - "encoding/binary" - "math/rand" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -func BenchmarkPut(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkPutRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkGet(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := range buf { - p.Get(buf[i][:]) - } -} - -func BenchmarkGetRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - p.Get(buf[rand.Int()%b.N][:]) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go deleted file mode 100644 index 1395bd9280..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go +++ /dev/null @@ -1,471 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package memdb provides in-memory key/value database implementation. 
-package memdb - -import ( - "math/rand" - "sync" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrIterReleased = errors.New("leveldb/memdb: iterator released") -) - -const tMaxHeight = 12 - -type dbIter struct { - util.BasicReleaser - p *DB - slice *util.Range - node int - forward bool - key, value []byte - err error -} - -func (i *dbIter) fill(checkStart, checkLimit bool) bool { - if i.node != 0 { - n := i.p.nodeData[i.node] - m := n + i.p.nodeData[i.node+nKey] - i.key = i.p.kvData[n:m] - if i.slice != nil { - switch { - case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: - fallthrough - case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: - i.node = 0 - goto bail - } - } - i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] - return true - } -bail: - i.key = nil - i.value = nil - return false -} - -func (i *dbIter) Valid() bool { - return i.node != 0 -} - -func (i *dbIter) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil { - i.node, _ = i.p.findGE(i.slice.Start, false) - } else { - i.node = i.p.nodeData[nNext] - } - return i.fill(false, true) -} - -func (i *dbIter) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Limit != nil { - i.node = i.p.findLT(i.slice.Limit) - } else { - i.node = i.p.findLast() - } - return i.fill(true, false) -} - -func (i *dbIter) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { - key = i.slice.Start - } - i.node, _ = i.p.findGE(key, false) - return i.fill(false, true) -} - -func (i *dbIter) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if !i.forward { - return i.First() - } - return false - } - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.nodeData[i.node+nNext] - return i.fill(false, true) -} - -func (i *dbIter) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if i.forward { - return i.Last() - } - return false - } - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.findLT(i.key) - return i.fill(true, false) -} - -func (i *dbIter) Key() []byte { - return i.key -} - -func (i *dbIter) Value() []byte { - return i.value -} - -func (i *dbIter) Error() error { return i.err } - -func (i *dbIter) Release() { - if !i.Released() { - i.p = nil - i.node = 0 - i.key = nil - i.value = nil - i.BasicReleaser.Release() - } -} - -const ( - nKV = iota - nKey - nVal - nHeight - nNext -) - -// DB is an in-memory key/value database. 
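The nKV..nNext constants above index into a flat skip-list arena: all keys and values live in one byte slice, and each node is a run of ints [kvOffset, keyLen, valueLen, height, next_0..next_{height-1}]. A simplified standalone sketch of that layout for a single node, not the package's actual insert path:

package main

import "fmt"

func main() {
	var kvData []byte
	var nodeData []int

	// put appends key+value to the arena and one node record to nodeData,
	// mimicking the shape (not the linking logic) of DB.Put below.
	put := func(key, value string, height int, nexts ...int) int {
		off := len(kvData)
		kvData = append(kvData, key...)
		kvData = append(kvData, value...)
		node := len(nodeData)
		nodeData = append(nodeData, off, len(key), len(value), height)
		nodeData = append(nodeData, nexts...)
		return node
	}

	n := put("foo", "bar", 1, 0)
	o, kl, vl := nodeData[n], nodeData[n+1], nodeData[n+2]
	fmt.Printf("key=%s value=%s\n", kvData[o:o+kl], kvData[o+kl:o+kl+vl])
}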
-type DB struct { - cmp comparer.BasicComparer - rnd *rand.Rand - - mu sync.RWMutex - kvData []byte - // Node data: - // [0] : KV offset - // [1] : Key length - // [2] : Value length - // [3] : Height - // [3..height] : Next nodes - nodeData []int - prevNode [tMaxHeight]int - maxHeight int - n int - kvSize int -} - -func (p *DB) randHeight() (h int) { - const branching = 4 - h = 1 - for h < tMaxHeight && p.rnd.Int()%branching == 0 { - h++ - } - return -} - -// Must hold RW-lock if prev == true, as it use shared prevNode slice. -func (p *DB) findGE(key []byte, prev bool) (int, bool) { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - cmp := 1 - if next != 0 { - o := p.nodeData[next] - cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) - } - if cmp < 0 { - // Keep searching in this list - node = next - } else { - if prev { - p.prevNode[h] = node - } else if cmp == 0 { - return next, true - } - if h == 0 { - return next, cmp == 0 - } - h-- - } - } -} - -func (p *DB) findLT(key []byte) int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - o := p.nodeData[next] - if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -func (p *DB) findLast() int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - if next == 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// -// It is safe to modify the contents of the arguments after Put returns. -func (p *DB) Put(key []byte, value []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - if node, exact := p.findGE(key, true); exact { - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - p.nodeData[node] = kvOffset - m := p.nodeData[node+nVal] - p.nodeData[node+nVal] = len(value) - p.kvSize += len(value) - m - return nil - } - - h := p.randHeight() - if h > p.maxHeight { - for i := p.maxHeight; i < h; i++ { - p.prevNode[i] = 0 - } - p.maxHeight = h - } - - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - // Node - node := len(p.nodeData) - p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) - for i, n := range p.prevNode[:h] { - m := n + nNext + i - p.nodeData = append(p.nodeData, p.nodeData[m]) - p.nodeData[m] = node - } - - p.kvSize += len(key) + len(value) - p.n++ - return nil -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (p *DB) Delete(key []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - node, exact := p.findGE(key, true) - if !exact { - return ErrNotFound - } - - h := p.nodeData[node+nHeight] - for i, n := range p.prevNode[:h] { - m := n + 4 + i - p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] - } - - p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] - p.n-- - return nil -} - -// Contains returns true if the given key are in the DB. -// -// It is safe to modify the contents of the arguments after Contains returns. -func (p *DB) Contains(key []byte) bool { - p.mu.RLock() - _, exact := p.findGE(key, false) - p.mu.RUnlock() - return exact -} - -// Get gets the value for the given key. 
It returns error.ErrNotFound if the -// DB does not contain the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. -func (p *DB) Get(key []byte) (value []byte, err error) { - p.mu.RLock() - if node, exact := p.findGE(key, false); exact { - o := p.nodeData[node] + p.nodeData[node+nKey] - value = p.kvData[o : o+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Find returns. -func (p *DB) Find(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node, _ := p.findGE(key, false); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// NewIterator returns an iterator of the DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. However, the resultant key/value pairs are not guaranteed -// to be a consistent snapshot of the DB at a particular point in time. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { - return &dbIter{p: p, slice: slice} -} - -// Capacity returns keys/values buffer capacity. -func (p *DB) Capacity() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) -} - -// Size returns sum of keys and values length. Note that deleted -// key/value will not be accouted for, but it will still consume -// the buffer, since the buffer is append only. -func (p *DB) Size() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.kvSize -} - -// Free returns keys/values free buffer before need to grow. -func (p *DB) Free() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) - len(p.kvData) -} - -// Len returns the number of entries in the DB. -func (p *DB) Len() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.n -} - -// Reset resets the DB to initial empty state. Allows reuse the buffer. -func (p *DB) Reset() { - p.mu.Lock() - p.rnd = rand.New(rand.NewSource(0xdeadbeef)) - p.maxHeight = 1 - p.n = 0 - p.kvSize = 0 - p.kvData = p.kvData[:0] - p.nodeData = p.nodeData[:nNext+tMaxHeight] - p.nodeData[nKV] = 0 - p.nodeData[nKey] = 0 - p.nodeData[nVal] = 0 - p.nodeData[nHeight] = tMaxHeight - for n := 0; n < tMaxHeight; n++ { - p.nodeData[nNext+n] = 0 - p.prevNode[n] = 0 - } - p.mu.Unlock() -} - -// New creates a new initalized in-memory key/value DB. The capacity -// is the initial key/value buffer capacity. The capacity is advisory, -// not enforced. -// -// The returned DB instance is goroutine-safe. 
-func New(cmp comparer.BasicComparer, capacity int) *DB { - p := &DB{ - cmp: cmp, - rnd: rand.New(rand.NewSource(0xdeadbeef)), - maxHeight: 1, - kvData: make([]byte, 0, capacity), - nodeData: make([]int, 4+tMaxHeight), - } - p.nodeData[nHeight] = tMaxHeight - return p -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go deleted file mode 100644 index 18c304b7f1..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package memdb - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestMemDB(t *testing.T) { - testutil.RunSuite(t, "MemDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go deleted file mode 100644 index 5dd6dbc7b7..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLT(key); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestFindLast() (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLast(); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestPut(key []byte, value []byte) error { - p.Put(key, value) - return nil -} - -func (p *DB) TestDelete(key []byte) error { - p.Delete(key) - return nil -} - -func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return p.Find(key) -} - -func (p *DB) TestGet(key []byte) (value []byte, err error) { - return p.Get(key) -} - -func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { - return p.NewIterator(slice) -} - -var _ = testutil.Defer(func() { - Describe("Memdb", func() { - Describe("write test", func() { - It("should do write correctly", func() { - db := New(comparer.DefaultComparer, 0) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(), - PostFn: func(t *testutil.DBTesting) { - Expect(db.Len()).Should(Equal(t.Present.Len())) - Expect(db.Size()).Should(Equal(t.Present.Size())) - switch t.Act { - case testutil.DBPut, testutil.DBOverwrite: - Expect(db.Contains(t.ActKey)).Should(BeTrue()) - default: - Expect(db.Contains(t.ActKey)).Should(BeFalse()) - } - }, - } - testutil.DoDBTesting(&t) - }) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. 
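Putting the memdb API above together: New, Put, Get, and a range-limited iterator, using the exported pieces exactly as defined in this file (comparer.DefaultComparer and util.Range come from sibling vendored packages):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db := memdb.New(comparer.DefaultComparer, 4096)
	db.Put([]byte("a"), []byte("1"))
	db.Put([]byte("c"), []byte("3"))
	db.Put([]byte("b"), []byte("2"))

	if v, err := db.Get([]byte("b")); err == nil {
		fmt.Printf("b=%s\n", v)
	}

	// Iterate only keys in ["a", "c"); a fresh iterator's Next starts at First.
	it := db.NewIterator(&util.Range{Start: []byte("a"), Limit: []byte("c")})
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
	fmt.Println("entries:", db.Len(), "bytes:", db.Size())
}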
- db := New(comparer.DefaultComparer, 0) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - db.Put(key, value) - }) - - if kv.Len() > 1 { - It("Should find correct keys with findLT", func() { - testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { - key_, key, _ := kv.IndexInexact(i + 1) - expectedKey, expectedValue := kv.Index(i) - - // Using key that exist. - rkey, rvalue, err := db.TestFindLT(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) - Expect(rkey).Should(Equal(expectedKey), "Key") - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) - - // Using key that doesn't exist. - rkey, rvalue, err = db.TestFindLT(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey) - Expect(rkey).Should(Equal(expectedKey)) - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey) - }) - }) - } - - if kv.Len() > 0 { - It("Should find last key with findLast", func() { - key, value := kv.Index(kv.Len() - 1) - rkey, rvalue, err := db.TestFindLast() - Expect(err).ShouldNot(HaveOccurred()) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value)) - }) - } - - return db - }, nil, nil) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go deleted file mode 100644 index f9a309dac6..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ /dev/null @@ -1,670 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package opt provides sets of options used by LevelDB. -package opt - -import ( - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/filter" - "math" -) - -const ( - KiB = 1024 - MiB = KiB * 1024 - GiB = MiB * 1024 -) - -var ( - DefaultBlockCacher = LRUCacher - DefaultBlockCacheCapacity = 8 * MiB - DefaultBlockRestartInterval = 16 - DefaultBlockSize = 4 * KiB - DefaultCompactionExpandLimitFactor = 25 - DefaultCompactionGPOverlapsFactor = 10 - DefaultCompactionL0Trigger = 4 - DefaultCompactionSourceLimitFactor = 1 - DefaultCompactionTableSize = 2 * MiB - DefaultCompactionTableSizeMultiplier = 1.0 - DefaultCompactionTotalSize = 10 * MiB - DefaultCompactionTotalSizeMultiplier = 10.0 - DefaultCompressionType = SnappyCompression - DefaultIteratorSamplingRate = 1 * MiB - DefaultMaxMemCompationLevel = 2 - DefaultNumLevel = 7 - DefaultOpenFilesCacher = LRUCacher - DefaultOpenFilesCacheCapacity = 500 - DefaultWriteBuffer = 4 * MiB - DefaultWriteL0PauseTrigger = 12 - DefaultWriteL0SlowdownTrigger = 8 -) - -// Cacher is a caching algorithm. -type Cacher interface { - New(capacity int) cache.Cacher -} - -type CacherFunc struct { - NewFunc func(capacity int) cache.Cacher -} - -func (f *CacherFunc) New(capacity int) cache.Cacher { - if f.NewFunc != nil { - return f.NewFunc(capacity) - } - return nil -} - -func noCacher(int) cache.Cacher { return nil } - -var ( - // LRUCacher is the LRU-cache algorithm. - LRUCacher = &CacherFunc{cache.NewLRU} - - // NoCacher is the value to disable caching algorithm. - NoCacher = &CacherFunc{} -) - -// Compression is the 'sorted table' block compression algorithm to use. 
-type Compression uint - -func (c Compression) String() string { - switch c { - case DefaultCompression: - return "default" - case NoCompression: - return "none" - case SnappyCompression: - return "snappy" - } - return "invalid" -} - -const ( - DefaultCompression Compression = iota - NoCompression - SnappyCompression - nCompression -) - -// Strict is the DB 'strict level'. -type Strict uint - -const ( - // If present then a corrupted or invalid chunk or block in manifest - // journal will cause an error instead of being dropped. - // This will prevent database with corrupted manifest to be opened. - StrictManifest Strict = 1 << iota - - // If present then journal chunk checksum will be verified. - StrictJournalChecksum - - // If present then a corrupted or invalid chunk or block in journal - // will cause an error instead of being dropped. - // This will prevent database with corrupted journal to be opened. - StrictJournal - - // If present then 'sorted table' block checksum will be verified. - // This has effect on both 'read operation' and compaction. - StrictBlockChecksum - - // If present then a corrupted 'sorted table' will fails compaction. - // The database will enter read-only mode. - StrictCompaction - - // If present then a corrupted 'sorted table' will halts 'read operation'. - StrictReader - - // If present then leveldb.Recover will drop corrupted 'sorted table'. - StrictRecovery - - // This only applicable for ReadOptions, if present then this ReadOptions - // 'strict level' will override global ones. - StrictOverride - - // StrictAll enables all strict flags. - StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery - - // DefaultStrict is the default strict flags. Specify any strict flags - // will override default strict flags as whole (i.e. not OR'ed). - DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader - - // NoStrict disables all strict flags. Override default strict flags. - NoStrict = ^StrictAll -) - -// Options holds the optional parameters for the DB at large. -type Options struct { - // AltFilters defines one or more 'alternative filters'. - // 'alternative filters' will be used during reads if a filter block - // does not match with the 'effective filter'. - // - // The default value is nil - AltFilters []filter.Filter - - // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - BlockCacher Cacher - - // BlockCacheCapacity defines the capacity of the 'sorted table' block caching. - // Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher. - // - // The default value is 8MiB. - BlockCacheCapacity int - - // BlockRestartInterval is the number of keys between restart points for - // delta encoding of keys. - // - // The default value is 16. - BlockRestartInterval int - - // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' - // block. - // - // The default value is 4KiB. - BlockSize int - - // CompactionExpandLimitFactor limits compaction size after expanded. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 25. - CompactionExpandLimitFactor int - - // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a - // single 'sorted table' generates. 
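The Strict bits above compose with OR, and per the DefaultStrict comment, setting any value replaces the defaults wholesale rather than OR-ing into them. A small sketch using GetStrict (defined later in this file):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Keep the defaults and additionally refuse corrupted manifests.
	o := &opt.Options{Strict: opt.DefaultStrict | opt.StrictManifest}
	fmt.Println(o.GetStrict(opt.StrictManifest)) // true
	fmt.Println(o.GetStrict(opt.StrictRecovery)) // false: not in DefaultStrict, not added

	// NoStrict disables every strict check.
	o = &opt.Options{Strict: opt.NoStrict}
	fmt.Println(o.GetStrict(opt.StrictJournalChecksum)) // false
}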
- // This will be multiplied by table size limit at grandparent level. - // - // The default value is 10. - CompactionGPOverlapsFactor int - - // CompactionL0Trigger defines number of 'sorted table' at level-0 that will - // trigger compaction. - // - // The default value is 4. - CompactionL0Trigger int - - // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to - // level-0. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 1. - CompactionSourceLimitFactor int - - // CompactionTableSize limits size of 'sorted table' that compaction generates. - // The limits for each level will be calculated as: - // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel. - // - // The default value is 2MiB. - CompactionTableSize int - - // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize. - // - // The default value is 1. - CompactionTableSizeMultiplier float64 - - // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTableSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTableSizeMultiplierPerLevel []float64 - - // CompactionTotalSize limits total size of 'sorted table' for each level. - // The limits for each level will be calculated as: - // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using - // CompactionTotalSizeMultiplierPerLevel. - // - // The default value is 10MiB. - CompactionTotalSize int - - // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize. - // - // The default value is 10. - CompactionTotalSizeMultiplier float64 - - // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTotalSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTotalSizeMultiplierPerLevel []float64 - - // Comparer defines a total ordering over the space of []byte keys: a 'less - // than' relationship. The same comparison algorithm must be used for reads - // and writes over the lifetime of the DB. - // - // The default value uses the same ordering as bytes.Compare. - Comparer comparer.Comparer - - // Compression defines the 'sorted table' block compression to use. - // - // The default value (DefaultCompression) uses snappy compression. - Compression Compression - - // DisableBufferPool allows disable use of util.BufferPool functionality. - // - // The default value is false. - DisableBufferPool bool - - // DisableBlockCache allows disable use of cache.Cache functionality on - // 'sorted table' block. - // - // The default value is false. - DisableBlockCache bool - - // DisableCompactionBackoff allows disable compaction retry backoff. - // - // The default value is false. - DisableCompactionBackoff bool - - // ErrorIfExist defines whether an error should returned if the DB already - // exist. - // - // The default value is false. - ErrorIfExist bool - - // ErrorIfMissing defines whether an error should returned if the DB is - // missing. If false then the database will be created if missing, otherwise - // an error will be returned. - // - // The default value is false. - ErrorIfMissing bool - - // Filter defines an 'effective filter' to use. An 'effective filter' - // if defined will be used to generate per-table filter block. - // The filter name will be stored on disk. 
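The compaction size limits above are pure arithmetic: base * multiplier^level, with optional per-level overrides. A quick sketch evaluating them through the getters defined later in this file; only the table-size knobs are set, so the total-size limits fall back to the 10MiB/10x defaults:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	o := &opt.Options{
		CompactionTableSize:           2 * opt.MiB,
		CompactionTableSizeMultiplier: 1.5,
	}
	for level := 0; level < 4; level++ {
		fmt.Printf("level %d: table limit %d bytes, total limit %d bytes\n",
			level, o.GetCompactionTableSize(level), o.GetCompactionTotalSize(level))
	}
}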
- // During reads LevelDB will try to find matching filter from - // 'effective filter' and 'alternative filters'. - // - // Filter can be changed after a DB has been created. It is recommended - // to put old filter to the 'alternative filters' to mitigate lack of - // filter during transition period. - // - // A filter is used to reduce disk reads when looking for a specific key. - // - // The default value is nil. - Filter filter.Filter - - // IteratorSamplingRate defines approximate gap (in bytes) between read - // sampling of an iterator. The samples will be used to determine when - // compaction should be triggered. - // - // The default is 1MiB. - IteratorSamplingRate int - - // MaxMemCompationLevel defines maximum level a newly compacted 'memdb' - // will be pushed into if doesn't creates overlap. This should less than - // NumLevel. Use -1 for level-0. - // - // The default is 2. - MaxMemCompationLevel int - - // NumLevel defines number of database level. The level shouldn't changed - // between opens, or the database will panic. - // - // The default is 7. - NumLevel int - - // OpenFilesCacher provides cache algorithm for open files caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - OpenFilesCacher Cacher - - // OpenFilesCacheCapacity defines the capacity of the open files caching. - // Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher. - // - // The default value is 500. - OpenFilesCacheCapacity int - - // If true then opens DB in read-only mode. - // - // The default value is false. - ReadOnly bool - - // Strict defines the DB strict level. - Strict Strict - - // WriteBuffer defines maximum size of a 'memdb' before flushed to - // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk - // unsorted journal. - // - // LevelDB may held up to two 'memdb' at the same time. - // - // The default value is 4MiB. - WriteBuffer int - - // WriteL0StopTrigger defines number of 'sorted table' at level-0 that will - // pause write. - // - // The default value is 12. - WriteL0PauseTrigger int - - // WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that - // will trigger write slowdown. - // - // The default value is 8. 
- WriteL0SlowdownTrigger int -} - -func (o *Options) GetAltFilters() []filter.Filter { - if o == nil { - return nil - } - return o.AltFilters -} - -func (o *Options) GetBlockCacher() Cacher { - if o == nil || o.BlockCacher == nil { - return DefaultBlockCacher - } else if o.BlockCacher == NoCacher { - return nil - } - return o.BlockCacher -} - -func (o *Options) GetBlockCacheCapacity() int { - if o == nil || o.BlockCacheCapacity == 0 { - return DefaultBlockCacheCapacity - } else if o.BlockCacheCapacity < 0 { - return 0 - } - return o.BlockCacheCapacity -} - -func (o *Options) GetBlockRestartInterval() int { - if o == nil || o.BlockRestartInterval <= 0 { - return DefaultBlockRestartInterval - } - return o.BlockRestartInterval -} - -func (o *Options) GetBlockSize() int { - if o == nil || o.BlockSize <= 0 { - return DefaultBlockSize - } - return o.BlockSize -} - -func (o *Options) GetCompactionExpandLimit(level int) int { - factor := DefaultCompactionExpandLimitFactor - if o != nil && o.CompactionExpandLimitFactor > 0 { - factor = o.CompactionExpandLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionGPOverlaps(level int) int { - factor := DefaultCompactionGPOverlapsFactor - if o != nil && o.CompactionGPOverlapsFactor > 0 { - factor = o.CompactionGPOverlapsFactor - } - return o.GetCompactionTableSize(level+2) * factor -} - -func (o *Options) GetCompactionL0Trigger() int { - if o == nil || o.CompactionL0Trigger == 0 { - return DefaultCompactionL0Trigger - } - return o.CompactionL0Trigger -} - -func (o *Options) GetCompactionSourceLimit(level int) int { - factor := DefaultCompactionSourceLimitFactor - if o != nil && o.CompactionSourceLimitFactor > 0 { - factor = o.CompactionSourceLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionTableSize(level int) int { - var ( - base = DefaultCompactionTableSize - mult float64 - ) - if o != nil { - if o.CompactionTableSize > 0 { - base = o.CompactionTableSize - } - if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTableSizeMultiplierPerLevel[level] - } else if o.CompactionTableSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level)) - } - return int(float64(base) * mult) -} - -func (o *Options) GetCompactionTotalSize(level int) int64 { - var ( - base = DefaultCompactionTotalSize - mult float64 - ) - if o != nil { - if o.CompactionTotalSize > 0 { - base = o.CompactionTotalSize - } - if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTotalSizeMultiplierPerLevel[level] - } else if o.CompactionTotalSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level)) - } - return int64(float64(base) * mult) -} - -func (o *Options) GetComparer() comparer.Comparer { - if o == nil || o.Comparer == nil { - return comparer.DefaultComparer - } - return o.Comparer -} - -func (o *Options) GetCompression() Compression { - if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { - return DefaultCompressionType - } - return o.Compression -} - -func (o *Options) GetDisableBufferPool() bool { - if o == nil { - return false - } - return 
o.DisableBufferPool -} - -func (o *Options) GetDisableBlockCache() bool { - if o == nil { - return false - } - return o.DisableBlockCache -} - -func (o *Options) GetDisableCompactionBackoff() bool { - if o == nil { - return false - } - return o.DisableCompactionBackoff -} - -func (o *Options) GetErrorIfExist() bool { - if o == nil { - return false - } - return o.ErrorIfExist -} - -func (o *Options) GetErrorIfMissing() bool { - if o == nil { - return false - } - return o.ErrorIfMissing -} - -func (o *Options) GetFilter() filter.Filter { - if o == nil { - return nil - } - return o.Filter -} - -func (o *Options) GetIteratorSamplingRate() int { - if o == nil || o.IteratorSamplingRate <= 0 { - return DefaultIteratorSamplingRate - } - return o.IteratorSamplingRate -} - -func (o *Options) GetMaxMemCompationLevel() int { - level := DefaultMaxMemCompationLevel - if o != nil { - if o.MaxMemCompationLevel > 0 { - level = o.MaxMemCompationLevel - } else if o.MaxMemCompationLevel < 0 { - level = 0 - } - } - if level >= o.GetNumLevel() { - return o.GetNumLevel() - 1 - } - return level -} - -func (o *Options) GetNumLevel() int { - if o == nil || o.NumLevel <= 0 { - return DefaultNumLevel - } - return o.NumLevel -} - -func (o *Options) GetOpenFilesCacher() Cacher { - if o == nil || o.OpenFilesCacher == nil { - return DefaultOpenFilesCacher - } - if o.OpenFilesCacher == NoCacher { - return nil - } - return o.OpenFilesCacher -} - -func (o *Options) GetOpenFilesCacheCapacity() int { - if o == nil || o.OpenFilesCacheCapacity == 0 { - return DefaultOpenFilesCacheCapacity - } else if o.OpenFilesCacheCapacity < 0 { - return 0 - } - return o.OpenFilesCacheCapacity -} - -func (o *Options) GetReadOnly() bool { - if o == nil { - return false - } - return o.ReadOnly -} - -func (o *Options) GetStrict(strict Strict) bool { - if o == nil || o.Strict == 0 { - return DefaultStrict&strict != 0 - } - return o.Strict&strict != 0 -} - -func (o *Options) GetWriteBuffer() int { - if o == nil || o.WriteBuffer <= 0 { - return DefaultWriteBuffer - } - return o.WriteBuffer -} - -func (o *Options) GetWriteL0PauseTrigger() int { - if o == nil || o.WriteL0PauseTrigger == 0 { - return DefaultWriteL0PauseTrigger - } - return o.WriteL0PauseTrigger -} - -func (o *Options) GetWriteL0SlowdownTrigger() int { - if o == nil || o.WriteL0SlowdownTrigger == 0 { - return DefaultWriteL0SlowdownTrigger - } - return o.WriteL0SlowdownTrigger -} - -// ReadOptions holds the optional parameters for 'read operation'. The -// 'read operation' includes Get, Find and NewIterator. -type ReadOptions struct { - // DontFillCache defines whether block reads for this 'read operation' - // should be cached. If false then the block will be cached. This does - // not affect already cached blocks. - // - // The default value is false. - DontFillCache bool - - // Strict will be OR'ed with the global DB 'strict level' unless StrictOverride - // is present. Currently only StrictReader has an effect here. - Strict Strict -} - -func (ro *ReadOptions) GetDontFillCache() bool { - if ro == nil { - return false - } - return ro.DontFillCache -} - -func (ro *ReadOptions) GetStrict(strict Strict) bool { - if ro == nil { - return false - } - return ro.Strict&strict != 0 -} - -// WriteOptions holds the optional parameters for 'write operation'. The -// 'write operation' includes Write, Put and Delete. -type WriteOptions struct { - // Sync is whether to sync underlying writes from the OS buffer cache - // through to actual disk, if applicable.
Setting Sync can result in - // slower writes. - // - // If false, and the machine crashes, then some recent writes may be lost. - // Note that if it is just the process that crashes (and the machine does - // not) then no writes will be lost. - // - // In other words, Sync being false has the same semantics as a write - // system call. Sync being true means write followed by fsync. - // - // The default value is false. - Sync bool -} - -func (wo *WriteOptions) GetSync() bool { - if wo == nil { - return false - } - return wo.Sync -} - -func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool { - if ro.GetStrict(StrictOverride) { - return ro.GetStrict(strict) - } else { - return o.GetStrict(strict) || ro.GetStrict(strict) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go deleted file mode 100644 index a3d84ef60d..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func dupOptions(o *opt.Options) *opt.Options { - newo := &opt.Options{} - if o != nil { - *newo = *o - } - if newo.Strict == 0 { - newo.Strict = opt.DefaultStrict - } - return newo -} - -func (s *session) setOptions(o *opt.Options) { - no := dupOptions(o) - // Alternative filters. - if filters := o.GetAltFilters(); len(filters) > 0 { - no.AltFilters = make([]filter.Filter, len(filters)) - for i, filter := range filters { - no.AltFilters[i] = &iFilter{filter} - } - } - // Comparer. - s.icmp = &iComparer{o.GetComparer()} - no.Comparer = s.icmp - // Filter. 
- if filter := o.GetFilter(); filter != nil { - no.Filter = &iFilter{filter} - } - - s.o = &cachedOptions{Options: no} - s.o.cache() -} - -type cachedOptions struct { - *opt.Options - - compactionExpandLimit []int - compactionGPOverlaps []int - compactionSourceLimit []int - compactionTableSize []int - compactionTotalSize []int64 -} - -func (co *cachedOptions) cache() { - numLevel := co.Options.GetNumLevel() - - co.compactionExpandLimit = make([]int, numLevel) - co.compactionGPOverlaps = make([]int, numLevel) - co.compactionSourceLimit = make([]int, numLevel) - co.compactionTableSize = make([]int, numLevel) - co.compactionTotalSize = make([]int64, numLevel) - - for level := 0; level < numLevel; level++ { - co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) - co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) - co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) - co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level) - co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level) - } -} - -func (co *cachedOptions) GetCompactionExpandLimit(level int) int { - return co.compactionExpandLimit[level] -} - -func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { - return co.compactionGPOverlaps[level] -} - -func (co *cachedOptions) GetCompactionSourceLimit(level int) int { - return co.compactionSourceLimit[level] -} - -func (co *cachedOptions) GetCompactionTableSize(level int) int { - return co.compactionTableSize[level] -} - -func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { - return co.compactionTotalSize[level] -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go deleted file mode 100644 index f0bba4602c..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "io" - "os" - "sync" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type ErrManifestCorrupted struct { - Field string - Reason string -} - -func (e *ErrManifestCorrupted) Error() string { - return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) -} - -func newErrManifestCorrupted(f storage.File, field, reason string) error { - return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason}) -} - -// session represent a persistent database session. -type session struct { - // Need 64-bit alignment. 
- stNextFileNum uint64 // current unused file number - stJournalNum uint64 // current journal file number; need external synchronization - stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb - stSeqNum uint64 // last mem compacted seq; need external synchronization - stTempFileNum uint64 - - stor storage.Storage - storLock util.Releaser - o *cachedOptions - icmp *iComparer - tops *tOps - - manifest *journal.Writer - manifestWriter storage.Writer - manifestFile storage.File - - stCompPtrs []iKey // compaction pointers; need external synchronization - stVersion *version // current version - vmu sync.Mutex -} - -// Creates new initialized session instance. -func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { - if stor == nil { - return nil, os.ErrInvalid - } - storLock, err := stor.Lock() - if err != nil { - return - } - s = &session{ - stor: stor, - storLock: storLock, - stCompPtrs: make([]iKey, o.GetNumLevel()), - } - s.setOptions(o) - s.tops = newTableOps(s) - s.setVersion(newVersion(s)) - s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed") - return -} - -// Close session. -func (s *session) close() { - s.tops.close() - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - s.manifest = nil - s.manifestWriter = nil - s.manifestFile = nil - s.stVersion = nil -} - -// Release session lock. -func (s *session) release() { - s.storLock.Release() -} - -// Create a new database session; need external synchronization. -func (s *session) create() error { - // create manifest - return s.newManifest(nil, nil) -} - -// Recover a database session; need external synchronization. -func (s *session) recover() (err error) { - defer func() { - if os.IsNotExist(err) { - // Don't return os.ErrNotExist if the underlying storage contains - // other files that belong to LevelDB. So the DB won't get trashed. - if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 { - err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} - } - } - }() - - m, err := s.stor.GetManifest() - if err != nil { - return - } - - reader, err := m.Open() - if err != nil { - return - } - defer reader.Close() - - var ( - // Options. 
- numLevel = s.o.GetNumLevel() - strict = s.o.GetStrict(opt.StrictManifest) - - jr = journal.NewReader(reader, dropper{s, m}, strict, true) - rec = &sessionRecord{} - staging = s.stVersion.newStaging() - ) - for { - var r io.Reader - r, err = jr.Next() - if err != nil { - if err == io.EOF { - err = nil - break - } - return errors.SetFile(err, m) - } - - err = rec.decode(r, numLevel) - if err == nil { - // save compact pointers - for _, r := range rec.compPtrs { - s.stCompPtrs[r.level] = iKey(r.ikey) - } - // commit record to version staging - staging.commit(rec) - } else { - err = errors.SetFile(err, m) - if strict || !errors.IsCorrupted(err) { - return - } else { - s.logf("manifest error: %v (skipped)", errors.SetFile(err, m)) - } - } - rec.resetCompPtrs() - rec.resetAddedTables() - rec.resetDeletedTables() - } - - switch { - case !rec.has(recComparer): - return newErrManifestCorrupted(m, "comparer", "missing") - case rec.comparer != s.icmp.uName(): - return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) - case !rec.has(recNextFileNum): - return newErrManifestCorrupted(m, "next-file-num", "missing") - case !rec.has(recJournalNum): - return newErrManifestCorrupted(m, "journal-file-num", "missing") - case !rec.has(recSeqNum): - return newErrManifestCorrupted(m, "seq-num", "missing") - } - - s.manifestFile = m - s.setVersion(staging.finish()) - s.setNextFileNum(rec.nextFileNum) - s.recordCommited(rec) - return nil -} - -// Commit session; need external synchronization. -func (s *session) commit(r *sessionRecord) (err error) { - v := s.version() - defer v.release() - - // spawn new version based on current version - nv := v.spawn(r) - - if s.manifest == nil { - // manifest journal writer not yet created, create one - err = s.newManifest(r, nv) - } else { - err = s.flushManifest(r) - } - - // finally, apply new version if no error rise - if err == nil { - s.setVersion(nv) - } - - return -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go deleted file mode 100644 index 7c5a79418c..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func (s *session) pickMemdbLevel(umin, umax []byte) int { - v := s.version() - defer v.release() - return v.pickMemdbLevel(umin, umax) -} - -func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) { - // Create sorted table. - iter := mdb.NewIterator(nil) - defer iter.Release() - t, n, err := s.tops.createFrom(iter) - if err != nil { - return level, err - } - - // Pick level and add to record. - if level < 0 { - level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey()) - } - rec.addTableFile(level, t) - - s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax) - return level, nil -} - -// Pick a compaction based on current state; need external synchronization. 
-func (s *session) pickCompaction() *compaction { - v := s.version() - - var level int - var t0 tFiles - if v.cScore >= 1 { - level = v.cLevel - cptr := s.stCompPtrs[level] - tables := v.tables[level] - for _, t := range tables { - if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { - t0 = append(t0, t) - break - } - } - if len(t0) == 0 { - t0 = append(t0, tables[0]) - } - } else { - if p := atomic.LoadPointer(&v.cSeek); p != nil { - ts := (*tSet)(p) - level = ts.level - t0 = append(t0, ts.table) - } else { - v.release() - return nil - } - } - - return newCompaction(s, v, level, t0) -} - -// Create compaction from given level and range; need external synchronization. -func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { - v := s.version() - - t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0) - if len(t0) == 0 { - v.release() - return nil - } - - // Avoid compacting too much in one shot in case the range is large. - // But we cannot do this for level-0 since level-0 files can overlap - // and we must not pick one file and drop another older file if the - // two files overlap. - if level > 0 { - limit := uint64(v.s.o.GetCompactionSourceLimit(level)) - total := uint64(0) - for i, t := range t0 { - total += t.size - if total >= limit { - s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) - t0 = t0[:i+1] - break - } - } - } - - return newCompaction(s, v, level, t0) -} - -func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction { - c := &compaction{ - s: s, - v: v, - level: level, - tables: [2]tFiles{t0, nil}, - maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)), - tPtrs: make([]int, s.o.GetNumLevel()), - } - c.expand() - c.save() - return c -} - -// compaction represent a compaction state. -type compaction struct { - s *session - v *version - - level int - tables [2]tFiles - maxGPOverlaps uint64 - - gp tFiles - gpi int - seenKey bool - gpOverlappedBytes uint64 - imin, imax iKey - tPtrs []int - released bool - - snapGPI int - snapSeenKey bool - snapGPOverlappedBytes uint64 - snapTPtrs []int -} - -func (c *compaction) save() { - c.snapGPI = c.gpi - c.snapSeenKey = c.seenKey - c.snapGPOverlappedBytes = c.gpOverlappedBytes - c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...) -} - -func (c *compaction) restore() { - c.gpi = c.snapGPI - c.seenKey = c.snapSeenKey - c.gpOverlappedBytes = c.snapGPOverlappedBytes - c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...) -} - -func (c *compaction) release() { - if !c.released { - c.released = true - c.v.release() - } -} - -// Expand compacted tables; need external synchronization. -func (c *compaction) expand() { - limit := uint64(c.s.o.GetCompactionExpandLimit(c.level)) - vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1] - - t0, t1 := c.tables[0], c.tables[1] - imin, imax := t0.getRange(c.s.icmp) - // We expand t0 here just incase ukey hop across tables. - t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0) - if len(t0) != len(c.tables[0]) { - imin, imax = t0.getRange(c.s.icmp) - } - t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) - // Get entire range covered by compaction. - amin, amax := append(t0, t1...).getRange(c.s.icmp) - - // See if we can grow the number of inputs in "level" without - // changing the number of "level+1" files we pick up. 
- if len(t1) > 0 { - exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0) - if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { - xmin, xmax := exp0.getRange(c.s.icmp) - exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) - if len(exp1) == len(t1) { - c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), - len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) - imin, imax = xmin, xmax - t0, t1 = exp0, exp1 - amin, amax = append(t0, t1...).getRange(c.s.icmp) - } - } - } - - // Compute the set of grandparent files that overlap this compaction - // (parent == level+1; grandparent == level+2) - if c.level+2 < c.s.o.GetNumLevel() { - c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) - } - - c.tables[0], c.tables[1] = t0, t1 - c.imin, c.imax = imin, imax -} - -// Check whether compaction is trivial. -func (c *compaction) trivial() bool { - return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps -} - -func (c *compaction) baseLevelForKey(ukey []byte) bool { - for level, tables := range c.v.tables[c.level+2:] { - for c.tPtrs[level] < len(tables) { - t := tables[c.tPtrs[level]] - if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { - // We've advanced far enough. - if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - // Key falls in this file's range, so definitely not base level. - return false - } - break - } - c.tPtrs[level]++ - } - } - return true -} - -func (c *compaction) shouldStopBefore(ikey iKey) bool { - for ; c.gpi < len(c.gp); c.gpi++ { - gp := c.gp[c.gpi] - if c.s.icmp.Compare(ikey, gp.imax) <= 0 { - break - } - if c.seenKey { - c.gpOverlappedBytes += gp.size - } - } - c.seenKey = true - - if c.gpOverlappedBytes > c.maxGPOverlaps { - // Too much overlap for current output; start new output. - c.gpOverlappedBytes = 0 - return true - } - return false -} - -// Creates an iterator. -func (c *compaction) newIterator() iterator.Iterator { - // Creates iterator slice. - icap := len(c.tables) - if c.level == 0 { - // Special case for level-0. - icap = len(c.tables[0]) + 1 - } - its := make([]iterator.Iterator, 0, icap) - - // Options. - ro := &opt.ReadOptions{ - DontFillCache: true, - Strict: opt.StrictOverride, - } - strict := c.s.o.GetStrict(opt.StrictCompaction) - if strict { - ro.Strict |= opt.StrictReader - } - - for i, tables := range c.tables { - if len(tables) == 0 { - continue - } - - // Level-0 is not sorted and may overlaps each other. - if c.level+i == 0 { - for _, t := range tables { - its = append(its, c.s.tops.newIterator(t, nil, ro)) - } - } else { - it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict) - its = append(its, it) - } - } - - return iterator.NewMergedIterator(its, c.s.icmp, strict) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go deleted file mode 100644 index 405e07bef4..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
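[Editor's note] The compaction logic above leans on the per-level size limits documented in the opt package earlier in this diff: CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) and CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level). Below is a minimal standalone sketch of that arithmetic using only the documented defaults (2MiB tables with multiplier 1, 10MiB total with multiplier 10, 7 levels); it is an illustration of the formulas, not code from the library.

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		tableSize = 2 << 20  // default CompactionTableSize (2MiB)
		tableMult = 1.0      // default CompactionTableSizeMultiplier
		totalSize = 10 << 20 // default CompactionTotalSize (10MiB)
		totalMult = 10.0     // default CompactionTotalSizeMultiplier
		numLevel  = 7        // default NumLevel
	)
	for level := 0; level < numLevel; level++ {
		// Mirrors GetCompactionTableSize and GetCompactionTotalSize above.
		table := int(float64(tableSize) * math.Pow(tableMult, float64(level)))
		total := int64(float64(totalSize) * math.Pow(totalMult, float64(level)))
		fmt.Printf("L%d: table limit %d bytes, total limit %d bytes\n", level, table, total)
	}
}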
- -package leveldb - -import ( - "bufio" - "encoding/binary" - "io" - "strings" - - "github.com/syndtr/goleveldb/leveldb/errors" -) - -type byteReader interface { - io.Reader - io.ByteReader -} - -// These numbers are written to disk and should not be changed. -const ( - recComparer = 1 - recJournalNum = 2 - recNextFileNum = 3 - recSeqNum = 4 - recCompPtr = 5 - recDelTable = 6 - recAddTable = 7 - // 8 was used for large value refs - recPrevJournalNum = 9 -) - -type cpRecord struct { - level int - ikey iKey -} - -type atRecord struct { - level int - num uint64 - size uint64 - imin iKey - imax iKey -} - -type dtRecord struct { - level int - num uint64 -} - -type sessionRecord struct { - hasRec int - comparer string - journalNum uint64 - prevJournalNum uint64 - nextFileNum uint64 - seqNum uint64 - compPtrs []cpRecord - addedTables []atRecord - deletedTables []dtRecord - - scratch [binary.MaxVarintLen64]byte - err error -} - -func (p *sessionRecord) has(rec int) bool { - return p.hasRec&(1<<uint(rec)) != 0 -} - -func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int { - x := p.readUvarint(field, r) - if p.err != nil { - return 0 - } - if x >= uint64(numLevel) { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"}) - return 0 - } - return int(x) -} - -func (p *sessionRecord) decode(r io.Reader, numLevel int) error { - br, ok := r.(byteReader) - if !ok { - br = bufio.NewReader(r) - } - p.err = nil - for p.err == nil { - rec := p.readUvarintMayEOF("field-header", br, true) - if p.err != nil { - if p.err == io.EOF { - return nil - } - return p.err - } - switch rec { - case recComparer: - x := p.readBytes("comparer", br) - if p.err == nil { - p.setComparer(string(x)) - } - case recJournalNum: - x := p.readUvarint("journal-num", br) - if p.err == nil { - p.setJournalNum(x) - } - case recPrevJournalNum: - x := p.readUvarint("prev-journal-num", br) - if p.err == nil { - p.setPrevJournalNum(x) - } - case recNextFileNum: - x := p.readUvarint("next-file-num", br) - if p.err == nil { - p.setNextFileNum(x) - } - case recSeqNum: - x := p.readUvarint("seq-num", br) - if p.err == nil { - p.setSeqNum(x) - } - case recCompPtr: - level := p.readLevel("comp-ptr.level", br, numLevel) - ikey := p.readBytes("comp-ptr.ikey", br) - if p.err == nil { - p.addCompPtr(level, iKey(ikey)) - } - case recAddTable: - level := p.readLevel("add-table.level", br, numLevel) - num := p.readUvarint("add-table.num", br) - size := p.readUvarint("add-table.size", br) - imin := p.readBytes("add-table.imin", br) - imax := p.readBytes("add-table.imax", br) - if p.err == nil { - p.addTable(level, num, size, imin, imax) - } - case recDelTable: - level := p.readLevel("del-table.level", br, numLevel) - num := p.readUvarint("del-table.num", br) - if p.err == nil { - p.delTable(level, num) - } - } - } - - return p.err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go deleted file mode 100644 index 33c1487561..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file.
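[Editor's note] decode above implies a simple wire format for manifest records: a uvarint field tag (recComparer = 1 through recPrevJournalNum = 9) followed by the field payload, itself a uvarint or length-prefixed bytes. A small roundtrip sketch of the tag+uvarint case with encoding/binary follows; putUvarintField is a hypothetical helper for illustration, not the library's encoder.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// putUvarintField writes one record field as a uvarint tag followed by a
// uvarint value, matching what readUvarint in decode expects to consume.
func putUvarintField(w *bytes.Buffer, tag, value uint64) {
	var scratch [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(scratch[:], tag)
	w.Write(scratch[:n])
	n = binary.PutUvarint(scratch[:], value)
	w.Write(scratch[:n])
}

func main() {
	b := new(bytes.Buffer)
	putUvarintField(b, 3, 42) // tag 3 is recNextFileNum in the const block above
	tag, _ := binary.ReadUvarint(b)
	val, _ := binary.ReadUvarint(b)
	fmt.Println(tag, val) // 3 42
}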
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func decodeEncode(v *sessionRecord) (res bool, err error) { - b := new(bytes.Buffer) - err = v.encode(b) - if err != nil { - return - } - v2 := &sessionRecord{} - err = v.decode(b, opt.DefaultNumLevel) - if err != nil { - return - } - b2 := new(bytes.Buffer) - err = v2.encode(b2) - if err != nil { - return - } - return bytes.Equal(b.Bytes(), b2.Bytes()), nil -} - -func TestSessionRecord_EncodeDecode(t *testing.T) { - big := uint64(1) << 50 - v := &sessionRecord{} - i := uint64(0) - test := func() { - res, err := decodeEncode(v) - if err != nil { - t.Fatalf("error when testing encode/decode sessionRecord: %v", err) - } - if !res { - t.Error("encode/decode test failed at iteration:", i) - } - } - - for ; i < 4; i++ { - test() - v.addTable(3, big+300+i, big+400+i, - newIkey([]byte("foo"), big+500+1, ktVal), - newIkey([]byte("zoo"), big+600+1, ktDel)) - v.delTable(4, big+700+i) - v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal)) - } - - v.setComparer("foo") - v.setJournalNum(big + 100) - v.setPrevJournalNum(big + 99) - v.setNextFileNum(big + 200) - v.setSeqNum(big + 1000) - test() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go deleted file mode 100644 index 399a788bad..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -// Logging. - -type dropper struct { - s *session - file storage.File -} - -func (d dropper) Drop(err error) { - if e, ok := err.(*journal.ErrCorrupted); ok { - d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason) - } else { - d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err) - } -} - -func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) } -func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) } - -// File utils. - -func (s *session) getJournalFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeJournal) -} - -func (s *session) getTableFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeTable) -} - -func (s *session) getFiles(t storage.FileType) ([]storage.File, error) { - return s.stor.GetFiles(t) -} - -func (s *session) newTemp() storage.File { - num := atomic.AddUint64(&s.stTempFileNum, 1) - 1 - return s.stor.GetFile(num, storage.TypeTemp) -} - -func (s *session) tableFileFromRecord(r atRecord) *tFile { - return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax) -} - -// Session state. - -// Get current version. This will incr version ref, must call -// version.release (exactly once) after use. -func (s *session) version() *version { - s.vmu.Lock() - defer s.vmu.Unlock() - s.stVersion.ref++ - return s.stVersion -} - -// Set current version to v. -func (s *session) setVersion(v *version) { - s.vmu.Lock() - v.ref = 1 // Holds by session. - if old := s.stVersion; old != nil { - v.ref++ // Holds by old version. 
- old.next = v - old.releaseNB() - } - s.stVersion = v - s.vmu.Unlock() -} - -// Get current unused file number. -func (s *session) nextFileNum() uint64 { - return atomic.LoadUint64(&s.stNextFileNum) -} - -// Set current unused file number to num. -func (s *session) setNextFileNum(num uint64) { - atomic.StoreUint64(&s.stNextFileNum, num) -} - -// Mark file number as used. -func (s *session) markFileNum(num uint64) { - nextFileNum := num + 1 - for { - old, x := s.stNextFileNum, nextFileNum - if old > x { - x = old - } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Allocate a file number. -func (s *session) allocFileNum() uint64 { - return atomic.AddUint64(&s.stNextFileNum, 1) - 1 -} - -// Reuse given file number. -func (s *session) reuseFileNum(num uint64) { - for { - old, x := s.stNextFileNum, num - if old != x+1 { - x = old - } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Manifest related utils. - -// Fill given session record obj with current states; need external -// synchronization. -func (s *session) fillRecord(r *sessionRecord, snapshot bool) { - r.setNextFileNum(s.nextFileNum()) - - if snapshot { - if !r.has(recJournalNum) { - r.setJournalNum(s.stJournalNum) - } - - if !r.has(recSeqNum) { - r.setSeqNum(s.stSeqNum) - } - - for level, ik := range s.stCompPtrs { - if ik != nil { - r.addCompPtr(level, ik) - } - } - - r.setComparer(s.icmp.uName()) - } -} - -// Mark if record has been committed, this will update session state; -// need external synchronization. -func (s *session) recordCommited(r *sessionRecord) { - if r.has(recJournalNum) { - s.stJournalNum = r.journalNum - } - - if r.has(recPrevJournalNum) { - s.stPrevJournalNum = r.prevJournalNum - } - - if r.has(recSeqNum) { - s.stSeqNum = r.seqNum - } - - for _, p := range r.compPtrs { - s.stCompPtrs[p.level] = iKey(p.ikey) - } -} - -// Create a new manifest file; need external synchronization. -func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - num := s.allocFileNum() - file := s.stor.GetFile(num, storage.TypeManifest) - writer, err := file.Create() - if err != nil { - return - } - jw := journal.NewWriter(writer) - - if v == nil { - v = s.version() - defer v.release() - } - if rec == nil { - rec = &sessionRecord{} - } - s.fillRecord(rec, true) - v.fillRecord(rec) - - defer func() { - if err == nil { - s.recordCommited(rec) - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - if s.manifestFile != nil { - s.manifestFile.Remove() - } - s.manifestFile = file - s.manifestWriter = writer - s.manifest = jw - } else { - writer.Close() - file.Remove() - s.reuseFileNum(num) - } - }() - - w, err := jw.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = jw.Flush() - if err != nil { - return - } - err = s.stor.SetManifest(file) - return -} - -// Flush record to disk. 
-func (s *session) flushManifest(rec *sessionRecord) (err error) { - s.fillRecord(rec, false) - w, err := s.manifest.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = s.manifest.Flush() - if err != nil { - return - } - err = s.manifestWriter.Sync() - if err != nil { - return - } - s.recordCommited(rec) - return -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go deleted file mode 100644 index 46cc9d0701..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ /dev/null @@ -1,534 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -var errFileOpen = errors.New("leveldb/storage: file still open") - -type fileLock interface { - release() error -} - -type fileStorageLock struct { - fs *fileStorage -} - -func (lock *fileStorageLock) Release() { - fs := lock.fs - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.slock == lock { - fs.slock = nil - } - return -} - -// fileStorage is a file-system backed storage. -type fileStorage struct { - path string - - mu sync.Mutex - flock fileLock - slock *fileStorageLock - logw *os.File - buf []byte - // Opened file counter; open < 0 means closed. - open int - day int -} - -// OpenFile returns a new filesystem-backed storage implementation with the given -// path. This also holds a file lock, so any subsequent attempt to open the same -// path will fail. -// -// The storage must be closed after use, by calling the Close method. -func OpenFile(path string) (Storage, error) { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - flock, err := newFileLock(filepath.Join(path, "LOCK")) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - flock.release() - } - }() - - rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old")) - logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, err - } - - fs := &fileStorage{path: path, flock: flock, logw: logw} - runtime.SetFinalizer(fs, (*fileStorage).Close) - return fs, nil -} - -func (fs *fileStorage) Lock() (util.Releaser, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - if fs.slock != nil { - return nil, ErrLocked - } - fs.slock = &fileStorageLock{fs: fs} - return fs.slock, nil -} - -func itoa(buf []byte, i int, wid int) []byte { - var u uint = uint(i) - if u == 0 && wid <= 1 { - return append(buf, '0') - } - - // Assemble decimal in reverse order. - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - return append(buf, b[bp:]...)
-} - -func (fs *fileStorage) printDay(t time.Time) { - if fs.day == t.Day() { - return - } - fs.day = t.Day() - fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) -} - -func (fs *fileStorage) doLog(t time.Time, str string) { - fs.printDay(t) - hour, min, sec := t.Clock() - msec := t.Nanosecond() / 1e3 - // time - fs.buf = itoa(fs.buf[:0], hour, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, min, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, sec, 2) - fs.buf = append(fs.buf, '.') - fs.buf = itoa(fs.buf, msec, 6) - fs.buf = append(fs.buf, ' ') - // write - fs.buf = append(fs.buf, []byte(str)...) - fs.buf = append(fs.buf, '\n') - fs.logw.Write(fs.buf) -} - -func (fs *fileStorage) Log(str string) { - t := time.Now() - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return - } - fs.doLog(t, str) -} - -func (fs *fileStorage) log(str string) { - fs.doLog(time.Now(), str) -} - -func (fs *fileStorage) GetFile(num uint64, t FileType) File { - return &file{fs: fs, num: num, t: t} -} - -func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - f := &file{fs: fs} - for _, fn := range fnn { - if f.parse(fn) && (f.t&t) != 0 { - ff = append(ff, f) - f = &file{fs: fs} - } - } - return -} - -func (fs *fileStorage) GetManifest() (f File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - // Find latest CURRENT file. - var rem []string - var pend bool - var cerr error - for _, fn := range fnn { - if strings.HasPrefix(fn, "CURRENT") { - pend1 := len(fn) > 7 - // Make sure it is valid name for a CURRENT file, otherwise skip it. - if pend1 { - if fn[7] != '.' || len(fn) < 9 { - fs.log(fmt.Sprintf("skipping %s: invalid file name", fn)) - continue - } - if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil { - fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1)) - continue - } - } - path := filepath.Join(fs.path, fn) - r, e1 := os.OpenFile(path, os.O_RDONLY, 0) - if e1 != nil { - return nil, e1 - } - b, e1 := ioutil.ReadAll(r) - if e1 != nil { - r.Close() - return nil, e1 - } - f1 := &file{fs: fs} - if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) { - fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn)) - if pend1 { - rem = append(rem, fn) - } - if !pend1 || cerr == nil { - cerr = fmt.Errorf("leveldb/storage: corrupted or incomplete %s file", fn) - } - } else if f != nil && f1.Num() < f.Num() { - fs.log(fmt.Sprintf("skipping %s: obsolete", fn)) - if pend1 { - rem = append(rem, fn) - } - } else { - f = f1 - pend = pend1 - } - if err := r.Close(); err != nil { - fs.log(fmt.Sprintf("close %s: %v", fn, err)) - } - } - } - // Don't remove any files if there is no valid CURRENT file. 
- if f == nil { - if cerr != nil { - err = cerr - } else { - err = os.ErrNotExist - } - return - } - // Rename pending CURRENT file to an effective CURRENT. - if pend { - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num()) - if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err)) - } - } - // Remove obsolete or incomplete pending CURRENT files. - for _, fn := range rem { - path := filepath.Join(fs.path, fn) - if err := os.Remove(path); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", fn, err)) - } - } - return -} - -func (fs *fileStorage) SetManifest(f File) (err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - f2, ok := f.(*file) - if !ok || f2.t != TypeManifest { - return ErrInvalidFile - } - defer func() { - if err != nil { - fs.log(fmt.Sprintf("CURRENT: %v", err)) - } - }() - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num()) - w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - _, err = fmt.Fprintln(w, f2.name()) - // Close the file first. - if err := w.Close(); err != nil { - fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err)) - } - if err != nil { - return err - } - return rename(path, filepath.Join(fs.path, "CURRENT")) -} - -func (fs *fileStorage) Close() error { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - // Clear the finalizer. - runtime.SetFinalizer(fs, nil) - - if fs.open > 0 { - fs.log(fmt.Sprintf("refuse to close, %d files still open", fs.open)) - return fmt.Errorf("leveldb/storage: cannot close, %d files still open", fs.open) - } - fs.open = -1 - e1 := fs.logw.Close() - err := fs.flock.release() - if err == nil { - err = e1 - } - return err -} - -type fileWrap struct { - *os.File - f *file -} - -func (fw fileWrap) Sync() error { - if err := fw.File.Sync(); err != nil { - return err - } - if fw.f.Type() == TypeManifest { - // Also sync parent directory if file type is manifest. - // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
- if err := syncDir(fw.f.fs.path); err != nil { - return err - } - } - return nil -} - -func (fw fileWrap) Close() error { - f := fw.f - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if !f.open { - return ErrClosed - } - f.open = false - f.fs.open-- - err := fw.File.Close() - if err != nil { - f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err)) - } - return err -} - -type file struct { - fs *fileStorage - num uint64 - t FileType - open bool -} - -func (f *file) Open() (Reader, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_RDONLY, 0) - if err != nil { - if f.hasOldName() && os.IsNotExist(err) { - of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0) - if err == nil { - goto ok - } - } - return nil, err - } -ok: - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Create() (Writer, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, err - } - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Replace(newfile File) error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - newfile2, ok := newfile.(*file) - if !ok { - return ErrInvalidFile - } - if f.open || newfile2.open { - return errFileOpen - } - return rename(newfile2.path(), f.path()) -} - -func (f *file) Type() FileType { - return f.t -} - -func (f *file) Num() uint64 { - return f.num -} - -func (f *file) Remove() error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - if f.open { - return errFileOpen - } - err := os.Remove(f.path()) - if err != nil { - f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err)) - } - // Also try remove file with old name, just in case. 
- if f.hasOldName() { - if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) { - f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err)) - err = e1 - } - } - return err -} - -func (f *file) hasOldName() bool { - return f.t == TypeTable -} - -func (f *file) oldName() string { - switch f.t { - case TypeTable: - return fmt.Sprintf("%06d.sst", f.num) - } - return f.name() -} - -func (f *file) oldPath() string { - return filepath.Join(f.fs.path, f.oldName()) -} - -func (f *file) name() string { - switch f.t { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", f.num) - case TypeJournal: - return fmt.Sprintf("%06d.log", f.num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", f.num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", f.num) - default: - panic("invalid file type") - } -} - -func (f *file) path() string { - return filepath.Join(f.fs.path, f.name()) -} - -func (f *file) parse(name string) bool { - var num uint64 - var tail string - _, err := fmt.Sscanf(name, "%d.%s", &num, &tail) - if err == nil { - switch tail { - case "log": - f.t = TypeJournal - case "ldb", "sst": - f.t = TypeTable - case "tmp": - f.t = TypeTemp - default: - return false - } - f.num = num - return true - } - n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &num, &tail) - if n == 1 { - f.t = TypeManifest - f.num = num - return true - } - - return false -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go deleted file mode 100644 index 42940d769f..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "os" - "path/filepath" -) - -type plan9FileLock struct { - f *os.File -} - -func (fl *plan9FileLock) release() error { - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644) - if err != nil { - return - } - fl = &plan9FileLock{f: f} - return -} - -func rename(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - if err := os.Remove(newpath); err != nil { - return err - } - } - - _, fname := filepath.Split(newpath) - return os.Rename(oldpath, fname) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go deleted file mode 100644 index 102031bfd5..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
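[Editor's note] name() and parse() above fix the on-disk naming scheme: zero-padded six-digit numbers with a type suffix, MANIFEST-%06d for manifests, and the legacy .sst suffix accepted for old table files. A quick standalone sketch of both directions, using only the formats shown in the code:

package main

import "fmt"

func main() {
	// Formatting, as in name().
	fmt.Printf("MANIFEST-%06d\n", 2) // MANIFEST-000002
	fmt.Printf("%06d.log\n", 100)    // 000100.log
	fmt.Printf("%06d.ldb\n", 7)      // 000007.ldb

	// Parsing, as in parse().
	var num uint64
	var tail string
	if _, err := fmt.Sscanf("000100.log", "%d.%s", &num, &tail); err == nil {
		fmt.Println(num, tail) // 100 log
	}
}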
- -// +build solaris - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - flock := syscall.Flock_t{ - Type: syscall.F_UNLCK, - Start: 0, - Len: 0, - Whence: 1, - } - if lock { - flock.Type = syscall.F_WRLCK - } - return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go deleted file mode 100644 index 92abcbb7d0..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "fmt" - "os" - "path/filepath" - "testing" -) - -var cases = []struct { - oldName []string - name string - ftype FileType - num uint64 -}{ - {nil, "000100.log", TypeJournal, 100}, - {nil, "000000.log", TypeJournal, 0}, - {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, - {nil, "MANIFEST-000002", TypeManifest, 2}, - {nil, "MANIFEST-000007", TypeManifest, 7}, - {nil, "18446744073709551615.log", TypeJournal, 18446744073709551615}, - {nil, "000100.tmp", TypeTemp, 100}, -} - -var invalidCases = []string{ - "", - "foo", - "foo-dx-100.log", - ".log", - "", - "manifest", - "CURREN", - "CURRENTX", - "MANIFES", - "MANIFEST", - "MANIFEST-", - "XMANIFEST-3", - "MANIFEST-3x", - "LOC", - "LOCKx", - "LO", - "LOGx", - "18446744073709551616.log", - "184467440737095516150.log", - "100", - "100.", - "100.lop", -} - -func TestFileStorage_CreateFileName(t *testing.T) { - for _, c := range cases { - f := &file{num: c.num, t: c.ftype} - if f.name() != c.name { - t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name) - } - } -} - -func TestFileStorage_ParseFileName(t *testing.T) { - for _, c := range cases { - for _, name := range append([]string{c.name}, c.oldName...) 
{ - f := new(file) - if !f.parse(name) { - t.Errorf("cannot parse filename '%s'", name) - continue - } - if f.Type() != c.ftype { - t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype) - } - if f.Num() != c.num { - t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num) - } - } - } -} - -func TestFileStorage_InvalidFileName(t *testing.T) { - for _, name := range invalidCases { - f := new(file) - if f.parse(name) { - t.Errorf("filename '%s' should be invalid", name) - } - } -} - -func TestFileStorage_Locking(t *testing.T) { - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid())) - - _, err := os.Stat(path) - if err == nil { - err = os.RemoveAll(path) - if err != nil { - t.Fatal("RemoveAll: got error: ", err) - } - } - - p1, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(1): got error: ", err) - } - - defer os.RemoveAll(path) - - p2, err := OpenFile(path) - if err != nil { - t.Logf("OpenFile(2): got error: %s (expected)", err) - } else { - p2.Close() - p1.Close() - t.Fatal("OpenFile(2): expect error") - } - - p1.Close() - - p3, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(3): got error: ", err) - } - defer p3.Close() - - l, err := p3.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = p3.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = p3.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go deleted file mode 100644 index d0a604b7ab..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - how := syscall.LOCK_UN - if lock { - how = syscall.LOCK_EX - } - return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go deleted file mode 100644 index 50c3c454e7..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procMoveFileExW = modkernel32.NewProc("MoveFileExW") -) - -const ( - _MOVEFILE_REPLACE_EXISTING = 1 -) - -type windowsFileLock struct { - fd syscall.Handle -} - -func (fl *windowsFileLock) release() error { - return syscall.Close(fl.fd) -} - -func newFileLock(path string) (fl fileLock, err error) { - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return - } - fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) - if err != nil { - return - } - fl = &windowsFileLock{fd: fd} - return -} - -func moveFileEx(from *uint16, to *uint16, flags uint32) error { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - return error(e1) - } else { - return syscall.EINVAL - } - } - return nil -} - -func rename(oldpath, newpath string) error { - from, err := syscall.UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := syscall.UTF16PtrFromString(newpath) - if err != nil { - return err - } - return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) -} - -func syncDir(name string) error { return nil } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go deleted file mode 100644 index fc1c8165df..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
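[Editor's note] For the unix build above, the whole locking story is an advisory flock taken non-blocking (LOCK_EX|LOCK_NB to acquire, LOCK_UN to release). A minimal usage sketch of the same pattern; unix-only, and the lock-file path here is illustrative:

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	f, err := os.OpenFile("/tmp/demo.LOCK", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Non-blocking exclusive lock, as in setFileLock above.
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		fmt.Println("already locked:", err)
		return
	}
	fmt.Println("lock acquired")
	syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
}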
- -package storage - -import ( - "bytes" - "os" - "sync" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 3 - -type memStorageLock struct { - ms *memStorage -} - -func (lock *memStorageLock) Release() { - ms := lock.ms - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock == lock { - ms.slock = nil - } - return -} - -// memStorage is a memory-backed storage. -type memStorage struct { - mu sync.Mutex - slock *memStorageLock - files map[uint64]*memFile - manifest *memFilePtr -} - -// NewMemStorage returns a new memory-backed storage implementation. -func NewMemStorage() Storage { - return &memStorage{ - files: make(map[uint64]*memFile), - } -} - -func (ms *memStorage) Lock() (util.Releaser, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock != nil { - return nil, ErrLocked - } - ms.slock = &memStorageLock{ms: ms} - return ms.slock, nil -} - -func (*memStorage) Log(str string) {} - -func (ms *memStorage) GetFile(num uint64, t FileType) File { - return &memFilePtr{ms: ms, num: num, t: t} -} - -func (ms *memStorage) GetFiles(t FileType) ([]File, error) { - ms.mu.Lock() - var ff []File - for x := range ms.files { - num, mt := x>>typeShift, FileType(x)&TypeAll - if mt&t == 0 { - continue - } - ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt}) - } - ms.mu.Unlock() - return ff, nil -} - -func (ms *memStorage) GetManifest() (File, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.manifest == nil { - return nil, os.ErrNotExist - } - return ms.manifest, nil -} - -func (ms *memStorage) SetManifest(f File) error { - fm, ok := f.(*memFilePtr) - if !ok || fm.t != TypeManifest { - return ErrInvalidFile - } - ms.mu.Lock() - ms.manifest = fm - ms.mu.Unlock() - return nil -} - -func (*memStorage) Close() error { return nil } - -type memReader struct { - *bytes.Reader - m *memFile -} - -func (mr *memReader) Close() error { - return mr.m.Close() -} - -type memFile struct { - bytes.Buffer - ms *memStorage - open bool -} - -func (*memFile) Sync() error { return nil } -func (m *memFile) Close() error { - m.ms.mu.Lock() - m.open = false - m.ms.mu.Unlock() - return nil -} - -type memFilePtr struct { - ms *memStorage - num uint64 - t FileType -} - -func (p *memFilePtr) x() uint64 { - return p.Num()<<typeShift | uint64(p.t) -} -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file.
-
-package storage
-
-import (
-	"bytes"
-	"testing"
-)
-
-func TestMemStorage(t *testing.T) {
-	m := NewMemStorage()
-
-	l, err := m.Lock()
-	if err != nil {
-		t.Fatal("storage lock failed(1): ", err)
-	}
-	_, err = m.Lock()
-	if err == nil {
-		t.Fatal("expect error for second storage lock attempt")
-	} else {
-		t.Logf("storage lock got error: %s (expected)", err)
-	}
-	l.Release()
-	_, err = m.Lock()
-	if err != nil {
-		t.Fatal("storage lock failed(2): ", err)
-	}
-
-	f := m.GetFile(1, TypeTable)
-	if f.Num() != 1 || f.Type() != TypeTable {
-		t.Fatal("invalid file number and type")
-	}
-	w, _ := f.Create()
-	w.Write([]byte("abc"))
-	w.Close()
-	if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 {
-		t.Fatal("invalid GetFiles len")
-	}
-	buf := new(bytes.Buffer)
-	r, err := f.Open()
-	if err != nil {
-		t.Fatal("Open: got error: ", err)
-	}
-	buf.ReadFrom(r)
-	r.Close()
-	if got := buf.String(); got != "abc" {
-		t.Fatalf("Read: invalid value, want=abc got=%s", got)
-	}
-	if _, err := f.Open(); err != nil {
-		t.Fatal("Open: got error: ", err)
-	}
-	if _, err := m.GetFile(1, TypeTable).Open(); err == nil {
-		t.Fatal("expecting error")
-	}
-	f.Remove()
-	if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 {
-		t.Fatal("invalid GetFiles len", len(ff))
-	}
-	if _, err := f.Open(); err == nil {
-		t.Fatal("expecting error")
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
deleted file mode 100644
index 85dd70b06f..0000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Package storage provides storage abstraction for LevelDB.
-package storage
-
-import (
-	"errors"
-	"fmt"
-	"io"
-
-	"github.com/syndtr/goleveldb/leveldb/util"
-)
-
-type FileType uint32
-
-const (
-	TypeManifest FileType = 1 << iota
-	TypeJournal
-	TypeTable
-	TypeTemp
-
-	TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
-)
-
-func (t FileType) String() string {
-	switch t {
-	case TypeManifest:
-		return "manifest"
-	case TypeJournal:
-		return "journal"
-	case TypeTable:
-		return "table"
-	case TypeTemp:
-		return "temp"
-	}
-	return fmt.Sprintf("<unknown:%d>", t)
-}
-
-var (
-	ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
-	ErrLocked      = errors.New("leveldb/storage: already locked")
-	ErrClosed      = errors.New("leveldb/storage: closed")
-)
-
-// Syncer is the interface that wraps the basic Sync method.
-type Syncer interface {
-	// Sync commits the current contents of the file to stable storage.
-	Sync() error
-}
-
-// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
-// methods.
-type Reader interface {
-	io.ReadSeeker
-	io.ReaderAt
-	io.Closer
-}
-
-// Writer is the interface that groups the basic Write, Sync and Close
-// methods.
-type Writer interface {
-	io.WriteCloser
-	Syncer
-}
-
-// File represents a file in the storage. A file instance must be goroutine-safe.
-type File interface {
-	// Open opens the file for reading. It returns os.ErrNotExist
-	// if the file does not exist.
-	// Returns ErrClosed if the underlying storage is closed.
-	Open() (r Reader, err error)
-
-	// Create creates the file for writing. It truncates the file if it
-	// already exists.
-	// Returns ErrClosed if the underlying storage is closed.
-	Create() (w Writer, err error)
-
-	// Replace replaces the file with newfile.
-	// Returns ErrClosed if the underlying storage is closed.
-	Replace(newfile File) error
-
-	// Type returns the file type.
-	Type() FileType
-
-	// Num returns the file number.
-	Num() uint64
-
-	// Remove removes the file.
-	// Returns ErrClosed if the underlying storage is closed.
-	Remove() error
-}
-
-// Storage is the storage. A storage instance must be goroutine-safe.
-type Storage interface {
-	// Lock locks the storage. Any subsequent attempt to call Lock will fail
-	// until the lock is released.
-	// After use the caller should call the Release method.
-	Lock() (l util.Releaser, err error)
-
-	// Log logs a string. An implementation
-	// may write to a file, stdout or simply do nothing.
-	Log(str string)
-
-	// GetFile returns a file for the given number and type. GetFile never
-	// returns nil, even if the underlying storage is closed.
-	GetFile(num uint64, t FileType) File
-
-	// GetFiles returns a slice of files that match the given file types.
-	// The file types may be OR'ed together.
-	GetFiles(t FileType) ([]File, error)
-
-	// GetManifest returns the manifest file. Returns os.ErrNotExist if the
-	// manifest file does not exist.
-	GetManifest() (File, error)
-
-	// SetManifest sets the given file as the manifest file. The given file
-	// must be of manifest type or an error is returned.
-	SetManifest(f File) error
-
-	// Close closes the storage. It is valid to call Close multiple times.
-	// Other methods should not be called after the storage has been closed.
-	Close() error
-}
-
-// FileInfo wraps basic file info.
-type FileInfo struct {
-	Type FileType
-	Num  uint64
-}
-
-func (fi FileInfo) String() string {
-	switch fi.Type {
-	case TypeManifest:
-		return fmt.Sprintf("MANIFEST-%06d", fi.Num)
-	case TypeJournal:
-		return fmt.Sprintf("%06d.log", fi.Num)
-	case TypeTable:
-		return fmt.Sprintf("%06d.ldb", fi.Num)
-	case TypeTemp:
-		return fmt.Sprintf("%06d.tmp", fi.Num)
-	default:
-		return fmt.Sprintf("%#x-%d", fi.Type, fi.Num)
-	}
-}
-
-// NewFileInfo creates a new FileInfo from the given File. It returns nil
-// if the File is nil.
-func NewFileInfo(f File) *FileInfo {
-	if f == nil {
-		return nil
-	}
-	return &FileInfo{f.Type(), f.Num()}
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
deleted file mode 100644
index 08be0bab3d..0000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
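Taken together, Storage and File form a small virtual file system: lock the store, mint file handles by number and type, and track a single manifest. A usage sketch against the in-memory implementation, assuming the upstream github.com/syndtr/goleveldb import path rather than this vendored copy:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	stor := storage.NewMemStorage()
	defer stor.Close()

	// Hold the storage lock for the lifetime of the "session".
	l, err := stor.Lock()
	if err != nil {
		panic(err)
	}
	defer l.Release()

	// Create a table file and write to it.
	f := stor.GetFile(1, storage.TypeTable)
	w, err := f.Create()
	if err != nil {
		panic(err)
	}
	w.Write([]byte("payload"))
	w.Close()

	// Record a manifest; SetManifest rejects non-manifest file types.
	m := stor.GetFile(2, storage.TypeManifest)
	if err := stor.SetManifest(m); err != nil {
		panic(err)
	}
	got, _ := stor.GetManifest()
	fmt.Println(got.Num(), got.Type() == storage.TypeManifest) // 2 true
}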
- -package leveldb - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 4 - -var ( - tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument") - tsErrFileOpen = errors.New("leveldb.testStorage: file still open") -) - -var ( - tsFSEnv = os.Getenv("GOLEVELDB_USEFS") - tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR") - tsKeepFS = tsFSEnv == "2" - tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1" - tsMU = &sync.Mutex{} - tsNum = 0 -) - -type tsOp uint - -const ( - tsOpOpen tsOp = iota - tsOpCreate - tsOpReplace - tsOpRemove - tsOpRead - tsOpReadAt - tsOpWrite - tsOpSync - - tsOpNum -) - -type tsLock struct { - ts *testStorage - r util.Releaser -} - -func (l tsLock) Release() { - l.r.Release() - l.ts.t.Log("I: storage lock released") -} - -type tsReader struct { - tf tsFile - storage.Reader -} - -func (tr tsReader) Read(b []byte) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - if tr.tf.shouldErrLocked(tsOpRead) { - return 0, errors.New("leveldb.testStorage: emulated read error") - } - n, err = tr.Reader.Read(b) - if err != nil && err != io.EOF { - ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err) - } - return -} - -func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - if tr.tf.shouldErrLocked(tsOpReadAt) { - return 0, errors.New("leveldb.testStorage: emulated readAt error") - } - n, err = tr.Reader.ReadAt(b, off) - if err != nil && err != io.EOF { - ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err) - } - return -} - -func (tr tsReader) Close() (err error) { - err = tr.Reader.Close() - tr.tf.close("reader", err) - return -} - -type tsWriter struct { - tf tsFile - storage.Writer -} - -func (tw tsWriter) Write(b []byte) (n int, err error) { - if tw.tf.shouldErrLocked(tsOpWrite) { - return 0, errors.New("leveldb.testStorage: emulated write error") - } - n, err = tw.Writer.Write(b) - if err != nil { - tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err) - } - return -} - -func (tw tsWriter) Sync() (err error) { - ts := tw.tf.ts - ts.mu.Lock() - for ts.emuDelaySync&tw.tf.Type() != 0 { - ts.cond.Wait() - } - ts.mu.Unlock() - if tw.tf.shouldErrLocked(tsOpSync) { - return errors.New("leveldb.testStorage: emulated sync error") - } - err = tw.Writer.Sync() - if err != nil { - tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err) - } - return -} - -func (tw tsWriter) Close() (err error) { - err = tw.Writer.Close() - tw.tf.close("writer", err) - return -} - -type tsFile struct { - ts *testStorage - storage.File -} - -func (tf tsFile) x() uint64 { - return tf.Num()<>typeShift, storage.FileType(x)&storage.TypeAll - ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer) - } - } - ts.mu.Unlock() -} - -func newTestStorage(t *testing.T) *testStorage { - var stor storage.Storage - var closeFn func() error - if tsFS { - for { - tsMU.Lock() - num := tsNum - tsNum++ - tsMU.Unlock() - tempdir := tsTempdir - if tempdir == "" { - tempdir = os.TempDir() - } - path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); err != nil { - stor, err = storage.OpenFile(path) - if err != nil { - t.Fatalf("F: cannot 
create storage: %v", err) - } - t.Logf("I: storage created: %s", path) - closeFn = func() error { - for _, name := range []string{"LOG.old", "LOG"} { - f, err := os.Open(filepath.Join(path, name)) - if err != nil { - continue - } - if log, err := ioutil.ReadAll(f); err != nil { - t.Logf("---------------------- %s ----------------------", name) - t.Logf("cannot read log: %v", err) - t.Logf("---------------------- %s ----------------------", name) - } else if len(log) > 0 { - t.Logf("---------------------- %s ----------------------\n%s", name, string(log)) - t.Logf("---------------------- %s ----------------------", name) - } - f.Close() - } - if t.Failed() { - t.Logf("testing failed, test DB preserved at %s", path) - return nil - } - if tsKeepFS { - return nil - } - return os.RemoveAll(path) - } - - break - } - } - } else { - stor = storage.NewMemStorage() - } - ts := &testStorage{ - t: t, - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - emuErrOnceMap: make(map[uint64]uint), - emuRandErrProb: 0x999, - emuRandRand: rand.New(rand.NewSource(0xfacedead)), - } - ts.cond.L = &ts.mu - return ts -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go deleted file mode 100644 index db386f3b54..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/table" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// tFile holds basic information about a table. -type tFile struct { - file storage.File - seekLeft int32 - size uint64 - imin, imax iKey -} - -// Returns true if given key is after largest key of this table. -func (t *tFile) after(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 -} - -// Returns true if given key is before smallest key of this table. -func (t *tFile) before(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 -} - -// Returns true if given key range overlaps with this table key range. -func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { - return !t.after(icmp, umin) && !t.before(icmp, umax) -} - -// Cosumes one seek and return current seeks left. -func (t *tFile) consumeSeek() int32 { - return atomic.AddInt32(&t.seekLeft, -1) -} - -// Creates new tFile. -func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { - f := &tFile{ - file: file, - size: size, - imin: imin, - imax: imax, - } - - // We arrange to automatically compact this file after - // a certain number of seeks. Let's assume: - // (1) One seek costs 10ms - // (2) Writing or reading 1MB costs 10ms (100MB/s) - // (3) A compaction of 1MB does 25MB of IO: - // 1MB read from this level - // 10-12MB read from next level (boundaries may be misaligned) - // 10-12MB written to next level - // This implies that 25 seeks cost the same as the compaction - // of 1MB of data. I.e., one seek costs approximately the - // same as the compaction of 40KB of data. 
We are a little - // conservative and allow approximately one seek for every 16KB - // of data before triggering a compaction. - f.seekLeft = int32(size / 16384) - if f.seekLeft < 100 { - f.seekLeft = 100 - } - - return f -} - -// tFiles hold multiple tFile. -type tFiles []*tFile - -func (tf tFiles) Len() int { return len(tf) } -func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } - -func (tf tFiles) nums() string { - x := "[ " - for i, f := range tf { - if i != 0 { - x += ", " - } - x += fmt.Sprint(f.file.Num()) - } - x += " ]" - return x -} - -// Returns true if i smallest key is less than j. -// This used for sort by key in ascending order. -func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { - a, b := tf[i], tf[j] - n := icmp.Compare(a.imin, b.imin) - if n == 0 { - return a.file.Num() < b.file.Num() - } - return n < 0 -} - -// Returns true if i file number is greater than j. -// This used for sort by file number in descending order. -func (tf tFiles) lessByNum(i, j int) bool { - return tf[i].file.Num() > tf[j].file.Num() -} - -// Sorts tables by key in ascending order. -func (tf tFiles) sortByKey(icmp *iComparer) { - sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) -} - -// Sorts tables by file number in descending order. -func (tf tFiles) sortByNum() { - sort.Sort(&tFilesSortByNum{tFiles: tf}) -} - -// Returns sum of all tables size. -func (tf tFiles) size() (sum uint64) { - for _, t := range tf { - sum += t.size - } - return sum -} - -// Searches smallest index of tables whose its smallest -// key is after or equal with given key. -func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imin, ikey) >= 0 - }) -} - -// Searches smallest index of tables whose its largest -// key is after or equal with given key. -func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imax, ikey) >= 0 - }) -} - -// Returns true if given key range overlaps with one or more -// tables key range. If unsorted is true then binary search will not be used. -func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { - if unsorted { - // Check against all files. - for _, t := range tf { - if t.overlaps(icmp, umin, umax) { - return true - } - } - return false - } - - i := 0 - if len(umin) > 0 { - // Find the earliest possible internal key for min. - i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek)) - } - if i >= len(tf) { - // Beginning of range is after all files, so no overlap. - return false - } - return !tf[i].before(icmp, umax) -} - -// Returns tables whose its key range overlaps with given key range. -// Range will be expanded if ukey found hop across tables. -// If overlapped is true then the search will be restarted if umax -// expanded. -// The dst content will be overwritten. -func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { - dst = dst[:0] - for i := 0; i < len(tf); { - t := tf[i] - if t.overlaps(icmp, umin, umax) { - if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { - umin = t.imin.ukey() - dst = dst[:0] - i = 0 - continue - } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { - umax = t.imax.ukey() - // Restart search if it is overlapped. - if overlapped { - dst = dst[:0] - i = 0 - continue - } - } - - dst = append(dst, t) - } - i++ - } - - return dst -} - -// Returns tables key range. 
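The comment in newTableFile above prices one seek at roughly the compaction cost of 16KB of data, so the initial seek budget works out to seekLeft = max(size/16384, 100). A quick check of that arithmetic, mirroring the two lines of newTableFile:

package main

import "fmt"

// seekBudget mirrors newTableFile's initialization of tFile.seekLeft.
func seekBudget(size uint64) int32 {
	left := int32(size / 16384) // one seek allowed per 16KB of table data
	if left < 100 {
		left = 100 // floor so tiny tables aren't compacted after a few seeks
	}
	return left
}

func main() {
	fmt.Println(seekBudget(2 << 20))  // 2MB table: 128 seeks
	fmt.Println(seekBudget(64 << 10)) // 64KB table: floor kicks in, 100
}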
-func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) { - for i, t := range tf { - if i == 0 { - imin, imax = t.imin, t.imax - continue - } - if icmp.Compare(t.imin, imin) < 0 { - imin = t.imin - } - if icmp.Compare(t.imax, imax) > 0 { - imax = t.imax - } - } - - return -} - -// Creates iterator index from tables. -func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { - if slice != nil { - var start, limit int - if slice.Start != nil { - start = tf.searchMax(icmp, iKey(slice.Start)) - } - if slice.Limit != nil { - limit = tf.searchMin(icmp, iKey(slice.Limit)) - } else { - limit = tf.Len() - } - tf = tf[start:limit] - } - return iterator.NewArrayIndexer(&tFilesArrayIndexer{ - tFiles: tf, - tops: tops, - icmp: icmp, - slice: slice, - ro: ro, - }) -} - -// Tables iterator index. -type tFilesArrayIndexer struct { - tFiles - tops *tOps - icmp *iComparer - slice *util.Range - ro *opt.ReadOptions -} - -func (a *tFilesArrayIndexer) Search(key []byte) int { - return a.searchMax(a.icmp, iKey(key)) -} - -func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { - if i == 0 || i == a.Len()-1 { - return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) - } - return a.tops.newIterator(a.tFiles[i], nil, a.ro) -} - -// Helper type for sortByKey. -type tFilesSortByKey struct { - tFiles - icmp *iComparer -} - -func (x *tFilesSortByKey) Less(i, j int) bool { - return x.lessByKey(x.icmp, i, j) -} - -// Helper type for sortByNum. -type tFilesSortByNum struct { - tFiles -} - -func (x *tFilesSortByNum) Less(i, j int) bool { - return x.lessByNum(i, j) -} - -// Table operations. -type tOps struct { - s *session - cache *cache.Cache - bcache *cache.Cache - bpool *util.BufferPool -} - -// Creates an empty table and returns table writer. -func (t *tOps) create() (*tWriter, error) { - file := t.s.getTableFile(t.s.allocFileNum()) - fw, err := file.Create() - if err != nil { - return nil, err - } - return &tWriter{ - t: t, - file: file, - w: fw, - tw: table.NewWriter(fw, t.s.o.Options), - }, nil -} - -// Builds table from src iterator. -func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { - w, err := t.create() - if err != nil { - return - } - - defer func() { - if err != nil { - w.drop() - } - }() - - for src.Next() { - err = w.append(src.Key(), src.Value()) - if err != nil { - return - } - } - err = src.Error() - if err != nil { - return - } - - n = w.tw.EntriesLen() - f, err = w.finish() - return -} - -// Opens table. It returns a cache handle, which should -// be released after use. -func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { - num := f.file.Num() - ch = t.cache.Get(0, num, func() (size int, value cache.Value) { - var r storage.Reader - r, err = f.file.Open() - if err != nil { - return 0, nil - } - - var bcache *cache.CacheGetter - if t.bcache != nil { - bcache = &cache.CacheGetter{Cache: t.bcache, NS: num} - } - - var tr *table.Reader - tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options) - if err != nil { - r.Close() - return 0, nil - } - return 1, tr - - }) - if ch == nil && err == nil { - err = ErrClosed - } - return -} - -// Finds key/value pair whose key is greater than or equal to the -// given key. 
-func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
-	ch, err := t.open(f)
-	if err != nil {
-		return nil, nil, err
-	}
-	defer ch.Release()
-	return ch.Value().(*table.Reader).Find(key, true, ro)
-}
-
-// Finds the key that is greater than or equal to the given key.
-func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
-	ch, err := t.open(f)
-	if err != nil {
-		return nil, err
-	}
-	defer ch.Release()
-	return ch.Value().(*table.Reader).FindKey(key, true, ro)
-}
-
-// Returns the approximate offset of the given key.
-func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
-	ch, err := t.open(f)
-	if err != nil {
-		return
-	}
-	defer ch.Release()
-	offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
-	return uint64(offset_), err
-}
-
-// Creates an iterator from the given table.
-func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
-	ch, err := t.open(f)
-	if err != nil {
-		return iterator.NewEmptyIterator(err)
-	}
-	iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
-	iter.SetReleaser(ch)
-	return iter
-}
-
-// Removes the table from persistent storage. It waits until
-// no one uses the table.
-func (t *tOps) remove(f *tFile) {
-	num := f.file.Num()
-	t.cache.Delete(0, num, func() {
-		if err := f.file.Remove(); err != nil {
-			t.s.logf("table@remove removing @%d %q", num, err)
-		} else {
-			t.s.logf("table@remove removed @%d", num)
-		}
-		if t.bcache != nil {
-			t.bcache.EvictNS(num)
-		}
-	})
-}
-
-// Closes the table ops instance. It closes all tables,
-// regardless of whether they are still in use or not.
-func (t *tOps) close() {
-	t.bpool.Close()
-	t.cache.Close()
-	if t.bcache != nil {
-		t.bcache.Close()
-	}
-}
-
-// Creates a new initialized table ops instance.
-func newTableOps(s *session) *tOps {
-	var (
-		cacher cache.Cacher
-		bcache *cache.Cache
-		bpool  *util.BufferPool
-	)
-	if s.o.GetOpenFilesCacheCapacity() > 0 {
-		cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
-	}
-	if !s.o.GetDisableBlockCache() {
-		var bcacher cache.Cacher
-		if s.o.GetBlockCacheCapacity() > 0 {
-			bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
-		}
-		bcache = cache.NewCache(bcacher)
-	}
-	if !s.o.GetDisableBufferPool() {
-		bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
-	}
-	return &tOps{
-		s:      s,
-		cache:  cache.NewCache(cacher),
-		bcache: bcache,
-		bpool:  bpool,
-	}
-}
-
-// tWriter wraps the table writer. It keeps track of the file
-// and the added key range.
-type tWriter struct {
-	t *tOps
-
-	file storage.File
-	w    storage.Writer
-	tw   *table.Writer
-
-	first, last []byte
-}
-
-// Appends a key/value pair to the table.
-func (w *tWriter) append(key, value []byte) error {
-	if w.first == nil {
-		w.first = append([]byte{}, key...)
-	}
-	w.last = append(w.last[:0], key...)
-	return w.tw.Append(key, value)
-}
-
-// Returns true if the table is empty.
-func (w *tWriter) empty() bool {
-	return w.first == nil
-}
-
-// Closes the storage.Writer.
-func (w *tWriter) close() {
-	if w.w != nil {
-		w.w.Close()
-		w.w = nil
-	}
-}
-
-// Finalizes the table and returns the table file.
-func (w *tWriter) finish() (f *tFile, err error) {
-	defer w.close()
-	err = w.tw.Close()
-	if err != nil {
-		return
-	}
-	err = w.w.Sync()
-	if err != nil {
-		return
-	}
-	f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
-	return
-}
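The append bookkeeping above copies the first and last keys instead of aliasing them, because callers may reuse the key buffer between calls. A sketch of why the copy matters; the tracker type is illustrative, not the real tWriter:

package main

import "fmt"

// rangeTracker mimics tWriter's first/last bookkeeping.
type rangeTracker struct{ first, last []byte }

func (w *rangeTracker) append(key []byte) {
	if w.first == nil {
		w.first = append([]byte{}, key...) // copy: never alias the caller's slice
	}
	w.last = append(w.last[:0], key...) // reuse capacity, but still a copy
}

func main() {
	w := &rangeTracker{}
	buf := []byte("a1")
	w.append(buf)
	copy(buf, "zz") // caller clobbers its buffer after the call...
	w.append([]byte("b9"))
	// ...yet the tracked range is unaffected.
	fmt.Printf("first=%s last=%s\n", w.first, w.last) // first=a1 last=b9
}

-// Drops the table.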
-func (w *tWriter) drop() { - w.close() - w.file.Remove() - w.t.s.reuseFileNum(w.file.Num()) - w.file = nil - w.tw = nil - w.first = nil - w.last = nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go deleted file mode 100644 index 00e6f9eea0..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type blockTesting struct { - tr *Reader - b *block -} - -func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.tr.newBlockIter(t.b, nil, slice, false) -} - -var _ = testutil.Defer(func() { - Describe("Block", func() { - Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting { - // Building the block. - bw := &blockWriter{ - restartInterval: restartInterval, - scratch: make([]byte, 30), - } - kv.Iterate(func(i int, key, value []byte) { - bw.append(key, value) - }) - bw.finish() - - // Opening the block. - data := bw.buf.Bytes() - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - return &blockTesting{ - tr: &Reader{cmp: comparer.DefaultComparer}, - b: &block{ - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - }, - } - } - - Describe("read test", func() { - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - kv := &testutil.KeyValue{} - Text := func() string { - return fmt.Sprintf("and %d keys", kv.Len()) - } - - Test := func() { - // Make block. - br := Build(kv, restartInterval) - // Do testing. - testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil) - } - - Describe(Text(), Test) - - kv.PutString("", "empty") - Describe(Text(), Test) - - kv.PutString("a1", "foo") - Describe(Text(), Test) - - kv.PutString("a2", "v") - Describe(Text(), Test) - - kv.PutString("a3qqwrkks", "hello") - Describe(Text(), Test) - - kv.PutString("a4", "bar") - Describe(Text(), Test) - - kv.PutString("a5111111", "v5") - kv.PutString("a6", "") - kv.PutString("a7", "v7") - kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") - kv.PutString("b", "v9") - kv.PutString("c9", "v9") - kv.PutString("c91", "v9") - kv.PutString("d0", "v9") - Describe(Text(), Test) - }) - } - }) - - Describe("out-of-bound slice test", func() { - kv := &testutil.KeyValue{} - kv.PutString("k1", "v1") - kv.PutString("k2", "v2") - kv.PutString("k3abcdefgg", "v3") - kv.PutString("k4", "v4") - kv.PutString("k5", "v5") - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - // Make block. 
- bt := Build(kv, restartInterval) - - Test := func(r *util.Range) func(done Done) { - return func(done Done) { - iter := bt.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: iter, - } - - testutil.DoIteratorTesting(&t) - iter.Release() - done <- true - } - } - - It("Should do iterations and seeks correctly #0", - Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) - - It("Should do iterations and seeks correctly #1", - Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) - }) - } - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go deleted file mode 100644 index 23c7c6129a..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - "io" - "sort" - "strings" - "sync" - - "github.com/golang/snappy" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrReaderReleased = errors.New("leveldb/table: reader released") - ErrIterReleased = errors.New("leveldb/table: iterator released") -) - -type ErrCorrupted struct { - Pos int64 - Size int64 - Kind string - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason) -} - -func max(x, y int) int { - if x > y { - return x - } - return y -} - -type block struct { - bpool *util.BufferPool - bh blockHandle - data []byte - restartsLen int - restartsOffset int -} - -func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) { - index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) - offset += 1 // shared always zero, since this is a restart point - v1, n1 := binary.Uvarint(b.data[offset:]) // key length - _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length - m := offset + n1 + n2 - return cmp.Compare(b.data[m:m+int(v1)], key) > 0 - }) + rstart - 1 - if index < rstart { - // The smallest key is greater-than key sought. 
- index = rstart - } - offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) - return -} - -func (b *block) restartIndex(rstart, rlimit, offset int) int { - return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset - }) + rstart - 1 -} - -func (b *block) restartOffset(index int) int { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) -} - -func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { - if offset >= b.restartsOffset { - if offset != b.restartsOffset { - err = &ErrCorrupted{Reason: "entries offset not aligned"} - } - return - } - v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length - v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length - v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length - m := n0 + n1 + n2 - n = m + int(v1) + int(v2) - if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { - err = &ErrCorrupted{Reason: "entries corrupted"} - return - } - key = b.data[offset+m : offset+m+int(v1)] - value = b.data[offset+m+int(v1) : offset+n] - nShared = int(v0) - return -} - -func (b *block) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type blockIter struct { - tr *Reader - block *block - blockReleaser util.Releaser - releaser util.Releaser - key, value []byte - offset int - // Previous offset, only filled by Next. - prevOffset int - prevNode []int - prevKeys []byte - restartIndex int - // Iterator direction. - dir dir - // Restart index slice range. - riStart int - riLimit int - // Offset slice range. - offsetStart int - offsetRealStart int - offsetLimit int - // Error. 
- err error -} - -func (i *blockIter) sErr(err error) { - i.err = err - i.key = nil - i.value = nil - i.prevNode = nil - i.prevKeys = nil -} - -func (i *blockIter) reset() { - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.restartIndex = i.riStart - i.offset = i.offsetStart - i.dir = dirSOI - i.key = i.key[:0] - i.value = nil -} - -func (i *blockIter) isFirst() bool { - switch i.dir { - case dirForward: - return i.prevOffset == i.offsetRealStart - case dirBackward: - return len(i.prevNode) == 1 && i.restartIndex == i.riStart - } - return false -} - -func (i *blockIter) isLast() bool { - switch i.dir { - case dirForward, dirBackward: - return i.offset == i.offsetLimit - } - return false -} - -func (i *blockIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirSOI - return i.Next() -} - -func (i *blockIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirEOI - return i.Prev() -} - -func (i *blockIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key) - if err != nil { - i.sErr(err) - return false - } - i.restartIndex = ri - i.offset = max(i.offsetStart, offset) - if i.dir == dirSOI || i.dir == dirEOI { - i.dir = dirForward - } - for i.Next() { - if i.tr.cmp.Compare(i.key, key) >= 0 { - return true - } - } - return false -} - -func (i *blockIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirSOI { - i.restartIndex = i.riStart - i.offset = i.offsetStart - } else if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - for i.offset < i.offsetRealStart { - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.offset += n - } - if i.offset >= i.offsetLimit { - i.dir = dirEOI - if i.offset != i.offsetLimit { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - } - return false - } - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.prevOffset = i.offset - i.offset += n - i.dir = dirForward - return true -} - -func (i *blockIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - var ri int - if i.dir == dirForward { - // Change direction. - i.offset = i.prevOffset - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) - i.dir = dirBackward - } else if i.dir == dirEOI { - // At the end of iterator. 
- i.restartIndex = i.riLimit - i.offset = i.offsetLimit - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.riLimit - 1 - i.dir = dirBackward - } else if len(i.prevNode) == 1 { - // This is the end of a restart range. - i.offset = i.prevNode[0] - i.prevNode = i.prevNode[:0] - if i.restartIndex == i.riStart { - i.dir = dirSOI - return false - } - i.restartIndex-- - ri = i.restartIndex - } else { - // In the middle of restart range, get from cache. - n := len(i.prevNode) - 3 - node := i.prevNode[n:] - i.prevNode = i.prevNode[:n] - // Get the key. - ko := node[0] - i.key = append(i.key[:0], i.prevKeys[ko:]...) - i.prevKeys = i.prevKeys[:ko] - // Get the value. - vo := node[1] - vl := vo + node[2] - i.value = i.block.data[vo:vl] - i.offset = vl - return true - } - // Build entries cache. - i.key = i.key[:0] - i.value = nil - offset := i.block.restartOffset(ri) - if offset == i.offset { - ri -= 1 - if ri < 0 { - i.dir = dirSOI - return false - } - offset = i.block.restartOffset(ri) - } - i.prevNode = append(i.prevNode, offset) - for { - key, value, nShared, n, err := i.block.entry(offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if offset >= i.offsetRealStart { - if i.value != nil { - // Appends 3 variables: - // 1. Previous keys offset - // 2. Value offset in the data block - // 3. Value length - i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) - i.prevKeys = append(i.prevKeys, i.key...) - } - i.value = value - } - i.key = append(i.key[:nShared], key...) - offset += n - // Stop if target offset reached. - if offset >= i.offset { - if offset != i.offset { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - return false - } - - break - } - } - i.restartIndex = ri - i.offset = offset - return true -} - -func (i *blockIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *blockIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *blockIter) Release() { - if i.dir != dirReleased { - i.tr = nil - i.block = nil - i.prevNode = nil - i.prevKeys = nil - i.key = nil - i.value = nil - i.dir = dirReleased - if i.blockReleaser != nil { - i.blockReleaser.Release() - i.blockReleaser = nil - } - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *blockIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *blockIter) Valid() bool { - return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) -} - -func (i *blockIter) Error() error { - return i.err -} - -type filterBlock struct { - bpool *util.BufferPool - data []byte - oOffset int - baseLg uint - filtersNum int -} - -func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool { - i := int(offset >> b.baseLg) - if i < b.filtersNum { - o := b.data[b.oOffset+i*4:] - n := int(binary.LittleEndian.Uint32(o)) - m := int(binary.LittleEndian.Uint32(o[4:])) - if n < m && m <= b.oOffset { - return filter.Contains(b.data[n:m], key) - } else if n == m { - return false - } - } - return true -} - -func (b *filterBlock) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type indexIter struct { - *blockIter - tr *Reader - slice *util.Range - // Options - fillCache bool -} - -func (i 
*indexIter) Get() iterator.Iterator { - value := i.Value() - if value == nil { - return nil - } - dataBH, n := decodeBlockHandle(value) - if n == 0 { - return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle")) - } - - var slice *util.Range - if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { - slice = i.slice - } - return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache) -} - -// Reader is a table reader. -type Reader struct { - mu sync.RWMutex - fi *storage.FileInfo - reader io.ReaderAt - cache *cache.CacheGetter - err error - bpool *util.BufferPool - // Options - o *opt.Options - cmp comparer.Comparer - filter filter.Filter - verifyChecksum bool - - dataEnd int64 - metaBH, indexBH, filterBH blockHandle - indexBlock *block - filterBlock *filterBlock -} - -func (r *Reader) blockKind(bh blockHandle) string { - switch bh.offset { - case r.metaBH.offset: - return "meta-block" - case r.indexBH.offset: - return "index-block" - case r.filterBH.offset: - if r.filterBH.length > 0 { - return "filter-block" - } - } - return "data-block" -} - -func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { - return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} -} - -func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { - return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason) -} - -func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error { - if cerr, ok := err.(*ErrCorrupted); ok { - cerr.Pos = int64(bh.offset) - cerr.Size = int64(bh.length) - cerr.Kind = r.blockKind(bh) - return &errors.ErrCorrupted{File: r.fi, Err: cerr} - } - return err -} - -func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { - data := r.bpool.Get(int(bh.length + blockTrailerLen)) - if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { - return nil, err - } - - if verifyChecksum { - n := bh.length + 1 - checksum0 := binary.LittleEndian.Uint32(data[n:]) - checksum1 := util.NewCRC(data[:n]).Value() - if checksum0 != checksum1 { - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) - } - } - - switch data[bh.length] { - case blockTypeNoCompression: - data = data[:bh.length] - case blockTypeSnappyCompression: - decLen, err := snappy.DecodedLen(data[:bh.length]) - if err != nil { - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - decData := r.bpool.Get(decLen) - decData, err = snappy.Decode(decData, data[:bh.length]) - r.bpool.Put(data) - if err != nil { - r.bpool.Put(decData) - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - data = decData - default: - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) - } - return data, nil -} - -func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) { - data, err := r.readRawBlock(bh, verifyChecksum) - if err != nil { - return nil, err - } - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - b := &block{ - bpool: r.bpool, - bh: bh, - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - } - return b, nil -} - -func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if 
fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *block - b, err = r.readBlock(bh, verifyChecksum) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*block) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readBlock(bh, verifyChecksum) - return b, b, err -} - -func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { - data, err := r.readRawBlock(bh, true) - if err != nil { - return nil, err - } - n := len(data) - if n < 5 { - return nil, r.newErrCorruptedBH(bh, "too short") - } - m := n - 5 - oOffset := int(binary.LittleEndian.Uint32(data[m:])) - if oOffset > m { - return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset") - } - b := &filterBlock{ - bpool: r.bpool, - data: data, - oOffset: oOffset, - baseLg: uint(data[n-1]), - filtersNum: (m - oOffset) / 4, - } - return b, nil -} - -func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *filterBlock - b, err = r.readFilterBlock(bh) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*filterBlock) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readFilterBlock(bh) - return b, b, err -} - -func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) { - if r.indexBlock == nil { - return r.readBlockCached(r.indexBH, true, fillCache) - } - return r.indexBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) { - if r.filterBlock == nil { - return r.readFilterBlockCached(r.filterBH, fillCache) - } - return r.filterBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter { - bi := &blockIter{ - tr: r, - block: b, - blockReleaser: bReleaser, - // Valid key should never be nil. 
- key: make([]byte, 0), - dir: dirSOI, - riStart: 0, - riLimit: b.restartsLen, - offsetStart: 0, - offsetRealStart: 0, - offsetLimit: b.restartsOffset, - } - if slice != nil { - if slice.Start != nil { - if bi.Seek(slice.Start) { - bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) - bi.offsetStart = b.restartOffset(bi.riStart) - bi.offsetRealStart = bi.prevOffset - } else { - bi.riStart = b.restartsLen - bi.offsetStart = b.restartsOffset - bi.offsetRealStart = b.restartsOffset - } - } - if slice.Limit != nil { - if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { - bi.offsetLimit = bi.prevOffset - bi.riLimit = bi.restartIndex + 1 - } - } - bi.reset() - if bi.offsetStart > bi.offsetLimit { - bi.sErr(errors.New("leveldb/table: invalid slice range")) - } - } - return bi -} - -func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - return r.newBlockIter(b, rel, slice, false) -} - -func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - return r.getDataIter(dataBH, slice, verifyChecksum, fillCache) -} - -// NewIterator creates an iterator from the table. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// table. And a nil Range.Limit is treated as a key after all keys in -// the table. -// -// The returned iterator is not goroutine-safe and should be released -// when not used. -// -// Also read Iterator documentation of the leveldb/iterator package. 
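readBlock above, like the Build helper in the block tests, recovers the restart array purely from layout math on the decompressed block contents: the final four bytes hold the restart count, and the count plus one trailing 4-byte words are sliced off the end. A sketch of that math on a hand-built buffer with the entry bytes faked as zeros:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A fake decompressed block: 10 opaque entry bytes, then restart
	// offsets {0, 4}, then the 4-byte restart count.
	data := make([]byte, 10, 32)
	for _, rp := range []uint32{0, 4} {
		var w [4]byte
		binary.LittleEndian.PutUint32(w[:], rp)
		data = append(data, w[:]...)
	}
	var cnt [4]byte
	binary.LittleEndian.PutUint32(cnt[:], 2)
	data = append(data, cnt[:]...)

	// The two lines below mirror readBlock and the tests' Build helper.
	restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
	restartsOffset := len(data) - (restartsLen+1)*4
	fmt.Println(restartsLen, restartsOffset) // 2 10
}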
-func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - fillCache := !ro.GetDontFillCache() - indexBlock, rel, err := r.getIndexBlock(fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - index := &indexIter{ - blockIter: r.newBlockIter(indexBlock, rel, slice, true), - tr: r, - slice: slice, - fillCache: !ro.GetDontFillCache(), - } - return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader)) -} - -func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.getIndexBlock(true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if !index.Seek(key) { - err = index.Error() - if err == nil { - err = ErrNotFound - } - return - } - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - if filtered && r.filter != nil { - filterBlock, frel, ferr := r.getFilterBlock(true) - if ferr == nil { - if !filterBlock.contains(r.filter, dataBH.offset, key) { - frel.Release() - return nil, nil, ErrNotFound - } - frel.Release() - } else if !errors.IsCorrupted(ferr) { - err = ferr - return - } - } - data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) - defer data.Release() - if !data.Seek(key) { - err = data.Error() - if err == nil { - err = ErrNotFound - } - return - } - // Don't use block buffer, no need to copy the buffer. - rkey = data.Key() - if !noValue { - if r.bpool == nil { - value = data.Value() - } else { - // Use block buffer, and since the buffer will be recycled, the buffer - // need to be copied. - value = append([]byte{}, data.Value()...) - } - } - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such pair doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) { - return r.find(key, filtered, ro, false) -} - -// Find finds key that is greater than or equal to the given key. -// It returns ErrNotFound if the table doesn't contain such key. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such key doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) { - rkey, _, err = r.find(key, filtered, ro, true) - return -} - -// Get gets the value for the given key. It returns errors.ErrNotFound -// if the table does not contain the key. 
-// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - rkey, value, err := r.find(key, false, ro, false) - if err == nil && r.cmp.Compare(rkey, key) != 0 { - value = nil - err = ErrNotFound - } - return -} - -// OffsetOf returns approximate offset for the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if index.Seek(key) { - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - offset = int64(dataBH.offset) - return - } - err = index.Error() - if err == nil { - offset = r.dataEnd - } - return -} - -// Release implements util.Releaser. -// It also close the file if it is an io.Closer. -func (r *Reader) Release() { - r.mu.Lock() - defer r.mu.Unlock() - - if closer, ok := r.reader.(io.Closer); ok { - closer.Close() - } - if r.indexBlock != nil { - r.indexBlock.Release() - r.indexBlock = nil - } - if r.filterBlock != nil { - r.filterBlock.Release() - r.filterBlock = nil - } - r.reader = nil - r.cache = nil - r.bpool = nil - r.err = ErrReaderReleased -} - -// NewReader creates a new initialized table reader for the file. -// The fi, cache and bpool is optional and can be nil. -// -// The returned table reader instance is goroutine-safe. -func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { - if f == nil { - return nil, errors.New("leveldb/table: nil file") - } - - r := &Reader{ - fi: fi, - reader: f, - cache: cache, - bpool: bpool, - o: o, - cmp: o.GetComparer(), - verifyChecksum: o.GetStrict(opt.StrictBlockChecksum), - } - - if size < footerLen { - r.err = r.newErrCorrupted(0, size, "table", "too small") - return r, nil - } - - footerPos := size - footerLen - var footer [footerLen]byte - if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF { - return nil, err - } - if string(footer[footerLen-len(magic):footerLen]) != magic { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number") - return r, nil - } - - var n int - // Decode the metaindex block handle. - r.metaBH, n = decodeBlockHandle(footer[:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle") - return r, nil - } - - // Decode the index block handle. - r.indexBH, n = decodeBlockHandle(footer[n:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle") - return r, nil - } - - // Read metaindex block. - metaBlock, err := r.readBlock(r.metaBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } else { - return nil, err - } - } - - // Set data end. - r.dataEnd = int64(r.metaBH.offset) - - // Read metaindex. 
- metaIter := r.newBlockIter(metaBlock, nil, nil, true) - for metaIter.Next() { - key := string(metaIter.Key()) - if !strings.HasPrefix(key, "filter.") { - continue - } - fn := key[7:] - if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { - r.filter = f0 - } else { - for _, f0 := range o.GetAltFilters() { - if f0.Name() == fn { - r.filter = f0 - break - } - } - } - if r.filter != nil { - filterBH, n := decodeBlockHandle(metaIter.Value()) - if n == 0 { - continue - } - r.filterBH = filterBH - // Update data end. - r.dataEnd = int64(filterBH.offset) - break - } - } - metaIter.Release() - metaBlock.Release() - - // Cache index and filter block locally, since we don't have global cache. - if cache == nil { - r.indexBlock, err = r.readBlock(r.indexBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } else { - return nil, err - } - } - if r.filter != nil { - r.filterBlock, err = r.readFilterBlock(r.filterBH) - if err != nil { - if !errors.IsCorrupted(err) { - return nil, err - } - - // Don't use filter then. - r.filter = nil - } - } - } - - return r, nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go deleted file mode 100644 index beacdc1f02..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package table allows read and write sorted key/value. -package table - -import ( - "encoding/binary" -) - -/* -Table: - -Table is consist of one or more data blocks, an optional filter block -a metaindex block, an index block and a table footer. Metaindex block -is a special block used to keep parameters of the table, such as filter -block name and its block handle. Index block is a special block used to -keep record of data blocks offset and length, index block use one as -restart interval. The key used by index block are the last key of preceding -block, shorter separator of adjacent blocks or shorter successor of the -last key of the last block. Filter block is an optional block contains -sequence of filter data generated by a filter generator. - -Table data structure: - + optional - / - +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ - | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | - +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ - - Each block followed by a 5-bytes trailer contains compression type and checksum. - -Table block trailer: - - +---------------------------+-------------------+ - | compression type (1-byte) | checksum (4-byte) | - +---------------------------+-------------------+ - - The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression - type also included in the checksum. - -Table footer: - - +------------------- 40-bytes -------------------+ - / \ - +------------------------+--------------------+------+-----------------+ - | metaindex block handle / index block handle / ---- | magic (8-bytes) | - +------------------------+--------------------+------+-----------------+ - - The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". - -NOTE: All fixed-length integer are little-endian. 
-*/ - -/* -Block: - -Block is consist of one or more key/value entries and a block trailer. -Block entry shares key prefix with its preceding key until a restart -point reached. A block should contains at least one restart point. -First restart point are always zero. - -Block data structure: - - + restart point + restart point (depends on restart interval) - / / - +---------------+---------------+---------------+---------------+---------+ - | block entry 1 | block entry 2 | ... | block entry n | trailer | - +---------------+---------------+---------------+---------------+---------+ - -Key/value entry: - - +---- key len ----+ - / \ - +-------+---------+-----------+---------+--------------------+--------------+----------------+ - | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | - +-----------------+---------------------+--------------------+--------------+----------------+ - - Block entry shares key prefix with its preceding key: - Conditions: - restart_interval=2 - entry one : key=deck,value=v1 - entry two : key=dock,value=v2 - entry three: key=duck,value=v3 - The entries will be encoded as follow: - - + restart point (offset=0) + restart point (offset=16) - / / - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - \ / \ / \ / - +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ - - The block trailer will contains two restart points: - - +------------+-----------+--------+ - | 0 | 16 | 2 | - +------------+-----------+---+----+ - \ / \ - +-- restart points --+ + restart points length - -Block trailer: - - +-- 4-bytes --+ - / \ - +-----------------+-----------------+-----------------+------------------------------+ - | restart point 1 | .... | restart point n | restart points len (4-bytes) | - +-----------------+-----------------+-----------------+------------------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -/* -Filter block: - -Filter block consist of one or more filter data and a filter block trailer. -The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. - -Filter block data structure: - - + offset 1 + offset 2 + offset n + trailer offset - / / / / - +---------------+---------------+---------------+---------+ - | filter data 1 | ... | filter data n | trailer | - +---------------+---------------+---------------+---------+ - -Filter block trailer: - - +- 4-bytes -+ - / \ - +---------------+---------------+---------------+-------------------------------+------------------+ - | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) | - +-------------- +---------------+---------------+-------------------------------+------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -const ( - blockTrailerLen = 5 - footerLen = 48 - - magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" - - // The block type gives the per-block compression format. - // These constants are part of the file format and should not be changed. 
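The entry layout above decodes with three consecutive uvarint reads. A minimal in-package sketch (decodeEntry is an illustrative helper, not the vendored block iterator), rebuilding the full key from the preceding entry's key:

package table

import (
	"encoding/binary"
	"errors"
)

// decodeEntry decodes one block entry from data, given the fully
// reconstructed key of the preceding entry.
func decodeEntry(data, prevKey []byte) (key, value []byte, size int, err error) {
	errCorrupt := errors.New("corrupted block entry")
	shared, n0 := binary.Uvarint(data) // shared key length
	if n0 <= 0 {
		return nil, nil, 0, errCorrupt
	}
	nonShared, n1 := binary.Uvarint(data[n0:]) // non-shared key length
	if n1 <= 0 {
		return nil, nil, 0, errCorrupt
	}
	valueLen, n2 := binary.Uvarint(data[n0+n1:]) // value length
	if n2 <= 0 || int(shared) > len(prevKey) {
		return nil, nil, 0, errCorrupt
	}
	hdr := n0 + n1 + n2
	size = hdr + int(nonShared) + int(valueLen)
	if size > len(data) {
		return nil, nil, 0, errCorrupt
	}
	key = append(append([]byte{}, prevKey[:shared]...), data[hdr:hdr+int(nonShared)]...)
	value = data[hdr+int(nonShared) : size]
	return key, value, size, nil
}

On the deck/dock/duck example above, entry two decodes as shared=1, non-shared=3, value len=2, so the key becomes prevKey[:1] + "ock" = "dock".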
- blockTypeNoCompression = 0 - blockTypeSnappyCompression = 1 - - // Generate new filter every 2KB of data - filterBaseLg = 11 - filterBase = 1 << filterBaseLg -) - -type blockHandle struct { - offset, length uint64 -} - -func decodeBlockHandle(src []byte) (blockHandle, int) { - offset, n := binary.Uvarint(src) - length, m := binary.Uvarint(src[n:]) - if n == 0 || m == 0 { - return blockHandle{}, 0 - } - return blockHandle{offset, length}, n + m -} - -func encodeBlockHandle(dst []byte, b blockHandle) int { - n := binary.PutUvarint(dst, b.offset) - m := binary.PutUvarint(dst[n:], b.length) - return n + m -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go deleted file mode 100644 index 6465da6e37..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package table - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestTable(t *testing.T) { - testutil.RunSuite(t, "Table Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go deleted file mode 100644 index 4b59b31f52..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "bytes" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type tableWrapper struct { - *Reader -} - -func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return t.Reader.Find(key, false, nil) -} - -func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { - return t.Reader.Get(key, nil) -} - -func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.Reader.NewIterator(slice, nil) -} - -var _ = testutil.Defer(func() { - Describe("Table", func() { - Describe("approximate offset test", func() { - var ( - buf = &bytes.Buffer{} - o = &opt.Options{ - BlockSize: 1024, - Compression: opt.NoCompression, - } - ) - - // Building the table. 
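A block handle is just two uvarints, so decodeBlockHandle can signal corruption only through a zero byte count, which is exactly what the metaindex loop earlier checks. A hedged in-package sketch of the round trip (handleRoundTripOK is an illustrative helper, not vendored code); note that two uvarints never need more than 2 × binary.MaxVarintLen64 = 20 bytes, which is why the Writer below reserves the first 20 bytes of its scratch buffer for encoding handles:

// Illustrative, not vendored: a handle survives encode/decode unchanged.
func handleRoundTripOK(bh blockHandle) bool {
	var buf [2 * binary.MaxVarintLen64]byte
	n := encodeBlockHandle(buf[:], bh)
	got, m := decodeBlockHandle(buf[:n])
	return m == n && got == bh
}

handleRoundTripOK(blockHandle{offset: 4096, length: 512}) // true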
- tw := NewWriter(buf, o) - tw.Append([]byte("k01"), []byte("hello")) - tw.Append([]byte("k02"), []byte("hello2")) - tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) - tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) - tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) - tw.Append([]byte("k06"), []byte("hello3")) - tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) - err := tw.Close() - - It("Should be able to approximate offset of a key correctly", func() { - Expect(err).ShouldNot(HaveOccurred()) - - tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - Expect(err).ShouldNot(HaveOccurred()) - CheckOffset := func(key string, expect, threshold int) { - offset, err := tr.OffsetOf([]byte(key)) - Expect(err).ShouldNot(HaveOccurred()) - Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) - } - - CheckOffset("k0", 0, 0) - CheckOffset("k01a", 0, 0) - CheckOffset("k02", 0, 0) - CheckOffset("k03", 0, 0) - CheckOffset("k04", 10000, 1000) - CheckOffset("k04a", 210000, 1000) - CheckOffset("k05", 210000, 1000) - CheckOffset("k06", 510000, 1000) - CheckOffset("k07", 510000, 1000) - CheckOffset("xyz", 610000, 2000) - }) - }) - - Describe("read test", func() { - Build := func(kv testutil.KeyValue) testutil.DB { - o := &opt.Options{ - BlockSize: 512, - BlockRestartInterval: 3, - } - buf := &bytes.Buffer{} - - // Building the table. - tw := NewWriter(buf, o) - kv.Iterate(func(i int, key, value []byte) { - tw.Append(key, value) - }) - tw.Close() - - // Opening the table. - tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - return tableWrapper{tr} - } - Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { - return func() { - db := Build(*kv) - if body != nil { - body(db.(tableWrapper).Reader) - } - testutil.KeyValueTesting(nil, *kv, db, nil, nil) - } - } - - testutil.AllKeyValueTesting(nil, Build, nil, nil) - Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) { - It("should have correct blocks number", func() { - indexBlock, err := r.readBlock(r.indexBH, true) - Expect(err).To(BeNil()) - Expect(indexBlock.restartsLen).Should(Equal(9)) - }) - })) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go deleted file mode 100644 index 274dee6da8..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package table - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/golang/snappy" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func sharedPrefixLen(a, b []byte) int { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for i < n && a[i] == b[i] { - i++ - } - return i -} - -type blockWriter struct { - restartInterval int - buf util.Buffer - nEntries int - prevKey []byte - restarts []uint32 - scratch []byte -} - -func (w *blockWriter) append(key, value []byte) { - nShared := 0 - if w.nEntries%w.restartInterval == 0 { - w.restarts = append(w.restarts, uint32(w.buf.Len())) - } else { - nShared = sharedPrefixLen(w.prevKey, key) - } - n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) - w.buf.Write(w.scratch[:n]) - w.buf.Write(key[nShared:]) - w.buf.Write(value) - w.prevKey = append(w.prevKey[:0], key...) - w.nEntries++ -} - -func (w *blockWriter) finish() { - // Write restarts entry. - if w.nEntries == 0 { - // Must have at least one restart entry. - w.restarts = append(w.restarts, 0) - } - w.restarts = append(w.restarts, uint32(len(w.restarts))) - for _, x := range w.restarts { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } -} - -func (w *blockWriter) reset() { - w.buf.Reset() - w.nEntries = 0 - w.restarts = w.restarts[:0] -} - -func (w *blockWriter) bytesLen() int { - restartsLen := len(w.restarts) - if restartsLen == 0 { - restartsLen = 1 - } - return w.buf.Len() + 4*restartsLen + 4 -} - -type filterWriter struct { - generator filter.FilterGenerator - buf util.Buffer - nKeys int - offsets []uint32 -} - -func (w *filterWriter) add(key []byte) { - if w.generator == nil { - return - } - w.generator.Add(key) - w.nKeys++ -} - -func (w *filterWriter) flush(offset uint64) { - if w.generator == nil { - return - } - for x := int(offset / filterBase); x > len(w.offsets); { - w.generate() - } -} - -func (w *filterWriter) finish() { - if w.generator == nil { - return - } - // Generate last keys. - - if w.nKeys > 0 { - w.generate() - } - w.offsets = append(w.offsets, uint32(w.buf.Len())) - for _, x := range w.offsets { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } - w.buf.WriteByte(filterBaseLg) -} - -func (w *filterWriter) generate() { - // Record offset. - w.offsets = append(w.offsets, uint32(w.buf.Len())) - // Generate filters. - if w.nKeys > 0 { - w.generator.Generate(&w.buf) - w.nKeys = 0 - } -} - -// Writer is a table writer. -type Writer struct { - writer io.Writer - err error - // Options - cmp comparer.Comparer - filter filter.Filter - compression opt.Compression - blockSize int - - dataBlock blockWriter - indexBlock blockWriter - filterBlock filterWriter - pendingBH blockHandle - offset uint64 - nEntries int - // Scratch allocated enough for 5 uvarint. Block writer should not use - // first 20-bytes since it will be used to encode block handle, which - // then passed to the block writer itself. - scratch [50]byte - comparerScratch []byte - compressionScratch []byte -} - -func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { - // Compress the buffer if necessary. - var b []byte - if compression == opt.SnappyCompression { - // Allocate scratch enough for compression and block trailer. 
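filterWriter.flush above keeps generating until one filter exists for every started filterBase window, so filter i covers the data blocks whose offsets fall in [i*filterBase, (i+1)*filterBase). A small worked check of that arithmetic, using filterBaseLg = 11 (filterBase = 2048) from table.go:

package main

import "fmt"

const filterBase = 1 << 11 // matches filterBaseLg = 11 in table.go

func main() {
	// Filters that must already be generated when flush(offset) returns.
	for _, offset := range []uint64{0, 2047, 2048, 4096, 5000} {
		fmt.Printf("offset %5d -> %d filter(s)\n", offset, offset/filterBase)
	}
	// offset     0 -> 0 filter(s)
	// offset  2047 -> 0 filter(s)
	// offset  2048 -> 1 filter(s)
	// offset  4096 -> 2 filter(s)
	// offset  5000 -> 2 filter(s)
}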
- if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { - w.compressionScratch = make([]byte, n) - } - compressed := snappy.Encode(w.compressionScratch, buf.Bytes()) - n := len(compressed) - b = compressed[:n+blockTrailerLen] - b[n] = blockTypeSnappyCompression - } else { - tmp := buf.Alloc(blockTrailerLen) - tmp[0] = blockTypeNoCompression - b = buf.Bytes() - } - - // Calculate the checksum. - n := len(b) - 4 - checksum := util.NewCRC(b[:n]).Value() - binary.LittleEndian.PutUint32(b[n:], checksum) - - // Write the buffer to the file. - _, err = w.writer.Write(b) - if err != nil { - return - } - bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} - w.offset += uint64(len(b)) - return -} - -func (w *Writer) flushPendingBH(key []byte) { - if w.pendingBH.length == 0 { - return - } - var separator []byte - if len(key) == 0 { - separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) - } else { - separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) - } - if separator == nil { - separator = w.dataBlock.prevKey - } else { - w.comparerScratch = separator - } - n := encodeBlockHandle(w.scratch[:20], w.pendingBH) - // Append the block handle to the index block. - w.indexBlock.append(separator, w.scratch[:n]) - // Reset prev key of the data block. - w.dataBlock.prevKey = w.dataBlock.prevKey[:0] - // Clear pending block handle. - w.pendingBH = blockHandle{} -} - -func (w *Writer) finishBlock() error { - w.dataBlock.finish() - bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - return err - } - w.pendingBH = bh - // Reset the data block. - w.dataBlock.reset() - // Flush the filter block. - w.filterBlock.flush(w.offset) - return nil -} - -// Append appends key/value pair to the table. The keys passed must -// be in increasing order. -// -// It is safe to modify the contents of the arguments after Append returns. -func (w *Writer) Append(key, value []byte) error { - if w.err != nil { - return w.err - } - if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { - w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) - return w.err - } - - w.flushPendingBH(key) - // Append key/value pair to the data block. - w.dataBlock.append(key, value) - // Add key to the filter block. - w.filterBlock.add(key) - - // Finish the data block if block size target reached. - if w.dataBlock.bytesLen() >= w.blockSize { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.nEntries++ - return nil -} - -// BlocksLen returns number of blocks written so far. -func (w *Writer) BlocksLen() int { - n := w.indexBlock.nEntries - if w.pendingBH.length > 0 { - // Includes the pending block. - n++ - } - return n -} - -// EntriesLen returns number of entries added so far. -func (w *Writer) EntriesLen() int { - return w.nEntries -} - -// BytesLen returns number of bytes written so far. -func (w *Writer) BytesLen() int { - return int(w.offset) -} - -// Close will finalize the table. Calling Append is not possible -// after Close, but calling BlocksLen, EntriesLen and BytesLen -// is still possible. -func (w *Writer) Close() error { - if w.err != nil { - return w.err - } - - // Write the last data block. Or empty data block if there - // aren't any data blocks at all. 
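Since Append rejects out-of-order keys and Close writes the filter, metaindex, index and footer, typical use of this writer is a sorted loop followed by Close. A minimal usage sketch; passing a zero-value opt.Options assumes the option getters fall back to the default comparer, block size and compression, as the tests above also rely on:

package main

import (
	"bytes"
	"log"

	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/table"
)

func main() {
	buf := new(bytes.Buffer)
	tw := table.NewWriter(buf, &opt.Options{})
	for _, kv := range [][2]string{{"k01", "v1"}, {"k02", "v2"}, {"k03", "v3"}} {
		if err := tw.Append([]byte(kv[0]), []byte(kv[1])); err != nil {
			log.Fatal(err) // out-of-order or repeated keys fail here
		}
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
	// Per the Close docs, the counters stay readable after closing.
	log.Printf("wrote %d entries in %d bytes", tw.EntriesLen(), tw.BytesLen())
}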
- if w.dataBlock.nEntries > 0 || w.nEntries == 0 { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.flushPendingBH(nil) - - // Write the filter block. - var filterBH blockHandle - w.filterBlock.finish() - if buf := &w.filterBlock.buf; buf.Len() > 0 { - filterBH, w.err = w.writeBlock(buf, opt.NoCompression) - if w.err != nil { - return w.err - } - } - - // Write the metaindex block. - if filterBH.length > 0 { - key := []byte("filter." + w.filter.Name()) - n := encodeBlockHandle(w.scratch[:20], filterBH) - w.dataBlock.append(key, w.scratch[:n]) - } - w.dataBlock.finish() - metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the index block. - w.indexBlock.finish() - indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the table footer. - footer := w.scratch[:footerLen] - for i := range footer { - footer[i] = 0 - } - n := encodeBlockHandle(footer, metaindexBH) - encodeBlockHandle(footer[n:], indexBH) - copy(footer[footerLen-len(magic):], magic) - if _, err := w.writer.Write(footer); err != nil { - w.err = err - return w.err - } - w.offset += footerLen - - w.err = errors.New("leveldb/table: writer is closed") - return nil -} - -// NewWriter creates a new initialized table writer for the file. -// -// Table writer is not goroutine-safe. -func NewWriter(f io.Writer, o *opt.Options) *Writer { - w := &Writer{ - writer: f, - cmp: o.GetComparer(), - filter: o.GetFilter(), - compression: o.GetCompression(), - blockSize: o.GetBlockSize(), - comparerScratch: make([]byte, 0), - } - // data block - w.dataBlock.restartInterval = o.GetBlockRestartInterval() - // The first 20-bytes are used for encoding block handle. - w.dataBlock.scratch = w.scratch[20:] - // index block - w.indexBlock.restartInterval = 1 - w.indexBlock.scratch = w.scratch[20:] - // filter block - if w.filter != nil { - w.filterBlock.generator = w.filter.NewGenerator() - w.filterBlock.flush(0) - } - return w -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go deleted file mode 100644 index ec3f177a12..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type DB interface{} - -type Put interface { - TestPut(key []byte, value []byte) error -} - -type Delete interface { - TestDelete(key []byte) error -} - -type Find interface { - TestFind(key []byte) (rkey, rvalue []byte, err error) -} - -type Get interface { - TestGet(key []byte) (value []byte, err error) -} - -type Has interface { - TestHas(key []byte) (ret bool, err error) -} - -type NewIterator interface { - TestNewIterator(slice *util.Range) iterator.Iterator -} - -type DBAct int - -func (a DBAct) String() string { - switch a { - case DBNone: - return "none" - case DBPut: - return "put" - case DBOverwrite: - return "overwrite" - case DBDelete: - return "delete" - case DBDeleteNA: - return "delete_na" - } - return "unknown" -} - -const ( - DBNone DBAct = iota - DBPut - DBOverwrite - DBDelete - DBDeleteNA -) - -type DBTesting struct { - Rand *rand.Rand - DB interface { - Get - Put - Delete - } - PostFn func(t *DBTesting) - Deleted, Present KeyValue - Act, LastAct DBAct - ActKey, LastActKey []byte -} - -func (t *DBTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *DBTesting) setAct(act DBAct, key []byte) { - t.LastAct, t.Act = t.Act, act - t.LastActKey, t.ActKey = t.ActKey, key -} - -func (t *DBTesting) text() string { - return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey) -} - -func (t *DBTesting) Text() string { - return "DBTesting " + t.text() -} - -func (t *DBTesting) TestPresentKV(key, value []byte) { - rvalue, err := t.DB.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text()) - Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllPresent() { - t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestPresentKV(key, value) - }) -} - -func (t *DBTesting) TestDeletedKey(key []byte) { - _, err := t.DB.TestGet(key) - Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllDeleted() { - t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestDeletedKey(key) - }) -} - -func (t *DBTesting) TestAll() { - dn := t.Deleted.Len() - pn := t.Present.Len() - ShuffledIndex(t.Rand, dn+pn, 1, func(i int) { - if i >= dn { - key, value := t.Present.Index(i - dn) - t.TestPresentKV(key, value) - } else { - t.TestDeletedKey(t.Deleted.KeyAt(i)) - } - }) -} - -func (t *DBTesting) Put(key, value []byte) { - if new := t.Present.PutU(key, value); new { - t.setAct(DBPut, key) - } else { - t.setAct(DBOverwrite, key) - } - t.Deleted.Delete(key) - err := t.DB.TestPut(key, value) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestPresentKV(key, value) - t.post() -} - -func (t *DBTesting) PutRandom() bool { - if t.Deleted.Len() > 0 { - i := t.Rand.Intn(t.Deleted.Len()) - key, value := t.Deleted.Index(i) - t.Put(key, value) - return true - } - return false -} - -func (t *DBTesting) Delete(key []byte) { - if exist, value := t.Present.Delete(key); exist { - t.setAct(DBDelete, key) - t.Deleted.PutU(key, value) - } else { - t.setAct(DBDeleteNA, key) - } - err := t.DB.TestDelete(key) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestDeletedKey(key) - t.post() -} - -func (t *DBTesting) DeleteRandom() bool { - if t.Present.Len() > 0 { - i := t.Rand.Intn(t.Present.Len()) - 
t.Delete(t.Present.KeyAt(i)) - return true - } - return false -} - -func (t *DBTesting) RandomAct(round int) { - for i := 0; i < round; i++ { - if t.Rand.Int()%2 == 0 { - t.PutRandom() - } else { - t.DeleteRandom() - } - } -} - -func DoDBTesting(t *DBTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - - t.DeleteRandom() - t.PutRandom() - t.DeleteRandom() - t.DeleteRandom() - for i := t.Deleted.Len() / 2; i >= 0; i-- { - t.PutRandom() - } - t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10) - - // Additional iterator testing - if db, ok := t.DB.(NewIterator); ok { - iter := db.TestNewIterator(nil) - Expect(iter.Error()).NotTo(HaveOccurred()) - - it := IteratorTesting{ - KeyValue: t.Present, - Iter: iter, - } - - DoIteratorTesting(&it) - iter.Release() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go deleted file mode 100644 index 82f3d0e811..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go +++ /dev/null @@ -1,21 +0,0 @@ -package testutil - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func RunSuite(t GinkgoTestingT, name string) { - RunDefer() - - SynchronizedBeforeSuite(func() []byte { - RunDefer("setup") - return nil - }, func(data []byte) {}) - SynchronizedAfterSuite(func() { - RunDefer("teardown") - }, func() {}) - - RegisterFailHandler(Fail) - RunSpecs(t, name) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go deleted file mode 100644 index df6d9db6a3..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" -) - -type IterAct int - -func (a IterAct) String() string { - switch a { - case IterNone: - return "none" - case IterFirst: - return "first" - case IterLast: - return "last" - case IterPrev: - return "prev" - case IterNext: - return "next" - case IterSeek: - return "seek" - case IterSOI: - return "soi" - case IterEOI: - return "eoi" - } - return "unknown" -} - -const ( - IterNone IterAct = iota - IterFirst - IterLast - IterPrev - IterNext - IterSeek - IterSOI - IterEOI -) - -type IteratorTesting struct { - KeyValue - Iter iterator.Iterator - Rand *rand.Rand - PostFn func(t *IteratorTesting) - Pos int - Act, LastAct IterAct - - once bool -} - -func (t *IteratorTesting) init() { - if !t.once { - t.Pos = -1 - t.once = true - } -} - -func (t *IteratorTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *IteratorTesting) setAct(act IterAct) { - t.LastAct, t.Act = t.Act, act -} - -func (t *IteratorTesting) text() string { - return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act) -} - -func (t *IteratorTesting) Text() string { - return "IteratorTesting is " + t.text() -} - -func (t *IteratorTesting) IsFirst() bool { - t.init() - return t.Len() > 0 && t.Pos == 0 -} - -func (t *IteratorTesting) IsLast() bool { - t.init() - return t.Len() > 0 && t.Pos == t.Len()-1 -} - -func (t *IteratorTesting) TestKV() { - t.init() - key, value := t.Index(t.Pos) - Expect(t.Iter.Key()).NotTo(BeNil()) - Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text()) - Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *IteratorTesting) First() { - t.init() - t.setAct(IterFirst) - - ok := t.Iter.First() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = 0 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Last() { - t.init() - t.setAct(IterLast) - - ok := t.Iter.Last() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = t.Len() - 1 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = 0 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Next() { - t.init() - t.setAct(IterNext) - - ok := t.Iter.Next() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos < t.Len()-1 { - t.Pos++ - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = t.Len() - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Prev() { - t.init() - t.setAct(IterPrev) - - ok := t.Iter.Prev() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos > 0 { - t.Pos-- - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Seek(i int) { - t.init() - t.setAct(IterSeek) - - key, _ := t.Index(i) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekInexact(i int) { - t.init() - t.setAct(IterSeek) - var key0 []byte - key1, _ := t.Index(i) - if i > 0 { - key0, _ = t.Index(i - 1) - } - key := BytesSeparator(key0, key1) - oldKey, _ := 
t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekKey(key []byte) { - t.init() - t.setAct(IterSeek) - oldKey, _ := t.IndexOrNil(t.Pos) - i := t.Search(key) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if i < t.Len() { - key_, _ := t.Index(i) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text())) - t.Pos = i - t.TestKV() - } else { - Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text())) - } - - t.Pos = i - t.post() -} - -func (t *IteratorTesting) SOI() { - t.init() - t.setAct(IterSOI) - Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text()) - for i := 0; i < 3; i++ { - t.Prev() - } - t.post() -} - -func (t *IteratorTesting) EOI() { - t.init() - t.setAct(IterEOI) - Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text()) - for i := 0; i < 3; i++ { - t.Next() - } - t.post() -} - -func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos > 0; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically("<", old), t.Text()) - } -} - -func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically(">", old), t.Text()) - } -} - -func (t *IteratorTesting) PrevAll() { - t.WalkPrev(func(t *IteratorTesting) { - t.Prev() - }) -} - -func (t *IteratorTesting) NextAll() { - t.WalkNext(func(t *IteratorTesting) { - t.Next() - }) -} - -func DoIteratorTesting(t *IteratorTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - t.SOI() - t.NextAll() - t.First() - t.SOI() - t.NextAll() - t.EOI() - t.PrevAll() - t.Last() - t.EOI() - t.PrevAll() - t.SOI() - - t.NextAll() - t.PrevAll() - t.NextAll() - t.Last() - t.PrevAll() - t.First() - t.NextAll() - t.EOI() - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.SeekInexact(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - if i%2 != 0 { - t.PrevAll() - t.SOI() - } else { - t.NextAll() - t.EOI() - } - }) - - for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} { - t.SeekKey([]byte(key)) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go deleted file mode 100644 index 471d5708c3..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
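DoIteratorTesting above is the generic driver: hand it the expected KeyValue contents plus any iterator.Iterator and it exercises first/last/next/prev/seek in every order. A hedged sketch of wiring it up by hand (buildTable is a hypothetical helper producing a reader over kv; the Expect calls need a registered Gomega fail handler, so this belongs inside a spec):

// Inside an It(...) block:
kv := testutil.KeyValue_MultipleKeyValue()
db := buildTable(*kv) // hypothetical: returns something implementing TestNewIterator
iter := db.TestNewIterator(nil)

t := testutil.IteratorTesting{
	KeyValue: *kv,
	Iter:     iter,
}
testutil.DoIteratorTesting(&t)
iter.Release()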
- -package testutil - -import ( - "fmt" - "math/rand" - "sort" - "strings" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -type KeyValueEntry struct { - key, value []byte -} - -type KeyValue struct { - entries []KeyValueEntry - nbytes int -} - -func (kv *KeyValue) Put(key, value []byte) { - if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 { - panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key)) - } - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - kv.nbytes += len(key) + len(value) -} - -func (kv *KeyValue) PutString(key, value string) { - kv.Put([]byte(key), []byte(value)) -} - -func (kv *KeyValue) PutU(key, value []byte) bool { - if i, exist := kv.Get(key); !exist { - if i < kv.Len() { - kv.entries = append(kv.entries[:i+1], kv.entries[i:]...) - kv.entries[i] = KeyValueEntry{key, value} - } else { - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - } - kv.nbytes += len(key) + len(value) - return true - } else { - kv.nbytes += len(value) - len(kv.ValueAt(i)) - kv.entries[i].value = value - } - return false -} - -func (kv *KeyValue) PutUString(key, value string) bool { - return kv.PutU([]byte(key), []byte(value)) -} - -func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) { - i, exist := kv.Get(key) - if exist { - value = kv.entries[i].value - kv.DeleteIndex(i) - } - return -} - -func (kv *KeyValue) DeleteIndex(i int) bool { - if i < kv.Len() { - kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i)) - kv.entries = append(kv.entries[:i], kv.entries[i+1:]...) - return true - } - return false -} - -func (kv KeyValue) Len() int { - return len(kv.entries) -} - -func (kv *KeyValue) Size() int { - return kv.nbytes -} - -func (kv KeyValue) KeyAt(i int) []byte { - return kv.entries[i].key -} - -func (kv KeyValue) ValueAt(i int) []byte { - return kv.entries[i].value -} - -func (kv KeyValue) Index(i int) (key, value []byte) { - if i < 0 || i >= len(kv.entries) { - panic(fmt.Sprintf("Index #%d: out of range", i)) - } - return kv.entries[i].key, kv.entries[i].value -} - -func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) { - key, value = kv.Index(i) - var key0 []byte - var key1 = kv.KeyAt(i) - if i > 0 { - key0 = kv.KeyAt(i - 1) - } - key_ = BytesSeparator(key0, key1) - return -} - -func (kv KeyValue) IndexOrNil(i int) (key, value []byte) { - if i >= 0 && i < len(kv.entries) { - return kv.entries[i].key, kv.entries[i].value - } - return nil, nil -} - -func (kv KeyValue) IndexString(i int) (key, value string) { - key_, _value := kv.Index(i) - return string(key_), string(_value) -} - -func (kv KeyValue) Search(key []byte) int { - return sort.Search(kv.Len(), func(i int) bool { - return cmp.Compare(kv.KeyAt(i), key) >= 0 - }) -} - -func (kv KeyValue) SearchString(key string) int { - return kv.Search([]byte(key)) -} - -func (kv KeyValue) Get(key []byte) (i int, exist bool) { - i = kv.Search(key) - if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 { - exist = true - } - return -} - -func (kv KeyValue) GetString(key string) (i int, exist bool) { - return kv.Get([]byte(key)) -} - -func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) { - for i, x := range kv.entries { - fn(i, x.key, x.value) - } -} - -func (kv KeyValue) IterateString(fn func(i int, key, value string)) { - kv.Iterate(func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) { - ShuffledIndex(rnd, kv.Len(), 
1, func(i int) { - fn(i, kv.entries[i].key, kv.entries[i].value) - }) -} - -func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) { - kv.IterateShuffled(rnd, func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) { - for i := range kv.entries { - key_, key, value := kv.IndexInexact(i) - fn(i, key_, key, value) - } -} - -func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) { - kv.IterateInexact(func(i int, key_, key, value []byte) { - fn(i, string(key_), string(key), string(value)) - }) -} - -func (kv KeyValue) Clone() KeyValue { - return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes} -} - -func (kv KeyValue) Slice(start, limit int) KeyValue { - if start < 0 || limit > kv.Len() { - panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit)) - } else if limit < start { - panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit)) - } - return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes} -} - -func (kv KeyValue) SliceKey(start, limit []byte) KeyValue { - start_ := 0 - limit_ := kv.Len() - if start != nil { - start_ = kv.Search(start) - } - if limit != nil { - limit_ = kv.Search(limit) - } - return kv.Slice(start_, limit_) -} - -func (kv KeyValue) SliceKeyString(start, limit string) KeyValue { - return kv.SliceKey([]byte(start), []byte(limit)) -} - -func (kv KeyValue) SliceRange(r *util.Range) KeyValue { - if r != nil { - return kv.SliceKey(r.Start, r.Limit) - } - return kv.Clone() -} - -func (kv KeyValue) Range(start, limit int) (r util.Range) { - if kv.Len() > 0 { - if start == kv.Len() { - r.Start = BytesAfter(kv.KeyAt(start - 1)) - } else { - r.Start = kv.KeyAt(start) - } - } - if limit < kv.Len() { - r.Limit = kv.KeyAt(limit) - } - return -} - -func KeyValue_EmptyKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("", "v") - return kv -} - -func KeyValue_EmptyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "") - kv.PutString("abcd", "") - return kv -} - -func KeyValue_OneKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "v") - return kv -} - -func KeyValue_BigValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("big1", strings.Repeat("1", 200000)) - return kv -} - -func KeyValue_SpecialKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("\xff\xff", "v3") - return kv -} - -func KeyValue_MultipleKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("a", "v") - kv.PutString("aa", "v1") - kv.PutString("aaa", "v2") - kv.PutString("aaacccccccccc", "v2") - kv.PutString("aaaccccccccccd", "v3") - kv.PutString("aaaccccccccccf", "v4") - kv.PutString("aaaccccccccccfg", "v5") - kv.PutString("ab", "v6") - kv.PutString("abc", "v7") - kv.PutString("abcd", "v8") - kv.PutString("accccccccccccccc", "v9") - kv.PutString("b", "v10") - kv.PutString("bb", "v11") - kv.PutString("bc", "v12") - kv.PutString("c", "v13") - kv.PutString("c1", "v13") - kv.PutString("czzzzzzzzzzzzzz", "v14") - kv.PutString("fffffffffffffff", "v15") - kv.PutString("g11", "v15") - kv.PutString("g111", "v15") - kv.PutString("g111\xff", "v15") - kv.PutString("zz", "v16") - kv.PutString("zzzzzzz", "v16") - kv.PutString("zzzzzzzzzzzzzzzz", "v16") - return kv -} - -var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy") - -func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue { - if rnd == nil { - rnd = NewRand() - } - if 
maxlen < minlen { - panic("max len should >= min len") - } - - rrand := func(min, max int) int { - if min == max { - return max - } - return rnd.Intn(max-min) + min - } - - kv := &KeyValue{} - endC := byte(len(keymap) - 1) - gen := make([]byte, 0, maxlen) - for i := 0; i < n; i++ { - m := rrand(minlen, maxlen) - last := gen - retry: - gen = last[:m] - if k := len(last); m > k { - for j := k; j < m; j++ { - gen[j] = 0 - } - } else { - for j := m - 1; j >= 0; j-- { - c := last[j] - if c == endC { - continue - } - gen[j] = c + 1 - for j += 1; j < m; j++ { - gen[j] = 0 - } - goto ok - } - if m < maxlen { - m++ - goto retry - } - panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n)) - ok: - } - key := make([]byte, m) - for j := 0; j < m; j++ { - key[j] = keymap[gen[j]] - } - value := make([]byte, rrand(vminlen, vmaxlen)) - for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ { - value[n] = 'x' - } - kv.Put(key, value) - } - return kv -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go deleted file mode 100644 index a0b58f0e72..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) { - if rnd == nil { - rnd = NewRand() - } - - if p == nil { - BeforeEach(func() { - p = setup(kv) - }) - if teardown != nil { - AfterEach(func() { - teardown(p) - }) - } - } - - It("Should find all keys with Find", func() { - if db, ok := p.(Find); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rkey, rvalue, err := db.TestFind(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rkey).Should(Equal(key), "Key") - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - rkey, rvalue, err = db.TestFind(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) - }) - } - }) - - It("Should return error if the key is not present", func() { - if db, ok := p.(Find); ok { - var key []byte - if kv.Len() > 0 { - key_, _ := kv.Index(kv.Len() - 1) - key = BytesAfter(key_) - } - rkey, _, err := db.TestFind(key) - Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - - It("Should only find exact key with Get", func() { - if db, ok := p.(Get); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rvalue, err := db.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. 
- if len(key_) > 0 { - _, err = db.TestGet(key_) - Expect(err).Should(HaveOccurred(), "Error for key %q", key_) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - } - }) - - It("Should only find present key with Has", func() { - if db, ok := p.(Has); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, _ := kv.IndexInexact(i) - - // Using exact key. - ret, err := db.TestHas(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(ret).Should(BeTrue(), "False for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - ret, err = db.TestHas(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_) - Expect(ret).ShouldNot(BeTrue(), "True for key %q", key) - } - }) - } - }) - - TestIter := func(r *util.Range, _kv KeyValue) { - if db, ok := p.(NewIterator); ok { - iter := db.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := IteratorTesting{ - KeyValue: _kv, - Iter: iter, - } - - DoIteratorTesting(&t) - iter.Release() - } - } - - It("Should iterates and seeks correctly", func(done Done) { - TestIter(nil, kv.Clone()) - done <- true - }, 3.0) - - RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) { - type slice struct { - r *util.Range - start, limit int - } - - key_, _, _ := kv.IndexInexact(i) - for _, x := range []slice{ - {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, - {&util.Range{Start: nil, Limit: key_}, 0, i}, - } { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) { - TestIter(x.r, kv.Slice(x.start, x.limit)) - done <- true - }, 3.0) - } - }) - - RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) { - r := kv.Range(start, limit) - TestIter(&r, kv.Slice(start, limit)) - done <- true - }, 3.0) - }) -} - -func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) { - Test := func(kv *KeyValue) func() { - return func() { - var p DB - if setup != nil { - Defer("setup", func() { - p = setup(*kv) - }) - } - if teardown != nil { - Defer("teardown", func() { - teardown(p) - }) - } - if body != nil { - p = body(*kv) - } - KeyValueTesting(rnd, *kv, p, func(KeyValue) DB { - return p - }, nil) - } - } - - Describe("with no key/value (empty)", Test(&KeyValue{})) - Describe("with empty key", Test(KeyValue_EmptyKey())) - Describe("with empty value", Test(KeyValue_EmptyValue())) - Describe("with one key/value", Test(KeyValue_OneKeyValue())) - Describe("with big value", Test(KeyValue_BigValue())) - Describe("with special key", Test(KeyValue_SpecialKey())) - Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue())) - Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go deleted file mode 100644 index 59c496d54c..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - storageMu sync.Mutex - storageUseFS bool = true - storageKeepFS bool = false - storageNum int -) - -type StorageMode int - -const ( - ModeOpen StorageMode = 1 << iota - ModeCreate - ModeRemove - ModeRead - ModeWrite - ModeSync - ModeClose -) - -const ( - modeOpen = iota - modeCreate - modeRemove - modeRead - modeWrite - modeSync - modeClose - - modeCount -) - -const ( - typeManifest = iota - typeJournal - typeTable - typeTemp - - typeCount -) - -const flattenCount = modeCount * typeCount - -func flattenType(m StorageMode, t storage.FileType) int { - var x int - switch m { - case ModeOpen: - x = modeOpen - case ModeCreate: - x = modeCreate - case ModeRemove: - x = modeRemove - case ModeRead: - x = modeRead - case ModeWrite: - x = modeWrite - case ModeSync: - x = modeSync - case ModeClose: - x = modeClose - default: - panic("invalid storage mode") - } - x *= typeCount - switch t { - case storage.TypeManifest: - return x + typeManifest - case storage.TypeJournal: - return x + typeJournal - case storage.TypeTable: - return x + typeTable - case storage.TypeTemp: - return x + typeTemp - default: - panic("invalid file type") - } -} - -func listFlattenType(m StorageMode, t storage.FileType) []int { - ret := make([]int, 0, flattenCount) - add := func(x int) { - x *= typeCount - switch { - case t&storage.TypeManifest != 0: - ret = append(ret, x+typeManifest) - case t&storage.TypeJournal != 0: - ret = append(ret, x+typeJournal) - case t&storage.TypeTable != 0: - ret = append(ret, x+typeTable) - case t&storage.TypeTemp != 0: - ret = append(ret, x+typeTemp) - } - } - switch { - case m&ModeOpen != 0: - add(modeOpen) - case m&ModeCreate != 0: - add(modeCreate) - case m&ModeRemove != 0: - add(modeRemove) - case m&ModeRead != 0: - add(modeRead) - case m&ModeWrite != 0: - add(modeWrite) - case m&ModeSync != 0: - add(modeSync) - case m&ModeClose != 0: - add(modeClose) - } - return ret -} - -func packFile(num uint64, t storage.FileType) uint64 { - if num>>(64-typeCount) != 0 { - panic("overflow") - } - return num<> typeCount, storage.FileType(x) & storage.TypeAll -} - -type emulatedError struct { - err error -} - -func (err emulatedError) Error() string { - return fmt.Sprintf("emulated storage error: %v", err.err) -} - -type storageLock struct { - s *Storage - r util.Releaser -} - -func (l storageLock) Release() { - l.r.Release() - l.s.logI("storage lock released") -} - -type reader struct { - f *file - storage.Reader -} - -func (r *reader) Read(p []byte) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.Read(p) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err) - } - return -} - -func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.ReadAt(p, off) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err) - } - return -} - -func (r *reader) Close() (err error) { - return r.f.doClose(r.Reader) -} - -type writer struct { - f *file - storage.Writer -} - -func (w *writer) Write(p []byte) (n int, err error) { - err = 
w.f.s.emulateError(ModeWrite, w.f.Type()) - if err == nil { - w.f.s.stall(ModeWrite, w.f.Type()) - n, err = w.Writer.Write(p) - } - w.f.s.count(ModeWrite, w.f.Type(), n) - if err != nil && err != io.EOF { - w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err) - } - return -} - -func (w *writer) Sync() (err error) { - err = w.f.s.emulateError(ModeSync, w.f.Type()) - if err == nil { - w.f.s.stall(ModeSync, w.f.Type()) - err = w.Writer.Sync() - } - w.f.s.count(ModeSync, w.f.Type(), 0) - if err != nil { - w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err) - } - return -} - -func (w *writer) Close() (err error) { - return w.f.doClose(w.Writer) -} - -type file struct { - s *Storage - storage.File -} - -func (f *file) pack() uint64 { - return packFile(f.Num(), f.Type()) -} - -func (f *file) assertOpen() { - ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()]) -} - -func (f *file) doClose(closer io.Closer) (err error) { - err = f.s.emulateError(ModeClose, f.Type()) - if err == nil { - f.s.stall(ModeClose, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type()) - err = closer.Close() - } - f.s.countNB(ModeClose, f.Type(), 0) - writer := f.s.opens[f.pack()] - if err != nil { - f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err) - } else { - f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer) - delete(f.s.opens, f.pack()) - } - return -} - -func (f *file) Open() (r storage.Reader, err error) { - err = f.s.emulateError(ModeOpen, f.Type()) - if err == nil { - f.s.stall(ModeOpen, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeOpen, f.Type(), 0) - r, err = f.File.Open() - } - if err != nil { - f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = false - r = &reader{f, r} - } - return -} - -func (f *file) Create() (w storage.Writer, err error) { - err = f.s.emulateError(ModeCreate, f.Type()) - if err == nil { - f.s.stall(ModeCreate, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeCreate, f.Type(), 0) - w, err = f.File.Create() - } - if err != nil { - f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file created, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = true - w = &writer{f, w} - } - return -} - -func (f *file) Remove() (err error) { - err = f.s.emulateError(ModeRemove, f.Type()) - if err == nil { - f.s.stall(ModeRemove, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeRemove, f.Type(), 0) - err = f.File.Remove() - } - if err != nil { - f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type()) - } - return -} - -type Storage struct { - storage.Storage - closeFn func() error - - lmu sync.Mutex - lb bytes.Buffer - - mu sync.Mutex - // Open files, true=writer, false=reader - opens map[uint64]bool - counters [flattenCount]int - bytesCounter [flattenCount]int64 - emulatedError [flattenCount]error - stallCond sync.Cond - 
stalled [flattenCount]bool -} - -func (s *Storage) log(skip int, str string) { - s.lmu.Lock() - defer s.lmu.Unlock() - _, file, line, ok := runtime.Caller(skip + 2) - if ok { - // Truncate file name at last file name separator. - if index := strings.LastIndex(file, "/"); index >= 0 { - file = file[index+1:] - } else if index = strings.LastIndex(file, "\\"); index >= 0 { - file = file[index+1:] - } - } else { - file = "???" - line = 1 - } - fmt.Fprintf(&s.lb, "%s:%d: ", file, line) - lines := strings.Split(str, "\n") - if l := len(lines); l > 1 && lines[l-1] == "" { - lines = lines[:l-1] - } - for i, line := range lines { - if i > 0 { - s.lb.WriteString("\n\t") - } - s.lb.WriteString(line) - } - s.lb.WriteByte('\n') -} - -func (s *Storage) logISkip(skip int, format string, args ...interface{}) { - pc, _, _, ok := runtime.Caller(skip + 1) - if ok { - if f := runtime.FuncForPC(pc); f != nil { - fname := f.Name() - if index := strings.LastIndex(fname, "."); index >= 0 { - fname = fname[index+1:] - } - format = fname + ": " + format - } - } - s.log(skip+1, fmt.Sprintf(format, args...)) -} - -func (s *Storage) logI(format string, args ...interface{}) { - s.logISkip(1, format, args...) -} - -func (s *Storage) Log(str string) { - s.log(1, "Log: "+str) - s.Storage.Log(str) -} - -func (s *Storage) Lock() (r util.Releaser, err error) { - r, err = s.Storage.Lock() - if err != nil { - s.logI("storage locking failed, err=%v", err) - } else { - s.logI("storage locked") - r = storageLock{s, r} - } - return -} - -func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File { - return &file{s, s.Storage.GetFile(num, t)} -} - -func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) { - rfiles, err := s.Storage.GetFiles(t) - if err != nil { - s.logI("get files failed, err=%v", err) - return - } - files = make([]storage.File, len(rfiles)) - for i, f := range rfiles { - files[i] = &file{s, f} - } - s.logI("get files, type=0x%x count=%d", int(t), len(files)) - return -} - -func (s *Storage) GetManifest() (f storage.File, err error) { - manifest, err := s.Storage.GetManifest() - if err != nil { - if !os.IsNotExist(err) { - s.logI("get manifest failed, err=%v", err) - } - return - } - s.logI("get manifest, num=%d", manifest.Num()) - return &file{s, manifest}, nil -} - -func (s *Storage) SetManifest(f storage.File) error { - f_, ok := f.(*file) - ExpectWithOffset(1, ok).To(BeTrue()) - ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest)) - err := s.Storage.SetManifest(f_.File) - if err != nil { - s.logI("set manifest failed, err=%v", err) - } else { - s.logI("set manifest, num=%d", f_.Num()) - } - return err -} - -func (s *Storage) openFiles() string { - out := "Open files:" - for x, writer := range s.opens { - num, t := unpackFile(x) - out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer) - } - return out -} - -func (s *Storage) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) - err := s.Storage.Close() - if err != nil { - s.logI("storage closing failed, err=%v", err) - } else { - s.logI("storage closed") - } - if s.closeFn != nil { - if err1 := s.closeFn(); err1 != nil { - s.logI("close func error, err=%v", err1) - } - } - return err -} - -func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) { - s.counters[flattenType(m, t)]++ - s.bytesCounter[flattenType(m, t)] += int64(n) -} - -func (s *Storage) count(m StorageMode, t storage.FileType, n int) { - s.mu.Lock() - defer 
s.mu.Unlock() - s.countNB(m, t, n) -} - -func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) { - for _, x := range listFlattenType(m, t) { - s.counters[x] = 0 - s.bytesCounter[x] = 0 - } -} - -func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) { - for _, x := range listFlattenType(m, t) { - count += s.counters[x] - bytes += s.bytesCounter[x] - } - return -} - -func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { - s.mu.Lock() - defer s.mu.Unlock() - err := s.emulatedError[flattenType(m, t)] - if err != nil { - return emulatedError{err} - } - return nil -} - -func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.emulatedError[x] = err - } -} - -func (s *Storage) stall(m StorageMode, t storage.FileType) { - x := flattenType(m, t) - s.mu.Lock() - defer s.mu.Unlock() - for s.stalled[x] { - s.stallCond.Wait() - } -} - -func (s *Storage) Stall(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = true - } -} - -func (s *Storage) Release(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = false - } - s.stallCond.Broadcast() -} - -func NewStorage() *Storage { - var stor storage.Storage - var closeFn func() error - if storageUseFS { - for { - storageMu.Lock() - num := storageNum - storageNum++ - storageMu.Unlock() - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); os.IsNotExist(err) { - stor, err = storage.OpenFile(path) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) - closeFn = func() error { - if storageKeepFS { - return nil - } - return os.RemoveAll(path) - } - break - } - } - } else { - stor = storage.NewMemStorage() - } - s := &Storage{ - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - } - s.stallCond.L = &s.mu - return s -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go deleted file mode 100644 index 97c5294b1b..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "flag" - "math/rand" - "reflect" - "sync" - - "github.com/onsi/ginkgo/config" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -var ( - runfn = make(map[string][]func()) - runmu sync.Mutex -) - -func Defer(args ...interface{}) bool { - var ( - group string - fn func() - ) - for _, arg := range args { - v := reflect.ValueOf(arg) - switch v.Kind() { - case reflect.String: - group = v.String() - case reflect.Func: - r := reflect.ValueOf(&fn).Elem() - r.Set(v) - } - } - if fn != nil { - runmu.Lock() - runfn[group] = append(runfn[group], fn) - runmu.Unlock() - } - return true -} - -func RunDefer(groups ...string) bool { - if len(groups) == 0 { - groups = append(groups, "") - } - runmu.Lock() - var runfn_ []func() - for _, group := range groups { - runfn_ = append(runfn_, runfn[group]...) 
- delete(runfn, group) - } - runmu.Unlock() - for _, fn := range runfn_ { - fn() - } - return runfn_ != nil -} - -func RandomSeed() int64 { - if !flag.Parsed() { - panic("random seed not initialized") - } - return config.GinkgoConfig.RandomSeed -} - -func NewRand() *rand.Rand { - return rand.New(rand.NewSource(RandomSeed())) -} - -var cmp = comparer.DefaultComparer - -func BytesSeparator(a, b []byte) []byte { - if bytes.Equal(a, b) { - return b - } - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && (a[i] == b[i]); i++ { - } - x := append([]byte{}, a[:i]...) - if i < n { - if c := a[i] + 1; c < b[i] { - return append(x, c) - } - x = append(x, a[i]) - i++ - } - for ; i < len(a); i++ { - if c := a[i]; c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - if len(b) > i && b[i] > 0 { - return append(x, b[i]-1) - } - return append(x, 'x') -} - -func BytesAfter(b []byte) []byte { - var x []byte - for _, c := range b { - if c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - return append(x, 'x') -} - -func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - fn(rnd.Intn(n)) - } - return -} - -func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - for _, i := range rnd.Perm(n) { - fn(i) - } - } - return -} - -func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - start := rnd.Intn(n) - length := 0 - if j := n - start; j > 0 { - length = rnd.Intn(j) - } - fn(start, start+length) - } - return -} - -func Max(x, y int) int { - if x > y { - return x - } - return y -} - -func Min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go deleted file mode 100644 index 25bf2b29f9..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type testingDB struct { - *DB - ro *opt.ReadOptions - wo *opt.WriteOptions - stor *testutil.Storage -} - -func (t *testingDB) TestPut(key []byte, value []byte) error { - return t.Put(key, value, t.wo) -} - -func (t *testingDB) TestDelete(key []byte) error { - return t.Delete(key, t.wo) -} - -func (t *testingDB) TestGet(key []byte) (value []byte, err error) { - return t.Get(key, t.ro) -} - -func (t *testingDB) TestHas(key []byte) (ret bool, err error) { - return t.Has(key, t.ro) -} - -func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.NewIterator(slice, t.ro) -} - -func (t *testingDB) TestClose() { - err := t.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - err = t.stor.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) -} - -func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { - stor := testutil.NewStorage() - db, err := Open(stor, o) - // FIXME: This may be called from outside It, which may cause panic. - Expect(err).NotTo(HaveOccurred()) - return &testingDB{ - DB: db, - ro: ro, - wo: wo, - stor: stor, - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go deleted file mode 100644 index 1a5bf71a32..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - - "github.com/syndtr/goleveldb/leveldb/storage" -) - -func shorten(str string) string { - if len(str) <= 8 { - return str - } - return str[:3] + ".." + str[len(str)-3:] -} - -var bunits = [...]string{"", "Ki", "Mi", "Gi"} - -func shortenb(bytes int) string { - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%d%sB", bytes, bunits[i]) -} - -func sshortenb(bytes int) string { - if bytes == 0 { - return "~" - } - sign := "+" - if bytes < 0 { - sign = "-" - bytes *= -1 - } - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) -} - -func sint(x int) string { - if x == 0 { - return "~" - } - sign := "+" - if x < 0 { - sign = "-" - x *= -1 - } - return fmt.Sprintf("%s%d", sign, x) -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -type files []storage.File - -func (p files) Len() int { - return len(p) -} - -func (p files) Less(i, j int) bool { - return p[i].Num() < p[j].Num() -} - -func (p files) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func (p files) sort() { - sort.Sort(p) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go deleted file mode 100644 index 21de242552..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -// This is a copy of Go std bytes.Buffer with some modification -// and some features stripped. - -import ( - "bytes" - "io" -) - -// A Buffer is a variable-sized buffer of bytes with Read and Write methods. -// The zero value for Buffer is an empty buffer ready to use. -type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. -} - -// Bytes returns a slice of the contents of the unread portion of the buffer; -// len(b.Bytes()) == b.Len(). If the caller changes the contents of the -// returned slice, the contents of the buffer will change provided there -// are no intervening method calls on the Buffer. -func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } - -// String returns the contents of the unread portion of the buffer -// as a string. If the Buffer is a nil pointer, it returns "<nil>". -func (b *Buffer) String() string { - if b == nil { - // Special case, useful in debugging. - return "<nil>" - } - return string(b.buf[b.off:]) -} - -// Len returns the number of bytes of the unread portion of the buffer; -// b.Len() == len(b.Bytes()). -func (b *Buffer) Len() int { return len(b.buf) - b.off } - -// Truncate discards all but the first n unread bytes from the buffer. -// It panics if n is negative or greater than the length of the buffer. -func (b *Buffer) Truncate(n int) { - switch { - case n < 0 || n > b.Len(): - panic("leveldb/util.Buffer: truncation out of range") - case n == 0: - // Reuse buffer space. - b.off = 0 - } - b.buf = b.buf[0 : b.off+n] -} - -// Reset resets the buffer so it has no content. -// b.Reset() is the same as b.Truncate(0). -func (b *Buffer) Reset() { b.Truncate(0) } - -// grow grows the buffer to guarantee space for n more bytes. -// It returns the index where bytes should be written. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) grow(n int) int { - m := b.Len() - // If buffer is empty, reset to recover space. - if m == 0 && b.off != 0 { - b.Truncate(0) - } - if len(b.buf)+n > cap(b.buf) { - var buf []byte - if b.buf == nil && n <= len(b.bootstrap) { - buf = b.bootstrap[0:] - } else if m+n <= cap(b.buf)/2 { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - copy(b.buf[:], b.buf[b.off:]) - buf = b.buf[:m] - } else { - // not enough space anywhere - buf = makeSlice(2*cap(b.buf) + n) - copy(buf, b.buf[b.off:]) - } - b.buf = buf - b.off = 0 - } - b.buf = b.buf[0 : b.off+m+n] - return b.off + m -} - -// Alloc allocates n bytes of slice from the buffer, growing the buffer as -// needed. If n is negative, Alloc will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Alloc(n int) []byte { - if n < 0 { - panic("leveldb/util.Buffer.Alloc: negative count") - } - m := b.grow(n) - return b.buf[m:] -} - -// Grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After Grow(n), at least n bytes can be written to the -// buffer without another allocation. -// If n is negative, Grow will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge.
-func (b *Buffer) Grow(n int) { - if n < 0 { - panic("leveldb/util.Buffer.Grow: negative count") - } - m := b.grow(n) - b.buf = b.buf[0:m] -} - -// Write appends the contents of p to the buffer, growing the buffer as -// needed. The return value n is the length of p; err is always nil. If the -// buffer becomes too large, Write will panic with bytes.ErrTooLarge. -func (b *Buffer) Write(p []byte) (n int, err error) { - m := b.grow(len(p)) - return copy(b.buf[m:], p), nil -} - -// MinRead is the minimum slice size passed to a Read call by -// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond -// what is required to hold the contents of r, ReadFrom will not grow the -// underlying buffer. -const MinRead = 512 - -// ReadFrom reads data from r until EOF and appends it to the buffer, growing -// the buffer as needed. The return value n is the number of bytes read. Any -// error except io.EOF encountered during the read is also returned. If the -// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. -func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { - // If buffer is empty, reset to recover space. - if b.off >= len(b.buf) { - b.Truncate(0) - } - for { - if free := cap(b.buf) - len(b.buf); free < MinRead { - // not enough space at end - newBuf := b.buf - if b.off+free < MinRead { - // not enough space using beginning of buffer; - // double buffer capacity - newBuf = makeSlice(2*cap(b.buf) + MinRead) - } - copy(newBuf, b.buf[b.off:]) - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 - } - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - b.buf = b.buf[0 : len(b.buf)+m] - n += int64(m) - if e == io.EOF { - break - } - if e != nil { - return n, e - } - } - return n, nil // err is EOF, so return nil explicitly -} - -// makeSlice allocates a slice of size n. If the allocation fails, it panics -// with bytes.ErrTooLarge. -func makeSlice(n int) []byte { - // If the make fails, give a known error. - defer func() { - if recover() != nil { - panic(bytes.ErrTooLarge) - } - }() - return make([]byte, n) -} - -// WriteTo writes data to w until the buffer is drained or an error occurs. -// The return value n is the number of bytes written; it always fits into an -// int, but it is int64 to match the io.WriterTo interface. Any error -// encountered during the write is also returned. -func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { - if b.off < len(b.buf) { - nBytes := b.Len() - m, e := w.Write(b.buf[b.off:]) - if m > nBytes { - panic("leveldb/util.Buffer.WriteTo: invalid Write count") - } - b.off += m - n = int64(m) - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != nBytes { - return n, io.ErrShortWrite - } - } - // Buffer is now empty; reset. - b.Truncate(0) - return -} - -// WriteByte appends the byte c to the buffer, growing the buffer as needed. -// The returned error is always nil, but is included to match bufio.Writer's -// WriteByte. If the buffer becomes too large, WriteByte will panic with -// bytes.ErrTooLarge. -func (b *Buffer) WriteByte(c byte) error { - m := b.grow(1) - b.buf[m] = c - return nil -} - -// Read reads the next len(p) bytes from the buffer or until the buffer -// is drained. The return value n is the number of bytes read. If the -// buffer has no data to return, err is io.EOF (unless len(p) is zero); -// otherwise it is nil. 
-func (b *Buffer) Read(p []byte) (n int, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - if len(p) == 0 { - return - } - return 0, io.EOF - } - n = copy(p, b.buf[b.off:]) - b.off += n - return -} - -// Next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -// If there are fewer than n bytes in the buffer, Next returns the entire buffer. -// The slice is only valid until the next call to a read or write method. -func (b *Buffer) Next(n int) []byte { - m := b.Len() - if n > m { - n = m - } - data := b.buf[b.off : b.off+n] - b.off += n - return data -} - -// ReadByte reads and returns the next byte from the buffer. -// If no byte is available, it returns error io.EOF. -func (b *Buffer) ReadByte() (c byte, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - return 0, io.EOF - } - c = b.buf[b.off] - b.off++ - return c, nil -} - -// ReadBytes reads until the first occurrence of delim in the input, -// returning a slice containing the data up to and including the delimiter. -// If ReadBytes encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadBytes returns err != nil if and only if the returned data does not end in -// delim. -func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { - slice, err := b.readSlice(delim) - // return a copy of slice. The buffer's backing array may - // be overwritten by later calls. - line = append(line, slice...) - return -} - -// readSlice is like ReadBytes but returns a reference to internal buffer data. -func (b *Buffer) readSlice(delim byte) (line []byte, err error) { - i := bytes.IndexByte(b.buf[b.off:], delim) - end := b.off + i + 1 - if i < 0 { - end = len(b.buf) - err = io.EOF - } - line = b.buf[b.off:end] - b.off = end - return line, err -} - -// NewBuffer creates and initializes a new Buffer using buf as its initial -// contents. It is intended to prepare a Buffer to read existing data. It -// can also be used to size the internal buffer for writing. To do that, -// buf should have the desired capacity but a length of zero. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. -func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go deleted file mode 100644 index 2b8453d759..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "fmt" - "sync" - "sync/atomic" - "time" -) - -type buffer struct { - b []byte - miss int -} - -// BufferPool is a 'buffer pool'. 
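As an illustration of the Buffer API deleted above, a minimal usage sketch. It assumes the package remains importable from its canonical upstream location, github.com/syndtr/goleveldb/leveldb/util, which is the point of this change:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    var b util.Buffer // the zero value is an empty buffer ready to use
    b.Write([]byte("alpha\nbeta"))

    line, _ := b.ReadBytes('\n') // "alpha\n": copied out, safe to retain
    next := b.Next(4)            // "beta": a view into the buffer, valid only until the next call
    fmt.Printf("%q %q %d\n", line, next, b.Len()) // "alpha\n" "beta" 0
}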
-type BufferPool struct { - pool [6]chan []byte - size [5]uint32 - sizeMiss [5]uint32 - sizeHalf [5]uint32 - baseline [4]int - baseline0 int - - mu sync.RWMutex - closed bool - closeC chan struct{} - - get uint32 - put uint32 - half uint32 - less uint32 - equal uint32 - greater uint32 - miss uint32 -} - -func (p *BufferPool) poolNum(n int) int { - if n <= p.baseline0 && n > p.baseline0/2 { - return 0 - } - for i, x := range p.baseline { - if n <= x { - return i + 1 - } - } - return len(p.baseline) + 1 -} - -// Get returns buffer with length of n. -func (p *BufferPool) Get(n int) []byte { - if p == nil { - return make([]byte, n) - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return make([]byte, n) - } - - atomic.AddUint32(&p.get, 1) - - poolNum := p.poolNum(n) - pool := p.pool[poolNum] - if poolNum == 0 { - // Fast path. - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - select { - case pool <- b: - default: - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - } - default: - atomic.AddUint32(&p.miss, 1) - } - - return make([]byte, n, p.baseline0) - } else { - sizePtr := &p.size[poolNum-1] - - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - sizeHalfPtr := &p.sizeHalf[poolNum-1] - if atomic.AddUint32(sizeHalfPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) - atomic.StoreUint32(sizeHalfPtr, 0) - } else { - select { - case pool <- b: - default: - } - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { - select { - case pool <- b: - default: - } - } - } - default: - atomic.AddUint32(&p.miss, 1) - } - - if size := atomic.LoadUint32(sizePtr); uint32(n) > size { - if size == 0 { - atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) - } else { - sizeMissPtr := &p.sizeMiss[poolNum-1] - if atomic.AddUint32(sizeMissPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(n)) - atomic.StoreUint32(sizeMissPtr, 0) - } - } - return make([]byte, n) - } else { - return make([]byte, n, size) - } - } -} - -// Put adds given buffer to the pool. -func (p *BufferPool) Put(b []byte) { - if p == nil { - return - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return - } - - atomic.AddUint32(&p.put, 1) - - pool := p.pool[p.poolNum(cap(b))] - select { - case pool <- b: - default: - } - -} - -func (p *BufferPool) Close() { - if p == nil { - return - } - - p.mu.Lock() - if !p.closed { - p.closed = true - p.closeC <- struct{}{} - } - p.mu.Unlock() -} - -func (p *BufferPool) String() string { - if p == nil { - return "" - } - - return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", - p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) -} - -func (p *BufferPool) drain() { - ticker := time.NewTicker(2 * time.Second) - for { - select { - case <-ticker.C: - for _, ch := range p.pool { - select { - case <-ch: - default: - } - } - case <-p.closeC: - close(p.closeC) - for _, ch := range p.pool { - close(ch) - } - return - } - } -} - -// NewBufferPool creates a new initialized 'buffer pool'. 
-func NewBufferPool(baseline int) *BufferPool { - if baseline <= 0 { - panic("baseline can't be <= 0") - } - p := &BufferPool{ - baseline0: baseline, - baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, - closeC: make(chan struct{}, 1), - } - for i, cap := range []int{2, 2, 4, 4, 2, 1} { - p.pool[i] = make(chan []byte, cap) - } - go p.drain() - return p -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go deleted file mode 100644 index 87d96739c4..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "io" - "math/rand" - "runtime" - "testing" -) - -const N = 10000 // make this bigger for a larger (and slower) test -var data string // test data for write tests -var testBytes []byte // test data; same as data but as a slice. - -func init() { - testBytes = make([]byte, N) - for i := 0; i < N; i++ { - testBytes[i] = 'a' + byte(i%26) - } - data = string(testBytes) -} - -// Verify that contents of buf match the string s. -func check(t *testing.T, testname string, buf *Buffer, s string) { - bytes := buf.Bytes() - str := buf.String() - if buf.Len() != len(bytes) { - t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) - } - - if buf.Len() != len(str) { - t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) - } - - if buf.Len() != len(s) { - t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) - } - - if string(bytes) != s { - t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) - } -} - -// Fill buf through n writes of byte slice fub. -// The initial contents of buf corresponds to the string s; -// the result is the final contents of buf returned as a string. -func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { - check(t, testname+" (fill 1)", buf, s) - for ; n > 0; n-- { - m, err := buf.Write(fub) - if m != len(fub) { - t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) - } - if err != nil { - t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) - } - s += string(fub) - check(t, testname+" (fill 4)", buf, s) - } - return s -} - -func TestNewBuffer(t *testing.T) { - buf := NewBuffer(testBytes) - check(t, "NewBuffer", buf, data) -} - -// Empty buf through repeated reads into fub. -// The initial contents of buf corresponds to the string s. 
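A hedged sketch of how the pool above is used: Get always returns a slice of length n (recycled when a suitably sized buffer is available in the matching size class), Put hands the backing array back, and Close stops the background drain goroutine. The baseline value here is an arbitrary illustration:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    pool := util.NewBufferPool(4096) // size classes are derived from this baseline
    defer pool.Close()

    buf := pool.Get(1024) // len(buf) == 1024
    copy(buf, "payload")
    pool.Put(buf) // return the backing array for reuse

    fmt.Println(pool) // String() reports the get/put/hit/miss counters
}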
-func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { - check(t, testname+" (empty 1)", buf, s) - - for { - n, err := buf.Read(fub) - if n == 0 { - break - } - if err != nil { - t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) - } - s = s[n:] - check(t, testname+" (empty 3)", buf, s) - } - - check(t, testname+" (empty 4)", buf, "") -} - -func TestBasicOperations(t *testing.T) { - var buf Buffer - - for i := 0; i < 5; i++ { - check(t, "TestBasicOperations (1)", &buf, "") - - buf.Reset() - check(t, "TestBasicOperations (2)", &buf, "") - - buf.Truncate(0) - check(t, "TestBasicOperations (3)", &buf, "") - - n, err := buf.Write([]byte(data[0:1])) - if n != 1 { - t.Errorf("wrote 1 byte, but n == %d", n) - } - if err != nil { - t.Errorf("err should always be nil, but err == %s", err) - } - check(t, "TestBasicOperations (4)", &buf, "a") - - buf.WriteByte(data[1]) - check(t, "TestBasicOperations (5)", &buf, "ab") - - n, err = buf.Write([]byte(data[2:26])) - if n != 24 { - t.Errorf("wrote 25 bytes, but n == %d", n) - } - check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) - - buf.Truncate(26) - check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) - - buf.Truncate(20) - check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) - - empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) - empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) - - buf.WriteByte(data[1]) - c, err := buf.ReadByte() - if err != nil { - t.Error("ReadByte unexpected eof") - } - if c != data[1] { - t.Errorf("ReadByte wrong value c=%v", c) - } - c, err = buf.ReadByte() - if err == nil { - t.Error("ReadByte unexpected not eof") - } - } -} - -func TestLargeByteWrites(t *testing.T) { - var buf Buffer - limit := 30 - if testing.Short() { - limit = 9 - } - for i := 3; i < limit; i += 3 { - s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) - empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) - } - check(t, "TestLargeByteWrites (3)", &buf, "") -} - -func TestLargeByteReads(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) - } - check(t, "TestLargeByteReads (3)", &buf, "") -} - -func TestMixedReadsAndWrites(t *testing.T) { - var buf Buffer - s := "" - for i := 0; i < 50; i++ { - wlen := rand.Intn(len(data)) - s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) - rlen := rand.Intn(len(data)) - fub := make([]byte, rlen) - n, _ := buf.Read(fub) - s = s[n:] - } - empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) -} - -func TestNil(t *testing.T) { - var b *Buffer - if b.String() != "<nil>" { - t.Errorf("expected <nil>; got %q", b.String()) - } -} - -func TestReadFrom(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - b.ReadFrom(&buf) - empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) - } -} - -func TestWriteTo(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - buf.WriteTo(&b) - empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) - } -} - -func TestNext(t *testing.T) { - b := []byte{0, 1, 2, 3, 4} - tmp := make([]byte, 5) - for i := 0; i <= 5;
i++ { - for j := i; j <= 5; j++ { - for k := 0; k <= 6; k++ { - // 0 <= i <= j <= 5; 0 <= k <= 6 - // Check that if we start with a buffer - // of length j at offset i and ask for - // Next(k), we get the right bytes. - buf := NewBuffer(b[0:j]) - n, _ := buf.Read(tmp[0:i]) - if n != i { - t.Fatalf("Read %d returned %d", i, n) - } - bb := buf.Next(k) - want := k - if want > j-i { - want = j - i - } - if len(bb) != want { - t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) - } - for l, v := range bb { - if v != byte(l+i) { - t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) - } - } - } - } - } -} - -var readBytesTests = []struct { - buffer string - delim byte - expected []string - err error -}{ - {"", 0, []string{""}, io.EOF}, - {"a\x00", 0, []string{"a\x00"}, nil}, - {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, - {"hello\x01world", 1, []string{"hello\x01"}, nil}, - {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, - {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, - {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, -} - -func TestReadBytes(t *testing.T) { - for _, test := range readBytesTests { - buf := NewBuffer([]byte(test.buffer)) - var err error - for _, expected := range test.expected { - var bytes []byte - bytes, err = buf.ReadBytes(test.delim) - if string(bytes) != expected { - t.Errorf("expected %q, got %q", expected, bytes) - } - if err != nil { - break - } - } - if err != test.err { - t.Errorf("expected error %v, got %v", test.err, err) - } - } -} - -func TestGrow(t *testing.T) { - x := []byte{'x'} - y := []byte{'y'} - tmp := make([]byte, 72) - for _, startLen := range []int{0, 100, 1000, 10000, 100000} { - xBytes := bytes.Repeat(x, startLen) - for _, growLen := range []int{0, 100, 1000, 10000, 100000} { - buf := NewBuffer(xBytes) - // If we read, this affects buf.off, which is good to test. - readBytes, _ := buf.Read(tmp) - buf.Grow(growLen) - yBytes := bytes.Repeat(y, growLen) - // Check no allocation occurs in write, as long as we're single-threaded. - var m1, m2 runtime.MemStats - runtime.ReadMemStats(&m1) - buf.Write(yBytes) - runtime.ReadMemStats(&m2) - if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { - t.Errorf("allocation occurred during write") - } - // Check that buffer has correct data. - if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { - t.Errorf("bad initial data at %d %d", startLen, growLen) - } - if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { - t.Errorf("bad written data at %d %d", startLen, growLen) - } - } - } -} - -// Was a bug: used to give EOF reading empty slice at EOF. -func TestReadEmptyAtEOF(t *testing.T) { - b := new(Buffer) - slice := make([]byte, 0) - n, err := b.Read(slice) - if err != nil { - t.Errorf("read error: %v", err) - } - if n != 0 { - t.Errorf("wrong count; got %d want 0", n) - } -} - -// Tests that we occasionally compact. Issue 5154. -func TestBufferGrowth(t *testing.T) { - var b Buffer - buf := make([]byte, 1024) - b.Write(buf[0:1]) - var cap0 int - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - if i == 0 { - cap0 = cap(b.buf) - } - } - cap1 := cap(b.buf) - // (*Buffer).grow allows for 2x capacity slop before sliding, - // so set our error threshold at 3x. - if cap1 > cap0*3 { - t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) - } -} - -// From Issue 5154. 
-func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf[0:1]) - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - } - } -} - -// Check that we don't compact too often. From Issue 5154. -func BenchmarkBufferFullSmallReads(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf) - for b.Len()+20 < cap(b.buf) { - b.Write(buf[:10]) - } - for i := 0; i < 5<<10; i++ { - b.Read(buf[:1]) - b.Write(buf[:1]) - } - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go deleted file mode 100644 index 631c9d6109..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "hash/crc32" -) - -var table = crc32.MakeTable(crc32.Castagnoli) - -// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. -type CRC uint32 - -// NewCRC creates a new crc based on the given bytes. -func NewCRC(b []byte) CRC { - return CRC(0).Update(b) -} - -// Update updates the crc with the given bytes. -func (c CRC) Update(b []byte) CRC { - return CRC(crc32.Update(uint32(c), table, b)) -} - -// Value returns a masked crc. -func (c CRC) Value() uint32 { - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go deleted file mode 100644 index 54903660ff..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "bytes" - "encoding/binary" -) - -// Hash returns the hash of the given data. -func Hash(data []byte, seed uint32) uint32 { - // Similar to murmur hash - var m uint32 = 0xc6a4a793 - var r uint32 = 24 - h := seed ^ (uint32(len(data)) * m) - - buf := bytes.NewBuffer(data) - for buf.Len() >= 4 { - var w uint32 - binary.Read(buf, binary.LittleEndian, &w) - h += w - h *= m - h ^= (h >> 16) - } - - rest := buf.Bytes() - switch len(rest) { - default: - panic("not reached") - case 3: - h += uint32(rest[2]) << 16 - fallthrough - case 2: - h += uint32(rest[1]) << 8 - fallthrough - case 1: - h += uint32(rest[0]) - h *= m - h ^= (h >> r) - case 0: - } - - return h -}
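Both helpers above are small enough to exercise directly. A hedged sketch (input and seed are arbitrary illustrations); the masked CRC, rotate right by 15 bits then add the constant 0xa282ead8, is the form LevelDB stores on disk:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    sum := util.NewCRC([]byte("block data")).Value() // masked Castagnoli CRC-32
    fmt.Printf("masked crc: 0x%08x\n", sum)

    h := util.Hash([]byte("block data"), 0) // murmur-like hash; seed 0 chosen arbitrarily
    fmt.Printf("hash: 0x%08x\n", h)
}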
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go deleted file mode 100644 index 1f7fdd41fe..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build go1.3 - -package util - -import ( - "sync" -) - -type Pool struct { - sync.Pool -} - -func NewPool(cap int) *Pool { - return &Pool{} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go deleted file mode 100644 index 27b8d03be9..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.3 - -package util - -type Pool struct { - pool chan interface{} -} - -func (p *Pool) Get() interface{} { - select { - case x := <-p.pool: - return x - default: - return nil - } -} - -func (p *Pool) Put(x interface{}) { - select { - case p.pool <- x: - default: - } -} - -func NewPool(cap int) *Pool { - return &Pool{pool: make(chan interface{}, cap)} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go deleted file mode 100644 index 85159583d2..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -// Range is a key range. -type Range struct { - // Start of the key range, included in the range. - Start []byte - - // Limit of the key range, not included in the range. - Limit []byte -} - -// BytesPrefix returns a key range that satisfies the given prefix. -// This is only applicable to the standard 'bytes comparer'. -func BytesPrefix(prefix []byte) *Range { - var limit []byte - for i := len(prefix) - 1; i >= 0; i-- { - c := prefix[i] - if c < 0xff { - limit = make([]byte, i+1) - copy(limit, prefix) - limit[i] = c + 1 - break - } - } - return &Range{prefix, limit} -}
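BytesPrefix computes the smallest exclusive upper bound by incrementing the last byte that is not 0xff, so the prefix "foo" yields the half-open range ["foo", "fop"). A short sketch, again assuming the upstream import path:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    r := util.BytesPrefix([]byte("foo"))
    fmt.Printf("[%q, %q)\n", r.Start, r.Limit) // ["foo", "fop")
    // A range like this is typically passed to DB.NewIterator to scan
    // every key that starts with the prefix.
}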
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go deleted file mode 100644 index f35976865b..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package util provides utilities used throughout leveldb. -package util - -import ( - "errors" -) - -var ( - ErrReleased = errors.New("leveldb: resource already released") - ErrHasReleaser = errors.New("leveldb: releaser already defined") -) - -// Releaser is the interface that wraps the basic Release method. -type Releaser interface { - // Release releases associated resources. Release should always succeed - // and can be called multiple times without causing an error. - Release() -} - -// ReleaseSetter is the interface that wraps the basic SetReleaser method. -type ReleaseSetter interface { - // SetReleaser associates the given releaser with the resources. The - // releaser will be called once the corresponding resource is released. - // Calling SetReleaser with nil will clear the releaser. - // - // This will panic if a releaser is already present or the corresponding - // resource is already released. The releaser should be cleared first - // before assigning a new one. - SetReleaser(releaser Releaser) -} - -// BasicReleaser provides a basic implementation of Releaser and ReleaseSetter. -type BasicReleaser struct { - releaser Releaser - released bool -} - -// Released returns whether the Release method has already been called. -func (r *BasicReleaser) Released() bool { - return r.released -} - -// Release implements Releaser.Release. -func (r *BasicReleaser) Release() { - if !r.released { - if r.releaser != nil { - r.releaser.Release() - r.releaser = nil - } - r.released = true - } -} - -// SetReleaser implements ReleaseSetter.SetReleaser. -func (r *BasicReleaser) SetReleaser(releaser Releaser) { - if r.released { - panic(ErrReleased) - } - if r.releaser != nil && releaser != nil { - panic(ErrHasReleaser) - } - r.releaser = releaser -} - -type NoopReleaser struct{} - -func (NoopReleaser) Release() {}
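The Releaser contract above (Release must be idempotent; SetReleaser panics on double assignment or use after release) is easiest to see with BasicReleaser. A small sketch; releaseFunc is a hypothetical adapter, not part of the deleted package:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/util"
)

// releaseFunc adapts a plain function to util.Releaser (hypothetical helper).
type releaseFunc func()

func (f releaseFunc) Release() { f() }

func main() {
    var r util.BasicReleaser
    r.SetReleaser(releaseFunc(func() { fmt.Println("resource released") }))
    r.Release()               // invokes the releaser exactly once
    r.Release()               // no-op: Release is safe to call multiple times
    fmt.Println(r.Released()) // true
}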
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go deleted file mode 100644 index 011d982da8..0000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type tSet struct { - level int - table *tFile -} - -type version struct { - s *session - - tables []tFiles - - // Level that should be compacted next and its compaction score. - // Score < 1 means compaction is not strictly needed. These fields - // are initialized by computeCompaction(). - cLevel int - cScore float64 - - cSeek unsafe.Pointer - - ref int - // Succeeding version. - next *version -} - -func newVersion(s *session) *version { - return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())} -} - -func (v *version) releaseNB() { - v.ref-- - if v.ref > 0 { - return - } - if v.ref < 0 { - panic("negative version ref") - } - - tables := make(map[uint64]bool) - for _, tt := range v.next.tables { - for _, t := range tt { - num := t.file.Num() - tables[num] = true - } - } - - for _, tt := range v.tables { - for _, t := range tt { - num := t.file.Num() - if _, ok := tables[num]; !ok { - v.s.tops.remove(t) - } - } - } - - v.next.releaseNB() - v.next = nil -} - -func (v *version) release() { - v.s.vmu.Lock() - v.releaseNB() - v.s.vmu.Unlock() -} - -func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) { - ukey := ikey.ukey() - - // Walk tables level-by-level. - for level, tables := range v.tables { - if len(tables) == 0 { - continue - } - - if level == 0 { - // Level-0 files may overlap each other. Find all files that - // overlap ukey. - for _, t := range tables { - if t.overlaps(v.s.icmp, ukey, ukey) { - if !f(level, t) { - return - } - } - } - } else { - if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { - t := tables[i] - if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - if !f(level, t) { - return - } - } - } - } - - if lf != nil && !lf(level) { - return - } - } -} - -func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { - ukey := ikey.ukey() - - var ( - tset *tSet - tseek bool - - // Level-0. - zfound bool - zseq uint64 - zkt kType - zval []byte - ) - - err = ErrNotFound - - // Since entries never hop across levels, finding the key/value - // in a smaller level makes later levels irrelevant. - v.walkOverlapping(ikey, func(level int, t *tFile) bool { - if !tseek { - if tset == nil { - tset = &tSet{level, t} - } else { - tseek = true - } - } - - var ( - fikey, fval []byte - ferr error - ) - if noValue { - fikey, ferr = v.s.tops.findKey(t, ikey, ro) - } else { - fikey, fval, ferr = v.s.tops.find(t, ikey, ro) - } - switch ferr { - case nil: - case ErrNotFound: - return true - default: - err = ferr - return false - } - - if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil { - if v.s.icmp.uCompare(ukey, fukey) == 0 { - if level == 0 { - if fseq >= zseq { - zfound = true - zseq = fseq - zkt = fkt - zval = fval - } - } else { - switch fkt { - case ktVal: - value = fval - err = nil - case ktDel: - default: - panic("leveldb: invalid iKey type") - } - return false - } - } - } else { - err = fkerr - return false - } - - return true - }, func(level int) bool { - if zfound { - switch zkt { - case ktVal: - value = zval - err = nil - case ktDel: - default: - panic("leveldb: invalid iKey type") - } - return false - } - - return true - }) - - if tseek && tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - - return -} - -func (v *version) sampleSeek(ikey iKey) (tcomp bool) { - var tset *tSet - - v.walkOverlapping(ikey, func(level int, t *tFile) bool { - if tset == nil { - tset = &tSet{level, t} - return true - } else { - if tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - return false - } - }, nil) - - return -} - -func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { - // Merge all level zero files together since they may overlap - for _, t := range v.tables[0] { - it := v.s.tops.newIterator(t, slice, ro) - its = append(its, it) - } - - strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) - for _, tables := range v.tables[1:] { - if len(tables) == 0 { - continue - } - - it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict) - its = append(its, it) - } - - return -} - -func (v *version) newStaging() *versionStaging { - return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())} -} - -// Spawn a new version based on this version. -func (v *version) spawn(r *sessionRecord) *version { - staging := v.newStaging() - staging.commit(r) - return staging.finish() -} - -func (v *version) fillRecord(r *sessionRecord) { - for level, ts := range v.tables { - for _, t := range ts { - r.addTableFile(level, t) - } - } -} - -func (v *version) tLen(level int) int { - return len(v.tables[level]) -} - -func (v *version) offsetOf(ikey iKey) (n uint64, err error) { - for level, tables := range v.tables { - for _, t := range tables { - if v.s.icmp.Compare(t.imax, ikey) <= 0 { - // Entire file is before "ikey", so just add the file size - n += t.size - } else if v.s.icmp.Compare(t.imin, ikey) > 0 { - // Entire file is after "ikey", so ignore - if level > 0 { - // Files other than level 0 are sorted by meta->min, so - // no further files in this level will contain data for - // "ikey". - break - } - } else { - // "ikey" falls in the range for this table. Add the - // approximate offset of "ikey" within the table.
- var nn uint64 - nn, err = v.s.tops.offsetOf(t, ikey) - if err != nil { - return 0, err - } - n += nn - } - } - } - - return -} - -func (v *version) pickMemdbLevel(umin, umax []byte) (level int) { - if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) { - var overlaps tFiles - maxLevel := v.s.o.GetMaxMemCompationLevel() - for ; level < maxLevel; level++ { - if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) { - break - } - overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false) - if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) { - break - } - } - } - - return -} - -func (v *version) computeCompaction() { - // Precomputed best level for next compaction - var bestLevel int = -1 - var bestScore float64 = -1 - - for level, tables := range v.tables { - var score float64 - if level == 0 { - // We treat level-0 specially by bounding the number of files - // instead of number of bytes for two reasons: - // - // (1) With larger write-buffer sizes, it is nice not to do too - // many level-0 compactions. - // - // (2) The files in level-0 are merged on every read and - // therefore we wish to avoid too many files when the individual - // file size is small (perhaps because of a small write-buffer - // setting, or very high compression ratios, or lots of - // overwrites/deletions). - score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) - } else { - score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level)) - } - - if score > bestScore { - bestLevel = level - bestScore = score - } - } - - v.cLevel = bestLevel - v.cScore = bestScore -} - -func (v *version) needCompaction() bool { - return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil -} - -type tablesScratch struct { - added map[uint64]atRecord - deleted map[uint64]struct{} -} - -type versionStaging struct { - base *version - tables []tablesScratch -} - -func (p *versionStaging) commit(r *sessionRecord) { - // Deleted tables. - for _, r := range r.deletedTables { - tm := &(p.tables[r.level]) - - if len(p.base.tables[r.level]) > 0 { - if tm.deleted == nil { - tm.deleted = make(map[uint64]struct{}) - } - tm.deleted[r.num] = struct{}{} - } - - if tm.added != nil { - delete(tm.added, r.num) - } - } - - // New tables. - for _, r := range r.addedTables { - tm := &(p.tables[r.level]) - - if tm.added == nil { - tm.added = make(map[uint64]atRecord) - } - tm.added[r.num] = r - - if tm.deleted != nil { - delete(tm.deleted, r.num) - } - } -} - -func (p *versionStaging) finish() *version { - // Build new version. - nv := newVersion(p.base.s) - for level, tm := range p.tables { - btables := p.base.tables[level] - - n := len(btables) + len(tm.added) - len(tm.deleted) - if n < 0 { - n = 0 - } - nt := make(tFiles, 0, n) - - // Base tables. - for _, t := range btables { - if _, ok := tm.deleted[t.file.Num()]; ok { - continue - } - if _, ok := tm.added[t.file.Num()]; ok { - continue - } - nt = append(nt, t) - } - - // New tables. - for _, r := range tm.added { - nt = append(nt, p.base.s.tableFileFromRecord(r)) - } - - // Sort tables. - if level == 0 { - nt.sortByNum() - } else { - nt.sortByKey(p.base.s.icmp) - } - nv.tables[level] = nt - } - - // Compute compaction score for new version. 
- nv.computeCompaction() - - return nv -} - -type versionReleaser struct { - v *version - once bool -} - -func (vr *versionReleaser) Release() { - v := vr.v - v.s.vmu.Lock() - if !vr.once { - v.releaseNB() - vr.once = true - } - v.s.vmu.Unlock() -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context.go b/Godeps/_workspace/src/golang.org/x/net/context/context.go deleted file mode 100644 index ef2f3e86fe..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/context.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. 
- // func Stream(ctx context.Context, out <-chan Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key return the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stored using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } -}
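A small usage sketch of WithCancel, hedged, using the canonical import path golang.org/x/net/context named in the package documentation above: canceling the parent closes the Done channel of every context derived from it.

package main

import (
    "fmt"

    "golang.org/x/net/context"
)

func main() {
    parent, cancel := context.WithCancel(context.Background())
    child, stop := context.WithCancel(parent)
    defer stop()

    cancel() // cancels parent and, through propagateCancel, the child

    <-child.Done()           // closed by the parent's cancelation
    fmt.Println(child.Err()) // context canceled
}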
-// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return &c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent.
-func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - cancelCtx - timer *time.Timer // Under cancelCtx.mu. 
- - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/Godeps/_workspace/src/golang.org/x/net/context/context_test.go deleted file mode 100644 index faf67722a0..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go +++ /dev/null @@ -1,575 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "fmt" - "math/rand" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -// otherContext is a Context that's not one of the types defined in context.go. -// This lets us test code paths that differ based on the underlying type of the -// Context. 
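WithValue above resolves keys by walking the chain of valueCtx parents and comparing keys with ==, so collisions between packages are conventionally avoided with unexported key types. A short sketch of that convention (ctxKey and requestIDKey are illustrative names, not part of the package):

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

// ctxKey is an unexported type, so no other package can collide with
// values stored under it, even if it uses the same underlying int.
type ctxKey int

const requestIDKey ctxKey = 0

func main() {
	ctx := context.WithValue(context.Background(), requestIDKey, "req-42")

	// Value walks the chain of valueCtx parents until a key matches.
	if id, ok := ctx.Value(requestIDKey).(string); ok {
		fmt.Println("request ID:", id)
	}

	// A plain int key has a different dynamic type, so it never matches.
	fmt.Println(ctx.Value(0)) // <nil>
}
```

The otherContext wrapper defined next serves the tests in the opposite direction: it hides the concrete context type so the generic code paths get exercised.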
-type otherContext struct { - Context -} - -func TestBackground(t *testing.T) { - c := Background() - if c == nil { - t.Fatalf("Background returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.Background"; got != want { - t.Errorf("Background().String() = %q want %q", got, want) - } -} - -func TestTODO(t *testing.T) { - c := TODO() - if c == nil { - t.Fatalf("TODO returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.TODO"; got != want { - t.Errorf("TODO().String() = %q want %q", got, want) - } -} - -func TestWithCancel(t *testing.T) { - c1, cancel := WithCancel(Background()) - - if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { - t.Errorf("c1.String() = %q want %q", got, want) - } - - o := otherContext{c1} - c2, _ := WithCancel(o) - contexts := []Context{c1, o, c2} - - for i, c := range contexts { - if d := c.Done(); d == nil { - t.Errorf("c[%d].Done() == %v want non-nil", i, d) - } - if e := c.Err(); e != nil { - t.Errorf("c[%d].Err() == %v want nil", i, e) - } - - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - } - - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - - for i, c := range contexts { - select { - case <-c.Done(): - default: - t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) - } - if e := c.Err(); e != Canceled { - t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) - } - } -} - -func TestParentFinishesChild(t *testing.T) { - // Context tree: - // parent -> cancelChild - // parent -> valueChild -> timerChild - parent, cancel := WithCancel(Background()) - cancelChild, stop := WithCancel(parent) - defer stop() - valueChild := WithValue(parent, "key", "value") - timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) - defer stop() - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-cancelChild.Done(): - t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) - case x := <-timerChild.Done(): - t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) - case x := <-valueChild.Done(): - t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) - default: - } - - // The parent's children should contain the two cancelable children. - pc := parent.(*cancelCtx) - cc := cancelChild.(*cancelCtx) - tc := timerChild.(*timerCtx) - pc.mu.Lock() - if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { - t.Errorf("bad linkage: pc.children = %v, want %v and %v", - pc.children, cc, tc) - } - pc.mu.Unlock() - - if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) - } - if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) - } - - cancel() - - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) - } - pc.mu.Unlock() - - // parent and children should all be finished. 
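The assertions above repeat one shape: a non-blocking probe of Done plus a check of Err. Distilled into a hypothetical test helper (assertCanceled is not part of the package):

```go
package ctxtest

import (
	"testing"

	"golang.org/x/net/context"
)

// assertCanceled fails the test unless ctx is finished with the given
// error. It bundles the two probes the tests above repeat inline: a
// non-blocking receive on Done and a comparison of Err.
func assertCanceled(t *testing.T, ctx context.Context, want error) {
	select {
	case <-ctx.Done():
		// closed channel: the context is finished
	default:
		t.Fatalf("Done() still blocks, want closed")
	}
	if got := ctx.Err(); got != want {
		t.Fatalf("Err() = %v, want %v", got, want)
	}
}

func TestAssertCanceled(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	assertCanceled(t, ctx, context.Canceled)
}
```

The check closure that the test defines next applies the same probe to the parent and each child after cancel.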
- check := func(ctx Context, name string) { - select { - case <-ctx.Done(): - default: - t.Errorf("<-%s.Done() blocked, but shouldn't have", name) - } - if e := ctx.Err(); e != Canceled { - t.Errorf("%s.Err() == %v want %v", name, e, Canceled) - } - } - check(parent, "parent") - check(cancelChild, "cancelChild") - check(valueChild, "valueChild") - check(timerChild, "timerChild") - - // WithCancel should return a canceled context on a canceled parent. - precanceledChild := WithValue(parent, "key", "value") - select { - case <-precanceledChild.Done(): - default: - t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") - } - if e := precanceledChild.Err(); e != Canceled { - t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) - } -} - -func TestChildFinishesFirst(t *testing.T) { - cancelable, stop := WithCancel(Background()) - defer stop() - for _, parent := range []Context{Background(), cancelable} { - child, cancel := WithCancel(parent) - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-child.Done(): - t.Errorf("<-child.Done() == %v want nothing (it should block)", x) - default: - } - - cc := child.(*cancelCtx) - pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() - if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { - t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) - } - - if pcok { - pc.mu.Lock() - if len(pc.children) != 1 || !pc.children[cc] { - t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) - } - pc.mu.Unlock() - } - - cancel() - - if pcok { - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) - } - pc.mu.Unlock() - } - - // child should be finished. - select { - case <-child.Done(): - default: - t.Errorf("<-child.Done() blocked, but shouldn't have") - } - if e := child.Err(); e != Canceled { - t.Errorf("child.Err() == %v want %v", e, Canceled) - } - - // parent should not be finished. 
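A consequence of propagateCancel worth calling out, and what the precanceled-child assertions above rely on: deriving from an already-finished parent yields a context that is canceled at birth, because the parent's recorded error is applied to the child immediately. A minimal sketch against the vendored package:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

func main() {
	parent, cancel := context.WithCancel(context.Background())
	cancel() // parent is already finished

	// propagateCancel sees the parent's non-nil err and cancels the
	// child immediately, so the child starts with a closed Done channel.
	child, stop := context.WithCancel(parent)
	defer stop()

	<-child.Done()           // does not block
	fmt.Println(child.Err()) // context canceled
}
```

The test then verifies the inverse: canceling a child leaves the parent running, as the select that follows asserts.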
- select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - default: - } - if e := parent.Err(); e != nil { - t.Errorf("parent.Err() == %v want nil", e) - } - } -} - -func testDeadline(c Context, wait time.Duration, t *testing.T) { - select { - case <-time.After(wait): - t.Fatalf("context should have timed out") - case <-c.Done(): - } - if e := c.Err(); e != DeadlineExceeded { - t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) - } -} - -func TestDeadline(t *testing.T) { - c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o = otherContext{c} - c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) - testDeadline(c, 200*time.Millisecond, t) -} - -func TestTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 100*time.Millisecond) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o = otherContext{c} - c, _ = WithTimeout(o, 300*time.Millisecond) - testDeadline(c, 200*time.Millisecond, t) -} - -func TestCanceledTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 200*time.Millisecond) - o := otherContext{c} - c, cancel := WithTimeout(o, 400*time.Millisecond) - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - select { - case <-c.Done(): - default: - t.Errorf("<-c.Done() blocked, but shouldn't have") - } - if e := c.Err(); e != Canceled { - t.Errorf("c.Err() == %v want %v", e, Canceled) - } -} - -type key1 int -type key2 int - -var k1 = key1(1) -var k2 = key2(1) // same int as k1, different type -var k3 = key2(3) // same type as k2, different int - -func TestValues(t *testing.T) { - check := func(c Context, nm, v1, v2, v3 string) { - if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { - t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) - } - if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { - t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) - } - if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { - t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) - } - } - - c0 := Background() - check(c0, "c0", "", "", "") - - c1 := WithValue(Background(), k1, "c1k1") - check(c1, "c1", "c1k1", "", "") - - if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { - t.Errorf("c.String() = %q want %q", got, want) - } - - c2 := WithValue(c1, k2, "c2k2") - check(c2, "c2", "c1k1", "c2k2", "") - - c3 := WithValue(c2, k3, "c3k3") - check(c3, "c2", "c1k1", "c2k2", "c3k3") - - c4 := WithValue(c3, k1, nil) - check(c4, "c4", "", "c2k2", "c3k3") - - o0 := otherContext{Background()} - check(o0, "o0", "", "", "") - - o1 := otherContext{WithValue(Background(), 
k1, "c1k1")} - check(o1, "o1", "c1k1", "", "") - - o2 := WithValue(o1, k2, "o2k2") - check(o2, "o2", "c1k1", "o2k2", "") - - o3 := otherContext{c4} - check(o3, "o3", "", "c2k2", "c3k3") - - o4 := WithValue(o3, k3, nil) - check(o4, "o4", "", "c2k2", "") -} - -func TestAllocs(t *testing.T) { - bg := Background() - for _, test := range []struct { - desc string - f func() - limit float64 - gccgoLimit float64 - }{ - { - desc: "Background()", - f: func() { Background() }, - limit: 0, - gccgoLimit: 0, - }, - { - desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), - f: func() { - c := WithValue(bg, k1, nil) - c.Value(k1) - }, - limit: 3, - gccgoLimit: 3, - }, - { - desc: "WithTimeout(bg, 15*time.Millisecond)", - f: func() { - c, _ := WithTimeout(bg, 15*time.Millisecond) - <-c.Done() - }, - limit: 8, - gccgoLimit: 13, - }, - { - desc: "WithCancel(bg)", - f: func() { - c, cancel := WithCancel(bg) - cancel() - <-c.Done() - }, - limit: 5, - gccgoLimit: 8, - }, - { - desc: "WithTimeout(bg, 100*time.Millisecond)", - f: func() { - c, cancel := WithTimeout(bg, 100*time.Millisecond) - cancel() - <-c.Done() - }, - limit: 8, - gccgoLimit: 25, - }, - } { - limit := test.limit - if runtime.Compiler == "gccgo" { - // gccgo does not yet do escape analysis. - // TOOD(iant): Remove this when gccgo does do escape analysis. - limit = test.gccgoLimit - } - if n := testing.AllocsPerRun(100, test.f); n > limit { - t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) - } - } -} - -func TestSimultaneousCancels(t *testing.T) { - root, cancel := WithCancel(Background()) - m := map[Context]CancelFunc{root: cancel} - q := []Context{root} - // Create a tree of contexts. - for len(q) != 0 && len(m) < 100 { - parent := q[0] - q = q[1:] - for i := 0; i < 4; i++ { - ctx, cancel := WithCancel(parent) - m[ctx] = cancel - q = append(q, ctx) - } - } - // Start all the cancels in a random order. - var wg sync.WaitGroup - wg.Add(len(m)) - for _, cancel := range m { - go func(cancel CancelFunc) { - cancel() - wg.Done() - }(cancel) - } - // Wait on all the contexts in a random order. - for ctx := range m { - select { - case <-ctx.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) - } - } - // Wait for all the cancel functions to return. - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) - } -} - -func TestInterlockedCancels(t *testing.T) { - parent, cancelParent := WithCancel(Background()) - child, cancelChild := WithCancel(parent) - go func() { - parent.Done() - cancelChild() - }() - cancelParent() - select { - case <-child.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) - } -} - -func TestLayersCancel(t *testing.T) { - testLayers(t, time.Now().UnixNano(), false) -} - -func TestLayersTimeout(t *testing.T) { - testLayers(t, time.Now().UnixNano(), true) -} - -func testLayers(t *testing.T, seed int64, testTimeout bool) { - rand.Seed(seed) - errorf := func(format string, a ...interface{}) { - t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
- } - const ( - timeout = 200 * time.Millisecond - minLayers = 30 - ) - type value int - var ( - vals []*value - cancels []CancelFunc - numTimers int - ctx = Background() - ) - for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { - switch rand.Intn(3) { - case 0: - v := new(value) - ctx = WithValue(ctx, v, v) - vals = append(vals, v) - case 1: - var cancel CancelFunc - ctx, cancel = WithCancel(ctx) - cancels = append(cancels, cancel) - case 2: - var cancel CancelFunc - ctx, cancel = WithTimeout(ctx, timeout) - cancels = append(cancels, cancel) - numTimers++ - } - } - checkValues := func(when string) { - for _, key := range vals { - if val := ctx.Value(key).(*value); key != val { - errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) - } - } - } - select { - case <-ctx.Done(): - errorf("ctx should not be canceled yet") - default: - } - if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { - t.Errorf("ctx.String() = %q want prefix %q", s, prefix) - } - t.Log(ctx) - checkValues("before cancel") - if testTimeout { - select { - case <-ctx.Done(): - case <-time.After(timeout + timeout/10): - errorf("ctx should have timed out") - } - checkValues("after timeout") - } else { - cancel := cancels[rand.Intn(len(cancels))] - cancel() - select { - case <-ctx.Done(): - default: - errorf("ctx should be canceled") - } - checkValues("after cancel") - } -} - -func TestCancelRemoves(t *testing.T) { - checkChildren := func(when string, ctx Context, want int) { - if got := len(ctx.(*cancelCtx).children); got != want { - t.Errorf("%s: context has %d children, want %d", when, got, want) - } - } - - ctx, _ := WithCancel(Background()) - checkChildren("after creation", ctx, 0) - _, cancel := WithCancel(ctx) - checkChildren("with WithCancel child ", ctx, 1) - cancel() - checkChildren("after cancelling WithCancel child", ctx, 0) - - ctx, _ = WithCancel(Background()) - checkChildren("after creation", ctx, 0) - _, cancel = WithTimeout(ctx, 60*time.Minute) - checkChildren("with WithTimeout child ", ctx, 1) - cancel() - checkChildren("after cancelling WithTimeout child", ctx, 0) -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go b/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go deleted file mode 100644 index a6754dc368..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context_test - -import ( - "fmt" - "time" - - "golang.org/x/net/context" -) - -func ExampleWithTimeout() { - // Pass a context with a timeout to tell a blocking function that it - // should abandon its work after the timeout elapses. 
- ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) - select { - case <-time.After(200 * time.Millisecond): - fmt.Println("overslept") - case <-ctx.Done(): - fmt.Println(ctx.Err()) // prints "context deadline exceeded" - } - // Output: - // context deadline exceeded -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore deleted file mode 100644 index 4cd0cbaf43..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global - -.vagrant -*.sublime-project diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml deleted file mode 100644 index 67467e1407..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -sudo: false -language: go - -go: - - 1.4.1 - -before_script: - - FIXED=$(go fmt ./... | wc -l); if [ $FIXED -gt 0 ]; then echo "gofmt - $FIXED file(s) not formatted correctly, please run gofmt to fix this." && exit 1; fi - -os: - - linux - - osx - -notifications: - email: false diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS deleted file mode 100644 index 4e0e8284e9..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS +++ /dev/null @@ -1,34 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. 
- -Adrien Bustany -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Dave Cheney -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Matt Layher -Nathan Youngman -Paul Hammond -Pieter Droogendijk -Pursuit92 -Rob Figueiredo -Soge Zhang -Tilak Sharma -Travis Cline -Tudor Golubenco -Yukang -bronze1man -debrando -henrikedwards diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md deleted file mode 100644 index ea9428a2a4..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md +++ /dev/null @@ -1,263 +0,0 @@ -# Changelog - -## v1.2.0 / 2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59) - -## v1.1.1 / 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## v1.1.0 / 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) - -## v1.0.4 / 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## v1.0.3 / 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36) - -## v1.0.2 / 2014-08-17 - -* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## v1.0.0 / 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. 
- -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## v0.9.3 / 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) - -## v0.9.2 / 2014-08-17 - -* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## v0.9.1 / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## v0.9.0 / 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
- -## v0.8.12 / 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## v0.8.11 / 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond) - -## v0.8.10 / 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## v0.8.9 / 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## v0.8.8 / 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## v0.8.7 / 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## v0.8.6 / 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## v0.8.5 / 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## v0.8.4 / 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## v0.8.3 / 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## v0.8.2 / 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## v0.8.1 / 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## v0.8.0 / 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## v0.7.4 / 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## v0.7.3 / 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] kqueue: no longer get duplicate CREATE events - -## v0.7.2 / 2012-09-01 - -* kqueue: events for created directories - -## v0.7.1 / 2012-07-14 - -* [Fix] for renaming files - -## v0.7.0 / 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## v0.6.0 / 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## v0.5.1 / 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## v0.5.0 / 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## v0.4.0 / 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## v0.3.0 / 2012-02-19 - -* kqueue: add files when watch directory - -## v0.2.0 / 
2011-12-30 - -* update to latest Go weekly code - -## v0.1.0 / 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 - diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md deleted file mode 100644 index 0f377f341b..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -## Issues - -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. - -## Pull Requests - -### Contributor License Agreement - -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). - -Please indicate that you have signed the CLA in your pull request. - -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, OS X and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. 
Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE deleted file mode 100644 index f21e540800..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md deleted file mode 100644 index 7a0b247364..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# File system notifications for Go - -[![Coverage](http://gocover.io/_badge/github.com/go-fsnotify/fsnotify)](http://gocover.io/github.com/go-fsnotify/fsnotify) [![GoDoc](https://godoc.org/gopkg.in/fsnotify.v1?status.svg)](https://godoc.org/gopkg.in/fsnotify.v1) - -Go 1.3+ required. - -Cross platform: Windows, Linux, BSD and OS X. - -|Adapter |OS |Status | -|----------|----------|----------| -|inotify |Linux, Android\*|Supported [![Build Status](https://travis-ci.org/go-fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/go-fsnotify/fsnotify)| -|kqueue |BSD, OS X, iOS\*|Supported [![Circle CI](https://circleci.com/gh/go-fsnotify/fsnotify.svg?style=svg)](https://circleci.com/gh/go-fsnotify/fsnotify)| -|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| -|FSEvents |OS X |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)| -|FEN |Solaris 11 |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)| -|fanotify |Linux 2.6.37+ | | -|USN Journals |Windows |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/53)| -|Polling |*All* |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)| - -\* Android and iOS are untested. - -Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information. - -## API stability - -Two major versions of fsnotify exist. - -**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1. - -```go -import "gopkg.in/fsnotify.v0" -``` - -\* Refer to the package as fsnotify (without the .v0 suffix). - -**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with: - -```go -import "gopkg.in/fsnotify.v1" -``` - -Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API. - -**Master** may have unreleased changes. 
Use it to test the very latest code or when [contributing][], but don't expect it to remain API-compatible: - -```go -import "github.com/go-fsnotify/fsnotify" -``` - -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go). - - -[contributing]: https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml deleted file mode 100644 index 204217fb0b..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml +++ /dev/null @@ -1,26 +0,0 @@ -## OS X build (CircleCI iOS beta) - -# Pretend like it's an Xcode project, at least to get it running. -machine: - environment: - XCODE_WORKSPACE: NotUsed.xcworkspace - XCODE_SCHEME: NotUsed - # This is where the go project is actually checked out to: - CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify - -dependencies: - pre: - - brew upgrade go - -test: - override: - - go test ./... - -# Idealized future config, eventually with cross-platform build matrix :-) - -# machine: -# go: -# version: 1.4 -# os: -# - osx -# - linux diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go deleted file mode 100644 index 3063796602..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!solaris - -package fsnotify_test - -import ( - "log" - - "github.com/go-fsnotify/fsnotify" -) - -func ExampleNewWatcher() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event := <-watcher.Events: - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err := <-watcher.Errors: - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go deleted file mode 100644 index c899ee0083..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!solaris - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. -type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." 
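Op above is a bit set, so a single notification can carry several operations and callers test membership with a mask. A small sketch, assuming the vendored gopkg.in/fsnotify.v1 import path:

```go
package main

import (
	"fmt"

	"gopkg.in/fsnotify.v1"
)

func main() {
	// A single event can carry several operations OR-ed together.
	ev := fsnotify.Event{Name: "/tmp/foo", Op: fsnotify.Create | fsnotify.Write}

	// Test membership with a mask, as the package examples do.
	if ev.Op&fsnotify.Write == fsnotify.Write {
		fmt.Println("modified:", ev.Name)
	}

	fmt.Println(ev) // "/tmp/foo": CREATE|WRITE
}
```

The String method implemented next renders those same flags, collecting the set names into a bytes.Buffer and trimming the leading pipe.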
-func (e Event) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if e.Op&Create == Create { - buffer.WriteString("|CREATE") - } - if e.Op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if e.Op&Write == Write { - buffer.WriteString("|WRITE") - } - if e.Op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if e.Op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - - // If buffer remains empty, return no event names - if buffer.Len() == 0 { - return fmt.Sprintf("%q: ", e.Name) - } - - // Return a list of event names, with leading pipe character stripped - return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:]) -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go deleted file mode 100644 index d7759ec8c8..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := syscall.InotifyInit() - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - syscall.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
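NewWatcher above starts a long-lived readEvents goroutine, and Close coordinates with it through a pair of channels: done is closed to broadcast the quit signal, and doneResp is closed by the goroutine to acknowledge it has fully exited. A reduced sketch of the same protocol (the worker type and its names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// worker mirrors the Watcher's shutdown handshake: done broadcasts
// "quit", doneResp acknowledges that the goroutine has exited.
type worker struct {
	done     chan struct{}
	doneResp chan struct{}
}

func newWorker() *worker {
	w := &worker{done: make(chan struct{}), doneResp: make(chan struct{})}
	go func() {
		defer close(w.doneResp)
		for !w.isClosed() {
			time.Sleep(10 * time.Millisecond) // stand-in for one read/poll cycle
		}
	}()
	return w
}

// isClosed is the same non-blocking probe the Watcher uses: a receive
// from a closed channel succeeds immediately; otherwise default fires.
func (w *worker) isClosed() bool {
	select {
	case <-w.done:
		return true
	default:
		return false
	}
}

func (w *worker) close() {
	close(w.done) // signal any number of listeners
	<-w.doneResp  // wait for the goroutine to acknowledge
}

func main() {
	w := newWorker()
	w.close()
	fmt.Println("stopped cleanly")
}
```

Add and Remove, below, run on the caller's goroutine and touch only the mutex-guarded maps, so they take no part in this handshake.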
-func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM | - syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY | - syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - watchEntry, found := w.watches[name] - w.mu.Unlock() - if found { - watchEntry.flags |= flags - flags |= syscall.IN_MASK_ADD - } - wd, errno := syscall.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - w.mu.Lock() - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - w.mu.Unlock() - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // That means we can safely delete it from our watches, whatever inotify_rm_watch does. - delete(w.watches, name) - success, errno := syscall.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer syscall.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = syscall.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == syscall.EINTR { - continue - } - - // syscall.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < syscall.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occured while reading. 
- err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-syscall.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name := w.paths[int(raw.Wd)] - w.mu.Unlock() - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += syscall.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&syscall.IN_IGNORED == syscall.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO { - e.Op |= Create - } - if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE { - e.Op |= Remove - } - if mask&syscall.IN_MODIFY == syscall.IN_MODIFY { - e.Op |= Write - } - if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go deleted file mode 100644 index 3b41784041..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
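The poller implemented in the next file pairs the inotify descriptor with a self-pipe: epoll waits on both, and writing a single byte to the pipe forces EpollWait to return so the reader can notice a pending Close. A Linux-only sketch of just that wakeup mechanism, using the same syscall package the vendored code uses:

```go
// +build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Create the epoll instance and a non-blocking pipe; pipe[0] is the
	// read end that epoll watches, pipe[1] is the write end used to wake.
	epfd, err := syscall.EpollCreate(1)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(epfd)

	var pipe [2]int
	if err := syscall.Pipe2(pipe[:], syscall.O_NONBLOCK); err != nil {
		panic(err)
	}
	defer syscall.Close(pipe[0])
	defer syscall.Close(pipe[1])

	ev := syscall.EpollEvent{Fd: int32(pipe[0]), Events: syscall.EPOLLIN}
	if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, pipe[0], &ev); err != nil {
		panic(err)
	}

	// "wake": one byte is enough; EAGAIN on a full pipe would be fine,
	// because the poller is already guaranteed to wake in that case.
	if _, err := syscall.Write(pipe[1], []byte{0}); err != nil {
		panic(err)
	}

	events := make([]syscall.EpollEvent, 2)
	n, err := syscall.EpollWait(epfd, events, -1)
	if err != nil {
		panic(err)
	}
	fmt.Println("ready descriptors:", n) // 1: the pipe read end
}
```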
- -// +build linux - -package fsnotify - -import ( - "errors" - "syscall" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = syscall.EpollCreate(1) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. - errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := syscall.EpollEvent{ - Fd: int32(poller.fd), - Events: syscall.EPOLLIN, - } - errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = syscall.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: syscall.EPOLLIN, - } - errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]syscall.EpollEvent, 7) - for { - n, errno := syscall.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == syscall.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&syscall.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&syscall.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let syscall.Read pick up the error. - epollerr = true - } - if event.Events&syscall.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&syscall.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&syscall.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&syscall.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. 
- err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := syscall.Write(poller.pipe[1], buf) - if n == -1 { - if errno == syscall.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := syscall.Read(poller.pipe[0], buf) - if n == -1 { - if errno == syscall.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. -func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - syscall.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - syscall.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - syscall.Close(poller.epfd) - } -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go deleted file mode 100644 index af9f407f8d..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "syscall" - "testing" - "time" -) - -type testFd [2]int - -func makeTestFd(t *testing.T) testFd { - var tfd testFd - errno := syscall.Pipe(tfd[:]) - if errno != nil { - t.Fatalf("Failed to create pipe: %v", errno) - } - return tfd -} - -func (tfd testFd) fd() int { - return tfd[0] -} - -func (tfd testFd) closeWrite(t *testing.T) { - errno := syscall.Close(tfd[1]) - if errno != nil { - t.Fatalf("Failed to close write end of pipe: %v", errno) - } -} - -func (tfd testFd) put(t *testing.T) { - buf := make([]byte, 10) - _, errno := syscall.Write(tfd[1], buf) - if errno != nil { - t.Fatalf("Failed to write to pipe: %v", errno) - } -} - -func (tfd testFd) get(t *testing.T) { - buf := make([]byte, 10) - _, errno := syscall.Read(tfd[0], buf) - if errno != nil { - t.Fatalf("Failed to read from pipe: %v", errno) - } -} - -func (tfd testFd) close() { - syscall.Close(tfd[1]) - syscall.Close(tfd[0]) -} - -func makePoller(t *testing.T) (testFd, *fdPoller) { - tfd := makeTestFd(t) - poller, err := newFdPoller(tfd.fd()) - if err != nil { - t.Fatalf("Failed to create poller: %v", err) - } - return tfd, poller -} - -func TestPollerWithBadFd(t *testing.T) { - _, err := newFdPoller(-1) - if err != syscall.EBADF { - t.Fatalf("Expected EBADF, got: %v", err) - } -} - -func TestPollerWithData(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.put(t) - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - tfd.get(t) -} - -func TestPollerWithWakeup(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if ok { - t.Fatalf("expected poller to return false") - } -} - -func TestPollerWithClose(t 
*testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.closeWrite(t) - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } -} - -func TestPollerWithWakeupAndData(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.put(t) - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - - // both data and wakeup - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - - // data is still in the buffer, wakeup is cleared - ok, err = poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - - tfd.get(t) - // data is gone, only wakeup now - err = poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - ok, err = poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if ok { - t.Fatalf("expected poller to return false") - } -} - -func TestPollerConcurrent(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - oks := make(chan bool) - live := make(chan bool) - defer close(live) - go func() { - defer close(oks) - for { - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - oks <- ok - if !<-live { - return - } - } - }() - - // Try a write - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - tfd.put(t) - if !<-oks { - t.Fatalf("expected true") - } - tfd.get(t) - live <- true - - // Try a wakeup - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - if <-oks { - t.Fatalf("expected false") - } - live <- true - - // Try a close - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - tfd.closeWrite(t) - if !<-oks { - t.Fatalf("expected true") - } - tfd.get(t) -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go deleted file mode 100644 index 035ee8f95d..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "os" - "path/filepath" - "syscall" - "testing" - "time" -) - -func TestInotifyCloseRightAway(t *testing.T) { - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - // Close immediately; it won't even reach the first syscall.Read. - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseSlightlyLater(t *testing.T) { - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - // Wait until readEvents has reached syscall.Read, and Close. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. 
- <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - w.Add(testDir) - - // Wait until readEvents has reached syscall.Read, and Close. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseAfterRead(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add .") - } - - // Generate an event. - os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING")) - - // Wait for readEvents to read the event, then close the watcher. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func isWatcherReallyClosed(t *testing.T, w *Watcher) { - select { - case err, ok := <-w.Errors: - if ok { - t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err) - } - default: - t.Fatalf("w.Errors would have blocked; readEvents is still alive!") - } - - select { - case _, ok := <-w.Events: - if ok { - t.Fatalf("w.Events is not closed; readEvents is still alive after closing") - } - default: - t.Fatalf("w.Events would have blocked; readEvents is still alive!") - } -} - -func TestInotifyCloseCreate(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add testDir: %v", err) - } - h, err := os.Create(filepath.Join(testDir, "testfile")) - if err != nil { - t.Fatalf("Failed to create file in testdir: %v", err) - } - h.Close() - select { - case _ = <-w.Events: - case err := <-w.Errors: - t.Fatalf("Error from watcher: %v", err) - case <-time.After(50 * time.Millisecond): - t.Fatalf("Took too long to wait for event") - } - - // At this point, we've received one event, so the goroutine is ready. - // It's also blocking on syscall.Read. - // Now we try to swap the file descriptor under its nose. 
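isWatcherReallyClosed above leans on two channel guarantees: a receive from a closed channel returns immediately with ok == false, while a receive from an open, empty channel blocks, which the default case converts into a test failure. A generic form of that assertion, as a hypothetical helper rather than anything in the package:

    // closedNow reports whether ch is already closed, without blocking.
    // An open channel that is merely empty reads as "not closed".
    func closedNow(ch chan struct{}) bool {
        select {
        case _, ok := <-ch:
            return !ok // ok == false: the channel was closed
        default:
            return false // open and empty: a receive would have blocked
        }
    }

TestInotifyCloseCreate then closes the running watcher and opens a fresh one underneath the still-blocked goroutine: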
- w.Close() - w, err = NewWatcher() - defer w.Close() - if err != nil { - t.Fatalf("Failed to create second watcher: %v", err) - } - - <-time.After(50 * time.Millisecond) - err = w.Add(testDir) - if err != nil { - t.Fatalf("Error adding testDir again: %v", err) - } -} - -func TestInotifyStress(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - testFile := filepath.Join(testDir, "testfile") - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - killchan := make(chan struct{}) - defer close(killchan) - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add testDir: %v", err) - } - - proc, err := os.FindProcess(os.Getpid()) - if err != nil { - t.Fatalf("Error finding process: %v", err) - } - - go func() { - for { - select { - case <-time.After(5 * time.Millisecond): - err := proc.Signal(syscall.SIGUSR1) - if err != nil { - t.Fatalf("Signal failed: %v", err) - } - case <-killchan: - return - } - } - }() - - go func() { - for { - select { - case <-time.After(11 * time.Millisecond): - err := w.poller.wake() - if err != nil { - t.Fatalf("Wake failed: %v", err) - } - case <-killchan: - return - } - } - }() - - go func() { - for { - select { - case <-killchan: - return - default: - handle, err := os.Create(testFile) - if err != nil { - t.Fatalf("Create failed: %v", err) - } - handle.Close() - time.Sleep(time.Millisecond) - err = os.Remove(testFile) - if err != nil { - t.Fatalf("Remove failed: %v", err) - } - } - } - }() - - creates := 0 - removes := 0 - after := time.After(5 * time.Second) - for { - select { - case <-after: - if creates-removes > 1 || creates-removes < -1 { - t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes) - } - if creates < 50 { - t.Fatalf("Expected at least 50 creates, got %d", creates) - } - return - case err := <-w.Errors: - t.Fatalf("Got an error from watcher: %v", err) - case evt := <-w.Events: - if evt.Name != testFile { - t.Fatalf("Got an event for an unknown file: %s", evt.Name) - } - if evt.Op == Create { - creates++ - } - if evt.Op == Remove { - removes++ - } - } - } -} - -func TestInotifyRemoveTwice(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - testFile := filepath.Join(testDir, "testfile") - - handle, err := os.Create(testFile) - if err != nil { - t.Fatalf("Create failed: %v", err) - } - handle.Close() - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - err = w.Add(testFile) - if err != nil { - t.Fatalf("Failed to add testFile: %v", err) - } - - err = os.Remove(testFile) - if err != nil { - t.Fatalf("Failed to remove testFile: %v", err) - } - - err = w.Remove(testFile) - if err != syscall.EINVAL { - t.Fatalf("Expected EINVAL from Remove, got: %v", err) - } - - err = w.Remove(testFile) - if err == syscall.EINVAL { - t.Fatalf("Got EINVAL again, watch was not removed") - } -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go deleted file mode 100644 index 59169c6afa..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go +++ /dev/null @@ -1,1135 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
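Each vendored fsnotify file selects its platform backend with an old-style build constraint: within a +build line a comma means AND and a space means OR, and the comment must sit above the package clause with a blank line between them, in this shape:

    // +build !plan9,!solaris

    package fsnotify

That constraint, "not plan9 and not solaris", is exactly the tag that opens the integration test file below: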
- -// +build !plan9,!solaris - -package fsnotify - -import ( - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "sync/atomic" - "testing" - "time" -) - -// An atomic counter -type counter struct { - val int32 -} - -func (c *counter) increment() { - atomic.AddInt32(&c.val, 1) -} - -func (c *counter) value() int32 { - return atomic.LoadInt32(&c.val) -} - -func (c *counter) reset() { - atomic.StoreInt32(&c.val, 0) -} - -// tempMkdir makes a temporary directory -func tempMkdir(t *testing.T) string { - dir, err := ioutil.TempDir("", "fsnotify") - if err != nil { - t.Fatalf("failed to create test directory: %s", err) - } - return dir -} - -// newWatcher initializes an fsnotify Watcher instance. -func newWatcher(t *testing.T) *Watcher { - watcher, err := NewWatcher() - if err != nil { - t.Fatalf("NewWatcher() failed: %s", err) - } - return watcher -} - -// addWatch adds a watch for a directory -func addWatch(t *testing.T, watcher *Watcher, dir string) { - if err := watcher.Add(dir); err != nil { - t.Fatalf("watcher.Add(%q) failed: %s", dir, err) - } -} - -func TestFsnotifyMultipleOperations(t *testing.T) { - watcher := newWatcher(t) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory that's not watched - testDirToMoveFiles := tempMkdir(t) - defer os.RemoveAll(testDirToMoveFiles) - - testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") - testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile") - - addWatch(t, watcher, testDir) - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived, renameReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Rename == Rename { - renameReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // Modify the file outside of the watched dir - f, err = os.Open(testFileRenamed) - if err != nil { - t.Fatalf("open test renamed file failed: %s", err) - } - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Recreate the file that was moved - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Close() - time.Sleep(50 * time.Millisecond) // give system 
time to sync write change before delete - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - mReceived := modifyReceived.value() - if mReceived != 1 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) - } - dReceived := deleteReceived.value() - rReceived := renameReceived.value() - if dReceived+rReceived != 1 { - t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyMultipleCreates(t *testing.T) { - watcher := newWatcher(t) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") - - addWatch(t, watcher, testDir) - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - os.Remove(testFile) - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Recreate the file - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Close() - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Modify - f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Modify - f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync 
write change before delete - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - mReceived := modifyReceived.value() - if mReceived < 3 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3) - } - dReceived := deleteReceived.value() - if dReceived != 1 { - t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyDirOnly(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - // This should NOT add any events to the fsnotify event queue - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - os.Remove(testFile) - os.Remove(testFileAlreadyExists) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 1 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1) - } - mReceived := modifyReceived.value() - if mReceived != 1 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) - } - dReceived := deleteReceived.value() - if dReceived != 2 { - t.Fatalf("incorrect number of delete 
events received after 500 ms (%d vs %d)", dReceived, 2) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyDeleteWatchedDir(t *testing.T) { - watcher := newWatcher(t) - defer watcher.Close() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - addWatch(t, watcher, testDir) - - // Add a watch for testFile - addWatch(t, watcher, testFileAlreadyExists) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var deleteReceived counter - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - }() - - os.RemoveAll(testDir) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - dReceived := deleteReceived.value() - if dReceived < 2 { - t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived) - } -} - -func TestFsnotifySubDir(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile") - testSubDir := filepath.Join(testDir, "sub") - testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile") - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) { - t.Logf("event received: %s", event) - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Remove == Remove { - deleteReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - addWatch(t, watcher, testDir) - - // Create sub-directory - if err := os.Mkdir(testSubDir, 0777); err != nil { - t.Fatalf("failed to create test sub-directory: %s", err) - } - - // Create a file - var f *os.File - f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - // Create a file (Should not see 
this! we are not watching subdir) - var fs *os.File - fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - fs.Sync() - fs.Close() - - time.Sleep(200 * time.Millisecond) - - // Make sure receive deletes for both file and sub-directory - os.RemoveAll(testSubDir) - os.Remove(testFile1) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - dReceived := deleteReceived.value() - if dReceived != 2 { - t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyRename(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var renameReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { - if event.Op&Rename == Rename { - renameReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - f.WriteString("data") - f.Sync() - f.Close() - - // Add a watch for testFile - addWatch(t, watcher, testFile) - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if renameReceived.value() == 0 { - t.Fatal("fsnotify rename events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestFsnotifyRenameToCreate(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory to get file - testDirFrom := tempMkdir(t) - defer os.RemoveAll(testDirFrom) - - addWatch(t, watcher, 
testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { - if event.Op&Create == Create { - createReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if createReceived.value() == 0 { - t.Fatal("fsnotify create events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestFsnotifyRenameToOverwrite(t *testing.T) { - switch runtime.GOOS { - case "plan9", "windows": - t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS) - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory to get file - testDirFrom := tempMkdir(t) - defer os.RemoveAll(testDirFrom) - - testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Create a file - var fr *os.File - fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - fr.Sync() - fr.Close() - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var eventReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testFileRenamed) { - eventReceived.increment() - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - if 
err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if eventReceived.value() == 0 { - t.Fatal("fsnotify events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestRemovalOfWatch(t *testing.T) { - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - watcher := newWatcher(t) - defer watcher.Close() - - addWatch(t, watcher, testDir) - if err := watcher.Remove(testDir); err != nil { - t.Fatalf("Could not remove the watch: %v\n", err) - } - - go func() { - select { - case ev := <-watcher.Events: - t.Fatalf("We received event: %v\n", ev) - case <-time.After(500 * time.Millisecond): - t.Log("No event received, as expected.") - } - }() - - time.Sleep(200 * time.Millisecond) - // Modify the file outside of the watched dir - f, err := os.Open(testFileAlreadyExists) - if err != nil { - t.Fatalf("Open test file failed: %s", err) - } - f.WriteString("data") - f.Sync() - f.Close() - if err := os.Chmod(testFileAlreadyExists, 0700); err != nil { - t.Fatalf("chmod failed: %s", err) - } - time.Sleep(400 * time.Millisecond) -} - -func TestFsnotifyAttrib(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("attributes don't work on Windows.") - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - // The modifyReceived counter counts IsModify events that are not IsAttrib, - // and the attribReceived counts IsAttrib events (which are also IsModify as - // a consequence). 
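Op is a bit set, so a single Event can carry several flags at once. A hypothetical helper matching the exclusive counting that the comment above describes, with counter being the atomic type defined earlier in this test file:

    // countOp sorts one event into the attrib or modify bucket, checking
    // Chmod first so a combined Write|Chmod event is not double-counted.
    func countOp(ev Event, modify, attrib *counter) {
        switch {
        case ev.Op&Chmod == Chmod:
            attrib.increment()
        case ev.Op&Write == Write:
            modify.increment()
        }
    }

The test itself tracks the two kinds with separate counters: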
- var modifyReceived counter - var attribReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Chmod == Chmod { - attribReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - f.WriteString("data") - f.Sync() - f.Close() - - // Add a watch for testFile - addWatch(t, watcher, testFile) - - if err := os.Chmod(testFile, 0700); err != nil { - t.Fatalf("chmod failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - // Creating/writing a file changes also the mtime, so IsAttrib should be set to true here - time.Sleep(500 * time.Millisecond) - if modifyReceived.value() != 0 { - t.Fatal("received an unexpected modify event when creating a test file") - } - if attribReceived.value() == 0 { - t.Fatal("fsnotify attribute events have not received after 500 ms") - } - - // Modifying the contents of the file does not set the attrib flag (although eg. the mtime - // might have been modified). - modifyReceived.reset() - attribReceived.reset() - - f, err = os.OpenFile(testFile, os.O_WRONLY, 0) - if err != nil { - t.Fatalf("reopening test file failed: %s", err) - } - - f.WriteString("more data") - f.Sync() - f.Close() - - time.Sleep(500 * time.Millisecond) - - if modifyReceived.value() != 1 { - t.Fatal("didn't receive a modify event after changing test file contents") - } - - if attribReceived.value() != 0 { - t.Fatal("did receive an unexpected attrib event after changing test file contents") - } - - modifyReceived.reset() - attribReceived.reset() - - // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents - // of the file are not changed though) - if err := os.Chmod(testFile, 0600); err != nil { - t.Fatalf("chmod failed: %s", err) - } - - time.Sleep(500 * time.Millisecond) - - if attribReceived.value() != 1 { - t.Fatal("didn't receive an attribute change after 500ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(1e9): - t.Fatal("event stream was not closed after 1 second") - } - - os.Remove(testFile) -} - -func TestFsnotifyClose(t *testing.T) { - watcher := newWatcher(t) - watcher.Close() - - var done int32 - go func() { - watcher.Close() - atomic.StoreInt32(&done, 1) - }() - - time.Sleep(50e6) // 50 ms - if atomic.LoadInt32(&done) == 0 { - t.Fatal("double Close() test failed: second Close() call didn't return") - } - - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - if err := watcher.Add(testDir); err == nil { - t.Fatal("expected error on Watch() after Close(), got nil") - } -} - -func TestFsnotifyFakeSymlink(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("symlinks don't work on Windows.") - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - var 
errorsReceived counter - // Receive errors on the error channel on a separate goroutine - go func() { - for errors := range watcher.Errors { - t.Logf("Received error: %s", errors) - errorsReceived.increment() - } - }() - - // Count the CREATE events received - var createEventsReceived, otherEventsReceived counter - go func() { - for ev := range watcher.Events { - t.Logf("event received: %s", ev) - if ev.Op&Create == Create { - createEventsReceived.increment() - } else { - otherEventsReceived.increment() - } - } - }() - - addWatch(t, watcher, testDir) - - if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil { - t.Fatalf("Failed to create bogus symlink: %s", err) - } - t.Logf("Created bogus symlink") - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - - // Should not be error, just no events for broken links (watching nothing) - if errorsReceived.value() > 0 { - t.Fatal("fsnotify errors have been received.") - } - if otherEventsReceived.value() > 0 { - t.Fatal("fsnotify other events received on the broken link") - } - - // Except for 1 create event (for the link itself) - if createEventsReceived.value() == 0 { - t.Fatal("fsnotify create events were not received after 500 ms") - } - if createEventsReceived.value() > 1 { - t.Fatal("fsnotify more create events received than expected") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() -} - -// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race. -// See https://codereview.appspot.com/103300045/ -// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race -func TestConcurrentRemovalOfWatch(t *testing.T) { - if runtime.GOOS != "darwin" { - t.Skip("regression test for race only present on darwin") - } - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - watcher := newWatcher(t) - defer watcher.Close() - - addWatch(t, watcher, testDir) - - // Test that RemoveWatch can be invoked concurrently, with no data races. 
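One asymmetry worth noting in the code that follows: the first goroutine defers close(removed1), but the second closes removed2 before calling Remove, so the trailing receive returns without waiting for that second Remove to finish. If the aim were to block until both calls complete, the symmetric form would be (a suggested variant, not the vendored code):

    removed2 := make(chan struct{})
    go func() {
        defer close(removed2) // deferred, so <-removed2 waits for Remove to return
        watcher.Remove(testDir)
    }()
    <-removed2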
- removed1 := make(chan struct{}) - go func() { - defer close(removed1) - watcher.Remove(testDir) - }() - removed2 := make(chan struct{}) - go func() { - close(removed2) - watcher.Remove(testDir) - }() - <-removed1 - <-removed2 -} - -func TestClose(t *testing.T) { - // Regression test for #59 bad file descriptor from Close - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - watcher := newWatcher(t) - if err := watcher.Add(testDir); err != nil { - t.Fatalf("Expected no error on Add, got %v", err) - } - err := watcher.Close() - if err != nil { - t.Fatalf("Expected no error on Close, got %v.", err) - } -} - -func testRename(file1, file2 string) error { - switch runtime.GOOS { - case "windows", "plan9": - return os.Rename(file1, file2) - default: - cmd := exec.Command("mv", file1, file2) - return cmd.Run() - } -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go deleted file mode 100644 index 265622d201..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "syscall" - "time" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan bool // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan bool), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - w.mu.Unlock() - - w.mu.Lock() - ws := w.watches - w.mu.Unlock() - - var err error - for name := range ws { - if e := w.Remove(name); e != nil && err == nil { - err = e - } - } - - // Send "quit" message to the reader goroutine: - w.done <- true - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - return w.addWatch(name, noteAllEvents) -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = syscall.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - syscall.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -func (w *Watcher) addWatch(name string, flags uint32) error { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return nil - } - - fi, err = os.Lstat(name) - if err != nil { - return nil - } - } - - watchfd, err = syscall.Open(name, openMode, 0700) - if watchfd == -1 { - return err - } - - isDir = fi.IsDir() - } - - const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - syscall.Close(watchfd) - return err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return err - } - } - } - return nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]syscall.Kevent_t, 10) - - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - err := syscall.Close(w.kq) - if err != nil { - w.Errors <- err - } - close(w.Events) - close(w.Errors) - return - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != syscall.EINTR { - w.Errors <- err - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel - w.Events <- event - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - fileDir, _ := filepath.Split(event.Name) - fileDir = filepath.Clean(fileDir) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. 
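A subtlety in the guard coming up below: os.IsExist(err) is true only for "already exists" errors and false when err is nil, so after a successful Lstat the branch does not fire, and sendDirectoryChangeEvents is skipped precisely when the directory does still exist. The intent stated in the comment above would instead read (a suggested correction, not what this vendored copy does):

    // Only rescan if the parent directory is still there; a failed Lstat
    // means it vanished and the pending delete event will drop the watch.
    if _, err := os.Lstat(fileDir); err == nil {
        w.sendDirectoryChangeEvents(fileDir)
    }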
- if _, err := os.Lstat(fileDir); os.IsExist(err) { - w.sendDirectoryChangeEvents(fileDir) - // FIXME: should this be for events on files or just isDir? - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE { - e.Op |= Remove - } - if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE { - e.Op |= Write - } - if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME { - e.Op |= Rename - } - if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - if err := w.internalWatch(filePath, fileInfo); err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - w.Errors <- err - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - w.Events <- newCreateEvent(filePath) - } - - // like watchDirectoryFiles (but without doing another ReadDir) - if err := w.internalWatch(filePath, fileInfo); err != nil { - return - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= syscall.NOTE_DELETE - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = syscall.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]syscall.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := syscall.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. 
-func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) { - n, err := syscall.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) syscall.Timespec { - return syscall.NsecToTimespec(d.Nanoseconds()) -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go deleted file mode 100644 index c57ccb427b..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "syscall" - -const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go deleted file mode 100644 index 174b2c331f..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin - -package fsnotify - -import "syscall" - -// note: this constant is not defined on BSD -const openMode = syscall.O_EVTONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go deleted file mode 100644 index 811585227d..0000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sys_FS_ALL_EVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sys_FS_ONESHOT = 0x80000000 - sys_FS_ONLYDIR = 0x1000000 - - // Events - sys_FS_ACCESS = 0x1 - sys_FS_ALL_EVENTS = 0xfff - sys_FS_ATTRIB = 0x4 - sys_FS_CLOSE = 0x18 - sys_FS_CREATE = 0x100 - sys_FS_DELETE = 0x200 - sys_FS_DELETE_SELF = 0x400 - sys_FS_MODIFY = 0x2 - sys_FS_MOVE = 0xc0 - sys_FS_MOVED_FROM = 0x40 - sys_FS_MOVED_TO = 0x80 - sys_FS_MOVE_SELF = 0x800 - - // Special events - sys_FS_IGNORED = 0x8000 - sys_FS_Q_OVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO { - e.Op |= Create - } - if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF { - e.Op |= Remove - } - if mask&sys_FS_MODIFY == sys_FS_MODIFY { - e.Op |= Write - } - if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM { - e.Op |= Rename - } - if mask&sys_FS_ATTRIB == sys_FS_ATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, 
os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sys_FS_ONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) { - if watch.mask&sys_FS_ONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
-			n = uint32(unsafe.Sizeof(watch.buf))
-		}
-	case syscall.ERROR_ACCESS_DENIED:
-		// Watched directory was probably removed
-		w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
-		w.deleteWatch(watch)
-		w.startRead(watch)
-		continue
-	case syscall.ERROR_OPERATION_ABORTED:
-		// CancelIo was called on this handle
-		continue
-	default:
-		w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
-		continue
-	case nil:
-	}
-
-	var offset uint32
-	for {
-		if n == 0 {
-			w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
-			w.Errors <- errors.New("short read in readEvents()")
-			break
-		}
-
-		// Point "raw" to the event in the buffer
-		raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
-		buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
-		name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
-		fullname := watch.path + "\\" + name
-
-		var mask uint64
-		switch raw.Action {
-		case syscall.FILE_ACTION_REMOVED:
-			mask = sys_FS_DELETE_SELF
-		case syscall.FILE_ACTION_MODIFIED:
-			mask = sys_FS_MODIFY
-		case syscall.FILE_ACTION_RENAMED_OLD_NAME:
-			watch.rename = name
-		case syscall.FILE_ACTION_RENAMED_NEW_NAME:
-			if watch.names[watch.rename] != 0 {
-				watch.names[name] |= watch.names[watch.rename]
-				delete(watch.names, watch.rename)
-				mask = sys_FS_MOVE_SELF
-			}
-		}
-
-		sendNameEvent := func() {
-			if w.sendEvent(fullname, watch.names[name]&mask) {
-				if watch.names[name]&sys_FS_ONESHOT != 0 {
-					delete(watch.names, name)
-				}
-			}
-		}
-		if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
-			sendNameEvent()
-		}
-		if raw.Action == syscall.FILE_ACTION_REMOVED {
-			w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
-			delete(watch.names, name)
-		}
-		if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
-			if watch.mask&sys_FS_ONESHOT != 0 {
-				watch.mask = 0
-			}
-		}
-		if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
-			fullname = watch.path + "\\" + watch.rename
-			sendNameEvent()
-		}
-
-		// Move to the next event in the buffer
-		if raw.NextEntryOffset == 0 {
-			break
-		}
-		offset += raw.NextEntryOffset
-
-		// Error!
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sys_FS_ACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sys_FS_MODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sys_FS_ATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sys_FS_CREATE - case syscall.FILE_ACTION_REMOVED: - return sys_FS_DELETE - case syscall.FILE_ACTION_MODIFIED: - return sys_FS_MODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sys_FS_MOVED_FROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sys_FS_MOVED_TO - } - return 0 -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index a68e67f01b..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,188 +0,0 @@ - -Copyright (c) 2011-2014 - Canonical Inc. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. 
- - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. 
- - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
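For reference, the fsnotify.v1 watcher deleted above is driven entirely through `NewWatcher`, `Add`, `Close`, and the `Events`/`Errors` channels shown in its sources. A minimal consumer sketch against that API (the watched path is hypothetical):

```Go
package main

import (
	"log"

	"gopkg.in/fsnotify.v1"
)

func main() {
	// NewWatcher starts the platform backend (on Windows, the
	// I/O-completion-port reader goroutine shown in windows.go above).
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Add registers a file or directory, non-recursively.
	if err := watcher.Add("/tmp"); err != nil { // hypothetical path
		log.Fatal(err)
	}

	// Drain both channels; Op is a bitmask of Create, Write,
	// Remove, Rename, and Chmod.
	for {
		select {
		case ev := <-watcher.Events:
			log.Println("event:", ev.Name, ev.Op)
		case err := <-watcher.Errors:
			log.Println("error:", err)
		}
	}
}
```

Note that both channels are read: in the Windows implementation above, `Errors` is created unbuffered, so an unread error would block the reader goroutine.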
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf6f..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md b/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md deleted file mode 100644 index d6c919e607..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! 
-b: - c: 2 - d: [3, 4] -` - -type T struct { - A string - B struct{C int; D []int ",flow"} -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index 95ec014e8c..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,742 +0,0 @@ -package yaml - -import ( - "io" - "os" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// File read handler. -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_file.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_file_read_handler - parser.input_file = file -} - -// Set the source encoding. 
-func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
-	if parser.encoding != yaml_ANY_ENCODING {
-		panic("must set the encoding only once")
-	}
-	parser.encoding = encoding
-}
-
-// Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
-	*emitter = yaml_emitter_t{
-		buffer:     make([]byte, output_buffer_size),
-		raw_buffer: make([]byte, 0, output_raw_buffer_size),
-		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
-		events:     make([]yaml_event_t, 0, initial_queue_size),
-	}
-	return true
-}
-
-// Destroy an emitter object.
-func yaml_emitter_delete(emitter *yaml_emitter_t) {
-	*emitter = yaml_emitter_t{}
-}
-
-// String write handler.
-func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
-	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
-	return nil
-}
-
-// File write handler.
-func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
-	_, err := emitter.output_file.Write(buffer)
-	return err
-}
-
-// Set a string output.
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
-	if emitter.write_handler != nil {
-		panic("must set the output target only once")
-	}
-	emitter.write_handler = yaml_string_write_handler
-	emitter.output_buffer = output_buffer
-}
-
-// Set a file output.
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
-	if emitter.write_handler != nil {
-		panic("must set the output target only once")
-	}
-	emitter.write_handler = yaml_file_write_handler
-	emitter.output_file = file
-}
-
-// Set the output encoding.
-func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
-	if emitter.encoding != yaml_ANY_ENCODING {
-		panic("must set the output encoding only once")
-	}
-	emitter.encoding = encoding
-}
-
-// Set the canonical output style.
-func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
-	emitter.canonical = canonical
-}
-
-// Set the indentation increment.
-func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
-	if indent < 2 || indent > 9 {
-		indent = 2
-	}
-	emitter.best_indent = indent
-}
-
-// Set the preferred line width.
-func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
-	if width < 0 {
-		width = -1
-	}
-	emitter.best_width = width
-}
-
-// Set if unescaped non-ASCII characters are allowed.
-func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
-	emitter.unicode = unicode
-}
-
-// Set the preferred line break character.
-func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
-	emitter.line_break = line_break
-}
-
-///*
-// * Destroy a token object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_token_delete(yaml_token_t *token)
-//{
-//    assert(token);  // Non-NULL token object expected.
-// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } - return true -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } - return true -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } - return true -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } - return true -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. 
-func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } - return true -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-//
-//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
-//
-//    if (version_directive) {
-//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
-//        if (!version_directive_copy) goto error
-//        version_directive_copy.major = version_directive.major
-//        version_directive_copy.minor = version_directive.minor
-//    }
-//
-//    if (tag_directives_start != tag_directives_end) {
-//        tag_directive *yaml_tag_directive_t
-//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
-//            goto error
-//        for (tag_directive = tag_directives_start
-//                tag_directive != tag_directives_end; tag_directive ++) {
-//            assert(tag_directive.handle)
-//            assert(tag_directive.prefix)
-//            if (!yaml_check_utf8(tag_directive.handle,
-//                        strlen((char *)tag_directive.handle)))
-//                goto error
-//            if (!yaml_check_utf8(tag_directive.prefix,
-//                        strlen((char *)tag_directive.prefix)))
-//                goto error
-//            value.handle = yaml_strdup(tag_directive.handle)
-//            value.prefix = yaml_strdup(tag_directive.prefix)
-//            if (!value.handle || !value.prefix) goto error
-//            if (!PUSH(&context, tag_directives_copy, value))
-//                goto error
-//            value.handle = NULL
-//            value.prefix = NULL
-//        }
-//    }
-//
-//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
-//            tag_directives_copy.start, tag_directives_copy.top,
-//            start_implicit, end_implicit, mark, mark)
-//
-//    return 1
-//
-//error:
-//    STACK_DEL(&context, nodes)
-//    yaml_free(version_directive_copy)
-//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
-//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
-//        yaml_free(value.handle)
-//        yaml_free(value.prefix)
-//    }
-//    STACK_DEL(&context, tag_directives_copy)
-//    yaml_free(value.handle)
-//    yaml_free(value.prefix)
-//
-//    return 0
-//}
-//
-///*
-// * Destroy a document object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_document_delete(document *yaml_document_t)
-//{
-//    struct {
-//        error yaml_error_type_t
-//    } context
-//    tag_directive *yaml_tag_directive_t
-//
-//    context.error = YAML_NO_ERROR  // Eliminate a compiler warning.
-//
-//    assert(document)  // Non-NULL document object is expected.
-//
-//    while (!STACK_EMPTY(&context, document.nodes)) {
-//        node yaml_node_t = POP(&context, document.nodes)
-//        yaml_free(node.tag)
-//        switch (node.type) {
-//            case YAML_SCALAR_NODE:
-//                yaml_free(node.data.scalar.value)
-//                break
-//            case YAML_SEQUENCE_NODE:
-//                STACK_DEL(&context, node.data.sequence.items)
-//                break
-//            case YAML_MAPPING_NODE:
-//                STACK_DEL(&context, node.data.mapping.pairs)
-//                break
-//            default:
-//                assert(0)  // Should not happen.
-//        }
-//    }
-//    STACK_DEL(&context, document.nodes)
-//
-//    yaml_free(document.version_directive)
-//    for (tag_directive = document.tag_directives.start
-//            tag_directive != document.tag_directives.end
-//            tag_directive++) {
-//        yaml_free(tag_directive.handle)
-//        yaml_free(tag_directive.prefix)
-//    }
-//    yaml_free(document.tag_directives.start)
-//
-//    memset(document, 0, sizeof(yaml_document_t))
-//}
-//
-///**
-// * Get a document node.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_node(document *yaml_document_t, index int)
-//{
-//    assert(document)  // Non-NULL document object is expected.
-//
-//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
-//        return document.nodes.start + index - 1
-//    }
-//    return NULL
-//}
-//
-///**
-// * Get the root object.
-// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index 085cddc44b..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,683 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
-
-type parser struct {
-	parser yaml_parser_t
-	event  yaml_event_t
-	doc    *node
-}
-
-func newParser(b []byte) *parser {
-	p := parser{}
-	if !yaml_parser_initialize(&p.parser) {
-		panic("failed to initialize YAML parser")
-	}
-
-	if len(b) == 0 {
-		b = []byte{'\n'}
-	}
-
-	yaml_parser_set_input_string(&p.parser, b)
-
-	p.skip()
-	if p.event.typ != yaml_STREAM_START_EVENT {
-		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
-	}
-	p.skip()
-	return &p
-}
-
-func (p *parser) destroy() {
-	if p.event.typ != yaml_NO_EVENT {
-		yaml_event_delete(&p.event)
-	}
-	yaml_parser_delete(&p.parser)
-}
-
-func (p *parser) skip() {
-	if p.event.typ != yaml_NO_EVENT {
-		if p.event.typ == yaml_STREAM_END_EVENT {
-			failf("attempted to go past the end of stream; corrupted value?")
-		}
-		yaml_event_delete(&p.event)
-	}
-	if !yaml_parser_parse(&p.parser, &p.event) {
-		p.fail()
-	}
-}
-
-func (p *parser) fail() {
-	var where string
-	var line int
-	if p.parser.problem_mark.line != 0 {
-		line = p.parser.problem_mark.line
-	} else if p.parser.context_mark.line != 0 {
-		line = p.parser.context_mark.line
-	}
-	if line != 0 {
-		where = "line " + strconv.Itoa(line) + ": "
-	}
-	var msg string
-	if len(p.parser.problem) > 0 {
-		msg = p.parser.problem
-	} else {
-		msg = "unknown problem parsing YAML content"
-	}
-	failf("%s%s", where, msg)
-}
-
-func (p *parser) anchor(n *node, anchor []byte) {
-	if anchor != nil {
-		p.doc.anchors[string(anchor)] = n
-	}
-}
-
-func (p *parser) parse() *node {
-	switch p.event.typ {
-	case yaml_SCALAR_EVENT:
-		return p.scalar()
-	case yaml_ALIAS_EVENT:
-		return p.alias()
-	case yaml_MAPPING_START_EVENT:
-		return p.mapping()
-	case yaml_SEQUENCE_START_EVENT:
-		return p.sequence()
-	case yaml_DOCUMENT_START_EVENT:
-		return p.document()
-	case yaml_STREAM_END_EVENT:
-		// Happens when attempting to decode an empty buffer.
-		return nil
-	default:
-		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
-	}
-	panic("unreachable")
-}
-
-func (p *parser) node(kind int) *node {
-	return &node{
-		kind:   kind,
-		line:   p.event.start_mark.line,
-		column: p.event.start_mark.column,
-	}
-}
-
-func (p *parser) document() *node {
-	n := p.node(documentNode)
-	n.anchors = make(map[string]*node)
-	p.doc = n
-	p.skip()
-	n.children = append(n.children, p.parse())
-	if p.event.typ != yaml_DOCUMENT_END_EVENT {
-		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
-	}
-	p.skip()
-	return n
-}
-
-func (p *parser) alias() *node {
-	n := p.node(aliasNode)
-	n.value = string(p.event.anchor)
-	p.skip()
-	return n
-}
-
-func (p *parser) scalar() *node {
-	n := p.node(scalarNode)
-	n.value = string(p.event.value)
-	n.tag = string(p.event.tag)
-	n.implicit = p.event.implicit
-	p.anchor(n, p.event.anchor)
-	p.skip()
-	return n
-}
-
-func (p *parser) sequence() *node {
-	n := p.node(sequenceNode)
-	p.anchor(n, p.event.anchor)
-	p.skip()
-	for p.event.typ != yaml_SEQUENCE_END_EVENT {
-		n.children = append(n.children, p.parse())
-	}
-	p.skip()
-	return n
-}
-
-func (p *parser) mapping() *node {
-	n := p.node(mappingNode)
-	p.anchor(n, p.event.anchor)
-	p.skip()
-	for p.event.typ != yaml_MAPPING_END_EVENT {
-		n.children = append(n.children, p.parse(), p.parse())
-	}
-	p.skip()
-	return n
-}
-
-// ----------------------------------------------------------------------------
-// Decoder, unmarshals a node into a provided value.
- -type decoder struct { - doc *node - aliases map[string]bool - mapType reflect.Type - terrors []string -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() -) - -func newDecoder() *decoder { - d := &decoder{mapType: defaultMapType} - d.aliases = make(map[string]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - an, ok := d.doc.anchors[n.value] - if !ok { - failf("unknown anchor '%s' referenced", n.value) - } - if d.aliases[n.value] { - failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n.value] = true - good = d.unmarshal(an, out) - delete(d.aliases, n.value) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, 
zeroValue)
-	}
-}
-
-func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
-	var tag string
-	var resolved interface{}
-	if n.tag == "" && !n.implicit {
-		tag = yaml_STR_TAG
-		resolved = n.value
-	} else {
-		tag, resolved = resolve(n.tag, n.value)
-		if tag == yaml_BINARY_TAG {
-			data, err := base64.StdEncoding.DecodeString(resolved.(string))
-			if err != nil {
-				failf("!!binary value contains invalid base64 data")
-			}
-			resolved = string(data)
-		}
-	}
-	if resolved == nil {
-		if out.Kind() == reflect.Map && !out.CanAddr() {
-			resetMap(out)
-		} else {
-			out.Set(reflect.Zero(out.Type()))
-		}
-		return true
-	}
-	if s, ok := resolved.(string); ok && out.CanAddr() {
-		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
-			err := u.UnmarshalText([]byte(s))
-			if err != nil {
-				fail(err)
-			}
-			return true
-		}
-	}
-	switch out.Kind() {
-	case reflect.String:
-		if tag == yaml_BINARY_TAG {
-			out.SetString(resolved.(string))
-			good = true
-		} else if resolved != nil {
-			out.SetString(n.value)
-			good = true
-		}
-	case reflect.Interface:
-		if resolved == nil {
-			out.Set(reflect.Zero(out.Type()))
-		} else {
-			out.Set(reflect.ValueOf(resolved))
-		}
-		good = true
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		switch resolved := resolved.(type) {
-		case int:
-			if !out.OverflowInt(int64(resolved)) {
-				out.SetInt(int64(resolved))
-				good = true
-			}
-		case int64:
-			if !out.OverflowInt(resolved) {
-				out.SetInt(resolved)
-				good = true
-			}
-		case uint64:
-			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
-				out.SetInt(int64(resolved))
-				good = true
-			}
-		case float64:
-			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
-				out.SetInt(int64(resolved))
-				good = true
-			}
-		case string:
-			if out.Type() == durationType {
-				d, err := time.ParseDuration(resolved)
-				if err == nil {
-					out.SetInt(int64(d))
-					good = true
-				}
-			}
-		}
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		switch resolved := resolved.(type) {
-		case int:
-			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				good = true
-			}
-		case int64:
-			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				good = true
-			}
-		case uint64:
-			if !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				good = true
-			}
-		case float64:
-			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				good = true
-			}
-		}
-	case reflect.Bool:
-		switch resolved := resolved.(type) {
-		case bool:
-			out.SetBool(resolved)
-			good = true
-		}
-	case reflect.Float32, reflect.Float64:
-		switch resolved := resolved.(type) {
-		case int:
-			out.SetFloat(float64(resolved))
-			good = true
-		case int64:
-			out.SetFloat(float64(resolved))
-			good = true
-		case uint64:
-			out.SetFloat(float64(resolved))
-			good = true
-		case float64:
-			out.SetFloat(resolved)
-			good = true
-		}
-	case reflect.Ptr:
-		if out.Type().Elem() == reflect.TypeOf(resolved) {
-			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
- elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - good = true - } - } - if !good { - d.terror(n, tag, out) - } - return good -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Interface: - // No type hints. Will have to use a generic sequence. - iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - out.Set(out.Slice(0, j)) - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - out.SetMapIndex(k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) 
- continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - inlineMap.SetMapIndex(name, value) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go deleted file mode 100644 index 04fdd9e72c..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go +++ /dev/null @@ -1,966 +0,0 @@ -package yaml_test - -import ( - "errors" - . "gopkg.in/check.v1" - "gopkg.in/yaml.v2" - "math" - "net" - "reflect" - "strings" - "time" -) - -var unmarshalIntTest = 123 - -var unmarshalTests = []struct { - data string - value interface{} -}{ - { - "", - &struct{}{}, - }, { - "{}", &struct{}{}, - }, { - "v: hi", - map[string]string{"v": "hi"}, - }, { - "v: hi", map[string]interface{}{"v": "hi"}, - }, { - "v: true", - map[string]string{"v": "true"}, - }, { - "v: true", - map[string]interface{}{"v": true}, - }, { - "v: 10", - map[string]interface{}{"v": 10}, - }, { - "v: 0b10", - map[string]interface{}{"v": 2}, - }, { - "v: 0xA", - map[string]interface{}{"v": 10}, - }, { - "v: 4294967296", - map[string]int64{"v": 4294967296}, - }, { - "v: 0.1", - map[string]interface{}{"v": 0.1}, - }, { - "v: .1", - map[string]interface{}{"v": 0.1}, - }, { - "v: .Inf", - map[string]interface{}{"v": math.Inf(+1)}, - }, { - "v: -.Inf", - map[string]interface{}{"v": math.Inf(-1)}, - }, { - "v: -10", - map[string]interface{}{"v": -10}, - }, { - "v: -.1", - map[string]interface{}{"v": -0.1}, - }, - - // Simple values. - { - "123", - &unmarshalIntTest, - }, - - // Floats from spec - { - "canonical: 6.8523e+5", - map[string]interface{}{"canonical": 6.8523e+5}, - }, { - "expo: 685.230_15e+03", - map[string]interface{}{"expo": 685.23015e+03}, - }, { - "fixed: 685_230.15", - map[string]interface{}{"fixed": 685230.15}, - }, { - "neginf: -.inf", - map[string]interface{}{"neginf": math.Inf(-1)}, - }, { - "fixed: 685_230.15", - map[string]float64{"fixed": 685230.15}, - }, - //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported - //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. 
- - // Bools from spec - { - "canonical: y", - map[string]interface{}{"canonical": true}, - }, { - "answer: NO", - map[string]interface{}{"answer": false}, - }, { - "logical: True", - map[string]interface{}{"logical": true}, - }, { - "option: on", - map[string]interface{}{"option": true}, - }, { - "option: on", - map[string]bool{"option": true}, - }, - // Ints from spec - { - "canonical: 685230", - map[string]interface{}{"canonical": 685230}, - }, { - "decimal: +685_230", - map[string]interface{}{"decimal": 685230}, - }, { - "octal: 02472256", - map[string]interface{}{"octal": 685230}, - }, { - "hexa: 0x_0A_74_AE", - map[string]interface{}{"hexa": 685230}, - }, { - "bin: 0b1010_0111_0100_1010_1110", - map[string]interface{}{"bin": 685230}, - }, { - "bin: -0b101010", - map[string]interface{}{"bin": -42}, - }, { - "decimal: +685_230", - map[string]int{"decimal": 685230}, - }, - - //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported - - // Nulls from spec - { - "empty:", - map[string]interface{}{"empty": nil}, - }, { - "canonical: ~", - map[string]interface{}{"canonical": nil}, - }, { - "english: null", - map[string]interface{}{"english": nil}, - }, { - "~: null key", - map[interface{}]string{nil: "null key"}, - }, { - "empty:", - map[string]*bool{"empty": nil}, - }, - - // Flow sequence - { - "seq: [A,B]", - map[string]interface{}{"seq": []interface{}{"A", "B"}}, - }, { - "seq: [A,B,C,]", - map[string][]string{"seq": []string{"A", "B", "C"}}, - }, { - "seq: [A,1,C]", - map[string][]string{"seq": []string{"A", "1", "C"}}, - }, { - "seq: [A,1,C]", - map[string][]int{"seq": []int{1}}, - }, { - "seq: [A,1,C]", - map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, - }, - // Block sequence - { - "seq:\n - A\n - B", - map[string]interface{}{"seq": []interface{}{"A", "B"}}, - }, { - "seq:\n - A\n - B\n - C", - map[string][]string{"seq": []string{"A", "B", "C"}}, - }, { - "seq:\n - A\n - 1\n - C", - map[string][]string{"seq": []string{"A", "1", "C"}}, - }, { - "seq:\n - A\n - 1\n - C", - map[string][]int{"seq": []int{1}}, - }, { - "seq:\n - A\n - 1\n - C", - map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, - }, - - // Literal block scalar - { - "scalar: | # Comment\n\n literal\n\n \ttext\n\n", - map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, - }, - - // Folded block scalar - { - "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", - map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, - }, - - // Map inside interface with no type hints. - { - "a: {b: c}", - map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, - }, - - // Structs and type conversions. 
- { - "hello: world", - &struct{ Hello string }{"world"}, - }, { - "a: {b: c}", - &struct{ A struct{ B string } }{struct{ B string }{"c"}}, - }, { - "a: {b: c}", - &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, - }, { - "a: {b: c}", - &struct{ A map[string]string }{map[string]string{"b": "c"}}, - }, { - "a: {b: c}", - &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, - }, { - "a:", - &struct{ A map[string]string }{}, - }, { - "a: 1", - &struct{ A int }{1}, - }, { - "a: 1", - &struct{ A float64 }{1}, - }, { - "a: 1.0", - &struct{ A int }{1}, - }, { - "a: 1.0", - &struct{ A uint }{1}, - }, { - "a: [1, 2]", - &struct{ A []int }{[]int{1, 2}}, - }, { - "a: 1", - &struct{ B int }{0}, - }, { - "a: 1", - &struct { - B int "a" - }{1}, - }, { - "a: y", - &struct{ A bool }{true}, - }, - - // Some cross type conversions - { - "v: 42", - map[string]uint{"v": 42}, - }, { - "v: -42", - map[string]uint{}, - }, { - "v: 4294967296", - map[string]uint64{"v": 4294967296}, - }, { - "v: -4294967296", - map[string]uint64{}, - }, - - // int - { - "int_max: 2147483647", - map[string]int{"int_max": math.MaxInt32}, - }, - { - "int_min: -2147483648", - map[string]int{"int_min": math.MinInt32}, - }, - { - "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 - map[string]int{}, - }, - - // int64 - { - "int64_max: 9223372036854775807", - map[string]int64{"int64_max": math.MaxInt64}, - }, - { - "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", - map[string]int64{"int64_max_base2": math.MaxInt64}, - }, - { - "int64_min: -9223372036854775808", - map[string]int64{"int64_min": math.MinInt64}, - }, - { - "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", - map[string]int64{"int64_neg_base2": -math.MaxInt64}, - }, - { - "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 - map[string]int64{}, - }, - - // uint - { - "uint_min: 0", - map[string]uint{"uint_min": 0}, - }, - { - "uint_max: 4294967295", - map[string]uint{"uint_max": math.MaxUint32}, - }, - { - "uint_underflow: -1", - map[string]uint{}, - }, - - // uint64 - { - "uint64_min: 0", - map[string]uint{"uint64_min": 0}, - }, - { - "uint64_max: 18446744073709551615", - map[string]uint64{"uint64_max": math.MaxUint64}, - }, - { - "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", - map[string]uint64{"uint64_max_base2": math.MaxUint64}, - }, - { - "uint64_maxint64: 9223372036854775807", - map[string]uint64{"uint64_maxint64": math.MaxInt64}, - }, - { - "uint64_underflow: -1", - map[string]uint64{}, - }, - - // float32 - { - "float32_max: 3.40282346638528859811704183484516925440e+38", - map[string]float32{"float32_max": math.MaxFloat32}, - }, - { - "float32_nonzero: 1.401298464324817070923729583289916131280e-45", - map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, - }, - { - "float32_maxuint64: 18446744073709551615", - map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, - }, - { - "float32_maxuint64+1: 18446744073709551616", - map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, - }, - - // float64 - { - "float64_max: 1.797693134862315708145274237317043567981e+308", - map[string]float64{"float64_max": math.MaxFloat64}, - }, - { - "float64_nonzero: 4.940656458412465441765687928682213723651e-324", - map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, - }, - { - "float64_maxuint64: 18446744073709551615", - map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, 
- }, - { - "float64_maxuint64+1: 18446744073709551616", - map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, - }, - - // Overflow cases. - { - "v: 4294967297", - map[string]int32{}, - }, { - "v: 128", - map[string]int8{}, - }, - - // Quoted values. - { - "'1': '\"2\"'", - map[interface{}]interface{}{"1": "\"2\""}, - }, { - "v:\n- A\n- 'B\n\n C'\n", - map[string][]string{"v": []string{"A", "B\nC"}}, - }, - - // Explicit tags. - { - "v: !!float '1.1'", - map[string]interface{}{"v": 1.1}, - }, { - "v: !!null ''", - map[string]interface{}{"v": nil}, - }, { - "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", - map[string]interface{}{"v": 1}, - }, - - // Anchors and aliases. - { - "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", - &struct{ A, B, C, D int }{1, 2, 1, 2}, - }, { - "a: &a {c: 1}\nb: *a", - &struct { - A, B struct { - C int - } - }{struct{ C int }{1}, struct{ C int }{1}}, - }, { - "a: &a [1, 2]\nb: *a", - &struct{ B []int }{[]int{1, 2}}, - }, { - "b: *a\na: &a {c: 1}", - &struct { - A, B struct { - C int - } - }{struct{ C int }{1}, struct{ C int }{1}}, - }, - - // Bug #1133337 - { - "foo: ''", - map[string]*string{"foo": new(string)}, - }, { - "foo: null", - map[string]string{"foo": ""}, - }, { - "foo: null", - map[string]interface{}{"foo": nil}, - }, - - // Ignored field - { - "a: 1\nb: 2\n", - &struct { - A int - B int "-" - }{1, 0}, - }, - - // Bug #1191981 - { - "" + - "%YAML 1.1\n" + - "--- !!str\n" + - `"Generic line break (no glyph)\n\` + "\n" + - ` Generic line break (glyphed)\n\` + "\n" + - ` Line separator\u2028\` + "\n" + - ` Paragraph separator\u2029"` + "\n", - "" + - "Generic line break (no glyph)\n" + - "Generic line break (glyphed)\n" + - "Line separator\u2028Paragraph separator\u2029", - }, - - // Struct inlining - { - "a: 1\nb: 2\nc: 3\n", - &struct { - A int - C inlineB `yaml:",inline"` - }{1, inlineB{2, inlineC{3}}}, - }, - - // Map inlining - { - "a: 1\nb: 2\nc: 3\n", - &struct { - A int - C map[string]int `yaml:",inline"` - }{1, map[string]int{"b": 2, "c": 3}}, - }, - - // bug 1243827 - { - "a: -b_c", - map[string]interface{}{"a": "-b_c"}, - }, - { - "a: +b_c", - map[string]interface{}{"a": "+b_c"}, - }, - { - "a: 50cent_of_dollar", - map[string]interface{}{"a": "50cent_of_dollar"}, - }, - - // Duration - { - "a: 3s", - map[string]time.Duration{"a": 3 * time.Second}, - }, - - // Issue #24. - { - "a: ", - map[string]string{"a": ""}, - }, - - // Base 60 floats are obsolete and unsupported. - { - "a: 1:1\n", - map[string]string{"a": "1:1"}, - }, - - // Binary data. - { - "a: !!binary gIGC\n", - map[string]string{"a": "\x80\x81\x82"}, - }, { - "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", - map[string]string{"a": strings.Repeat("\x90", 54)}, - }, { - "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", - map[string]string{"a": strings.Repeat("\x00", 52)}, - }, - - // Ordered maps. - { - "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", - &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, - }, - - // Issue #39. - { - "a:\n b:\n c: d\n", - map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, - }, - - // Custom map type. - { - "a: {b: c}", - M{"a": M{"b": "c"}}, - }, - - // Support encoding.TextUnmarshaler. - { - "a: 1.2.3.4\n", - map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, - }, - { - "a: 2015-02-24T18:19:39Z\n", - map[string]time.Time{"a": time.Unix(1424801979, 0)}, - }, - - // Encode empty lists as zero-length slices. 
- { - "a: []", - &struct{ A []int }{[]int{}}, - }, -} - -type M map[interface{}]interface{} - -type inlineB struct { - B int - inlineC `yaml:",inline"` -} - -type inlineC struct { - C int -} - -func (s *S) TestUnmarshal(c *C) { - for _, item := range unmarshalTests { - t := reflect.ValueOf(item.value).Type() - var value interface{} - switch t.Kind() { - case reflect.Map: - value = reflect.MakeMap(t).Interface() - case reflect.String: - value = reflect.New(t).Interface() - case reflect.Ptr: - value = reflect.New(t.Elem()).Interface() - default: - c.Fatalf("missing case for %s", t) - } - err := yaml.Unmarshal([]byte(item.data), value) - if _, ok := err.(*yaml.TypeError); !ok { - c.Assert(err, IsNil) - } - if t.Kind() == reflect.String { - c.Assert(*value.(*string), Equals, item.value) - } else { - c.Assert(value, DeepEquals, item.value) - } - } -} - -func (s *S) TestUnmarshalNaN(c *C) { - value := map[string]interface{}{} - err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) - c.Assert(err, IsNil) - c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) -} - -var unmarshalErrorTests = []struct { - data, error string -}{ - {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, - {"v: [A,", "yaml: line 1: did not find expected node content"}, - {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, - {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, - {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, - {"value: -", "yaml: block sequence entries are not allowed in this context"}, - {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, - {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, - {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, -} - -func (s *S) TestUnmarshalErrors(c *C) { - for _, item := range unmarshalErrorTests { - var value interface{} - err := yaml.Unmarshal([]byte(item.data), &value) - c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) - } -} - -var unmarshalerTests = []struct { - data, tag string - value interface{} -}{ - {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, - {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, - {"_: 10", "!!int", 10}, - {"_: null", "!!null", nil}, - {`_: BAR!`, "!!str", "BAR!"}, - {`_: "BAR!"`, "!!str", "BAR!"}, - {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, -} - -var unmarshalerResult = map[int]error{} - -type unmarshalerType struct { - value interface{} -} - -func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { - if err := unmarshal(&o.value); err != nil { - return err - } - if i, ok := o.value.(int); ok { - if result, ok := unmarshalerResult[i]; ok { - return result - } - } - return nil -} - -type unmarshalerPointer struct { - Field *unmarshalerType "_" -} - -type unmarshalerValue struct { - Field unmarshalerType "_" -} - -func (s *S) TestUnmarshalerPointerField(c *C) { - for _, item := range unmarshalerTests { - obj := &unmarshalerPointer{} - err := yaml.Unmarshal([]byte(item.data), obj) - c.Assert(err, IsNil) - if item.value == nil { - c.Assert(obj.Field, IsNil) - } else { - c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) - c.Assert(obj.Field.value, DeepEquals, item.value) - } - } -} - -func (s *S) TestUnmarshalerValueField(c *C) { - for _, item := range unmarshalerTests { - obj := &unmarshalerValue{} - err := yaml.Unmarshal([]byte(item.data), obj) - c.Assert(err, IsNil) - c.Assert(obj.Field, 
NotNil, Commentf("Pointer not initialized (%#v)", item.value)) - c.Assert(obj.Field.value, DeepEquals, item.value) - } -} - -func (s *S) TestUnmarshalerWholeDocument(c *C) { - obj := &unmarshalerType{} - err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) - c.Assert(err, IsNil) - value, ok := obj.value.(map[interface{}]interface{}) - c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) - c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) -} - -func (s *S) TestUnmarshalerTypeError(c *C) { - unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} - unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} - defer func() { - delete(unmarshalerResult, 2) - delete(unmarshalerResult, 4) - }() - - type T struct { - Before int - After int - M map[string]*unmarshalerType - } - var v T - data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` - err := yaml.Unmarshal([]byte(data), &v) - c.Assert(err, ErrorMatches, ""+ - "yaml: unmarshal errors:\n"+ - " line 1: cannot unmarshal !!str `A` into int\n"+ - " foo\n"+ - " bar\n"+ - " line 1: cannot unmarshal !!str `B` into int") - c.Assert(v.M["abc"], NotNil) - c.Assert(v.M["def"], IsNil) - c.Assert(v.M["ghi"], NotNil) - c.Assert(v.M["jkl"], IsNil) - - c.Assert(v.M["abc"].value, Equals, 1) - c.Assert(v.M["ghi"].value, Equals, 3) -} - -type proxyTypeError struct{} - -func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - var a int32 - var b int64 - if err := unmarshal(&s); err != nil { - panic(err) - } - if s == "a" { - if err := unmarshal(&b); err == nil { - panic("should have failed") - } - return unmarshal(&a) - } - if err := unmarshal(&a); err == nil { - panic("should have failed") - } - return unmarshal(&b) -} - -func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { - type T struct { - Before int - After int - M map[string]*proxyTypeError - } - var v T - data := `{before: A, m: {abc: a, def: b}, after: B}` - err := yaml.Unmarshal([]byte(data), &v) - c.Assert(err, ErrorMatches, ""+ - "yaml: unmarshal errors:\n"+ - " line 1: cannot unmarshal !!str `A` into int\n"+ - " line 1: cannot unmarshal !!str `a` into int32\n"+ - " line 1: cannot unmarshal !!str `b` into int64\n"+ - " line 1: cannot unmarshal !!str `B` into int") -} - -type failingUnmarshaler struct{} - -var failingErr = errors.New("failingErr") - -func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { - return failingErr -} - -func (s *S) TestUnmarshalerError(c *C) { - err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) - c.Assert(err, Equals, failingErr) -} - -type sliceUnmarshaler []int - -func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { - var slice []int - err := unmarshal(&slice) - if err == nil { - *su = slice - return nil - } - - var intVal int - err = unmarshal(&intVal) - if err == nil { - *su = []int{intVal} - return nil - } - - return err -} - -func (s *S) TestUnmarshalerRetry(c *C) { - var su sliceUnmarshaler - err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) - c.Assert(err, IsNil) - c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) - - err = yaml.Unmarshal([]byte("1"), &su) - c.Assert(err, IsNil) - c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) -} - -// From http://yaml.org/type/merge.html -var mergeTests = ` -anchors: - list: - - &CENTER { "x": 1, "y": 2 } - - &LEFT { "x": 0, "y": 2 } - - &BIG { "r": 10 } - - &SMALL { "r": 1 } - -# All the following maps are equal: - -plain: - # Explicit keys - "x": 1 - "y": 2 - "r": 
10 - label: center/big - -mergeOne: - # Merge one map - << : *CENTER - "r": 10 - label: center/big - -mergeMultiple: - # Merge multiple maps - << : [ *CENTER, *BIG ] - label: center/big - -override: - # Override - << : [ *BIG, *LEFT, *SMALL ] - "x": 1 - label: center/big - -shortTag: - # Explicit short merge tag - !!merge "<<" : [ *CENTER, *BIG ] - label: center/big - -longTag: - # Explicit merge long tag - !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ] - label: center/big - -inlineMap: - # Inlined map - << : {"x": 1, "y": 2, "r": 10} - label: center/big - -inlineSequenceMap: - # Inlined map in sequence - << : [ *CENTER, {"r": 10} ] - label: center/big -` - -func (s *S) TestMerge(c *C) { - var want = map[interface{}]interface{}{ - "x": 1, - "y": 2, - "r": 10, - "label": "center/big", - } - - var m map[interface{}]interface{} - err := yaml.Unmarshal([]byte(mergeTests), &m) - c.Assert(err, IsNil) - for name, test := range m { - if name == "anchors" { - continue - } - c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) - } -} - -func (s *S) TestMergeStruct(c *C) { - type Data struct { - X, Y, R int - Label string - } - want := Data{1, 2, 10, "center/big"} - - var m map[string]Data - err := yaml.Unmarshal([]byte(mergeTests), &m) - c.Assert(err, IsNil) - for name, test := range m { - if name == "anchors" { - continue - } - c.Assert(test, Equals, want, Commentf("test %q failed", name)) - } -} - -var unmarshalNullTests = []func() interface{}{ - func() interface{} { var v interface{}; v = "v"; return &v }, - func() interface{} { var s = "s"; return &s }, - func() interface{} { var s = "s"; sptr := &s; return &sptr }, - func() interface{} { var i = 1; return &i }, - func() interface{} { var i = 1; iptr := &i; return &iptr }, - func() interface{} { m := map[string]int{"s": 1}; return &m }, - func() interface{} { m := map[string]int{"s": 1}; return m }, -} - -func (s *S) TestUnmarshalNull(c *C) { - for _, test := range unmarshalNullTests { - item := test() - zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() - err := yaml.Unmarshal([]byte("null"), item) - c.Assert(err, IsNil) - if reflect.TypeOf(item).Kind() == reflect.Map { - c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) - } else { - c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) - } - } -} - -func (s *S) TestUnmarshalSliceOnPreset(c *C) { - // Issue #48. - v := struct{ A []int }{[]int{1}} - yaml.Unmarshal([]byte("a: [2]"), &v) - c.Assert(v.A, DeepEquals, []int{2}) -} - -//var data []byte -//func init() { -// var err error -// data, err = ioutil.ReadFile("/tmp/file.yaml") -// if err != nil { -// panic(err) -// } -//} -// -//func (s *S) BenchmarkUnmarshal(c *C) { -// var err error -// for i := 0; i < c.N; i++ { -// var v map[string]interface{} -// err = yaml.Unmarshal(data, &v) -// } -// if err != nil { -// panic(err) -// } -//} -// -//func (s *S) BenchmarkMarshal(c *C) { -// var v map[string]interface{} -// yaml.Unmarshal(data, &v) -// c.ResetTimer() -// for i := 0; i < c.N; i++ { -// yaml.Marshal(&v) -// } -//} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index 2befd553ed..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// Flush the buffer if needed.
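// Editor's note (not part of the original diff): flush/put below keep a
// fixed-size output buffer and flush whenever fewer than five bytes remain,
// so any pending write (up to a 4-byte UTF-8 rune, or a CRLF break) still
// fits. A minimal, self-contained sketch of that invariant; all names here
// (sink, flushIfNearFull) are hypothetical, not part of the library.
package main

import "fmt"

type sink struct {
	buf []byte // fixed-size staging buffer, like emitter.buffer
	pos int    // like emitter.buffer_pos
	out []byte // stands in for the real flush destination
}

// flushIfNearFull mirrors the emitter's check: drain before the next
// multi-byte write could overflow the buffer.
func (s *sink) flushIfNearFull() {
	if s.pos+5 >= len(s.buf) {
		s.out = append(s.out, s.buf[:s.pos]...)
		s.pos = 0
	}
}

// put writes one byte, flushing first when space is low.
func (s *sink) put(b byte) {
	s.flushIfNearFull()
	s.buf[s.pos] = b
	s.pos++
}

func main() {
	s := &sink{buf: make([]byte, 8)}
	for _, b := range []byte("hello, yaml") {
		s.put(b)
	}
	s.out = append(s.out, s.buf[:s.pos]...) // final drain
	fmt.Println(string(s.out))              // hello, yaml
}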
-func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
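// Editor's note (not part of the original diff): the dispatcher below is a
// plain switch-on-state machine: each emit_* handler consumes one event and
// stores the successor state on the emitter. A reduced sketch of the same
// control shape; the states and machine type here are hypothetical.
package main

import "fmt"

type state int

const (
	stateStreamStart state = iota
	stateDocument
	stateEnd
)

type machine struct{ state state }

// step dispatches on the current state, and each branch picks the next one,
// just as yaml_emitter_state_machine delegates to per-state handlers.
func (m *machine) step(event string) error {
	switch m.state {
	case stateStreamStart:
		fmt.Println("stream start:", event)
		m.state = stateDocument
	case stateDocument:
		fmt.Println("document:", event)
		m.state = stateEnd
	case stateEnd:
		return fmt.Errorf("expected nothing after STREAM-END")
	}
	return nil
}

func main() {
	m := &machine{}
	for _, e := range []string{"STREAM-START", "DOCUMENT-START", "SCALAR"} {
		if err := m.step(e); err != nil {
			fmt.Println("error:", err)
		}
	}
}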
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
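// Editor's note (not part of the original diff): document_start below leaves
// the "---" marker implicit when it can, but forces it after any %YAML or
// %TAG directive. A sketch of that framing rule with hypothetical names:
package main

import (
	"fmt"
	"strings"
)

// frameDocument prints directives, then "---" whenever directives made the
// start marker mandatory (implicit = false in the real code).
func frameDocument(body string, directives []string) string {
	var b strings.Builder
	for _, d := range directives {
		b.WriteString(d)
		b.WriteByte('\n')
	}
	if len(directives) > 0 {
		b.WriteString("---\n")
	}
	b.WriteString(body)
	b.WriteByte('\n')
	return b.String()
}

func main() {
	fmt.Print(frameDocument("a: 1", nil))
	fmt.Print(frameDocument("a: 1", []string{"%YAML 1.1"}))
}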
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
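// Editor's note (not part of the original diff): the flow-collection
// functions around here thread a `first` flag so a ',' separator is written
// before every entry except the first, and a simple value is introduced by
// a bare ':'. The same separator logic in miniature (names hypothetical):
package main

import (
	"fmt"
	"strings"
)

func writeFlowMapping(pairs [][2]string) string {
	var b strings.Builder
	b.WriteByte('{')
	for i, kv := range pairs {
		if i > 0 { // separator before every entry but the first
			b.WriteString(", ")
		}
		fmt.Fprintf(&b, "%s: %s", kv[0], kv[1]) // simple key, then ':' value
	}
	b.WriteByte('}')
	return b.String()
}

func main() {
	fmt.Println(writeFlowMapping([][2]string{{"x", "1"}, {"y", "2"}})) // {x: 1, y: 2}
}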
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") - } - return false -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
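// Editor's note (not part of the original diff): the emptiness checks below
// peek at the next two queued events without consuming them, which is how
// the emitter decides that "[]" or "{}" can stay on one line. A tiny queue
// with the same two-event lookahead (types hypothetical):
package main

import "fmt"

type eventType int

const (
	seqStart eventType = iota
	seqEnd
	scalar
)

type queue struct {
	events []eventType
	head   int
}

// emptySequenceAhead reports whether the next two events form an empty "[]".
func (q *queue) emptySequenceAhead() bool {
	if len(q.events)-q.head < 2 {
		return false
	}
	return q.events[q.head] == seqStart && q.events[q.head+1] == seqEnd
}

func main() {
	fmt.Println((&queue{events: []eventType{seqStart, seqEnd}}).emptySequenceAhead())  // true
	fmt.Println((&queue{events: []eventType{seqStart, scalar}}).emptySequenceAhead()) // false
}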
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag.
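// Editor's note (not part of the original diff): process_tag below emits a
// tag either as handle+suffix when a %TAG directive matched (e.g. "!!str"),
// or in the verbatim "!<...>" form otherwise. A sketch of that choice;
// formatTag is hypothetical:
package main

import "fmt"

func formatTag(handle, suffix, full string) string {
	if handle != "" {
		return handle + suffix // e.g. "!!" + "str" -> "!!str"
	}
	return "!<" + full + ">" // verbatim form, no handle available
}

func main() {
	fmt.Println(formatTag("!!", "str", ""))
	fmt.Println(formatTag("", "", "tag:yaml.org,2002:merge"))
}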
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. 
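// Editor's note (not part of the original diff): anchors and aliases must be
// non-empty and built only from characters accepted by is_alpha (which, as
// in libyaml, admits ASCII letters, digits, '_' and '-'). An equivalent
// check over a Go string; validAnchor is hypothetical:
package main

import "fmt"

func validAnchor(s string) bool {
	if s == "" {
		return false
	}
	for i := 0; i < len(s); i++ {
		c := s[i]
		ok := c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' ||
			c >= 'a' && c <= 'z' || c == '_' || c == '-'
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validAnchor("CENTER"))     // true
	fmt.Println(validAnchor(""))           // false
	fmt.Println(validAnchor("bad anchor")) // false: space is rejected
}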
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceeded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceeded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceeded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceeded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
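// Editor's note (not part of the original diff): analyze_scalar above sets a
// family of flags (leading/trailing space, break/space runs, special
// characters) that later veto plain, single-quoted or block styles. A
// deliberately reduced classifier in the same spirit; needsQuoting is
// hypothetical and much coarser than the real rules:
package main

import (
	"fmt"
	"strings"
)

// needsQuoting refuses plain style for values with surrounding whitespace or
// embedded line breaks, two of the conditions the real analysis tracks.
func needsQuoting(v string) bool {
	return v != strings.TrimSpace(v) || strings.Contains(v, "\n")
}

func main() {
	fmt.Println(needsQuoting("plain"))      // false
	fmt.Println(needsQuoting(" padded"))    // true
	fmt.Println(needsQuoting("two\nlines")) // true
}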
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } 
else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, 
rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 84f8499551..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,306 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" -) - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool -} - -func newEncoder() (e *encoder) { - e = &encoder{} - e.must(yaml_emitter_initialize(&e.emitter)) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) - e.emit() - e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) - e.emit() - return e -} - -func (e *encoder) finish() { - e.must(yaml_document_end_event_initialize(&e.event, true)) - e.emit() - e.emitter.open_ended = false - e.must(yaml_stream_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. 
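- // [Go] Editorial note: the condition below deliberately tolerates emitter
- // failures for DOCUMENT-END and STREAM-END events; any other failing event
- // reaches e.must(false), which panics with the emitter's problem string.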
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { - e.must(false) - } -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() { - e.nilv() - return - } - iface := in.Interface() - if m, ok := iface.(Marshaler); ok { - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - } else if m, ok := iface.(encoding.TextMarshaler); ok { - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - } - switch in.Kind() { - case reflect.Interface: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - e.structv(tag, in) - case reflect.Slice: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - f() - e.must(yaml_mapping_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) slicev(tag 
string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - rtag, rs := resolve("", s) - if rtag == yaml_BINARY_TAG { - if tag == "" || tag == yaml_STR_TAG { - tag = rtag - s = rs.(string) - } else if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } else { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - } - if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if strings.Contains(s, "\n") { - style = yaml_LITERAL_SCALAR_STYLE - } else { - style = yaml_PLAIN_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // FIXME: Handle 64 bits here. - s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go deleted file mode 100644 index 84099bd385..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go +++ /dev/null @@ -1,501 +0,0 @@ -package yaml_test - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" - - .
"gopkg.in/check.v1" - "gopkg.in/yaml.v2" - "net" - "os" -) - -var marshalIntTest = 123 - -var marshalTests = []struct { - value interface{} - data string -}{ - { - nil, - "null\n", - }, { - &struct{}{}, - "{}\n", - }, { - map[string]string{"v": "hi"}, - "v: hi\n", - }, { - map[string]interface{}{"v": "hi"}, - "v: hi\n", - }, { - map[string]string{"v": "true"}, - "v: \"true\"\n", - }, { - map[string]string{"v": "false"}, - "v: \"false\"\n", - }, { - map[string]interface{}{"v": true}, - "v: true\n", - }, { - map[string]interface{}{"v": false}, - "v: false\n", - }, { - map[string]interface{}{"v": 10}, - "v: 10\n", - }, { - map[string]interface{}{"v": -10}, - "v: -10\n", - }, { - map[string]uint{"v": 42}, - "v: 42\n", - }, { - map[string]interface{}{"v": int64(4294967296)}, - "v: 4294967296\n", - }, { - map[string]int64{"v": int64(4294967296)}, - "v: 4294967296\n", - }, { - map[string]uint64{"v": 4294967296}, - "v: 4294967296\n", - }, { - map[string]interface{}{"v": "10"}, - "v: \"10\"\n", - }, { - map[string]interface{}{"v": 0.1}, - "v: 0.1\n", - }, { - map[string]interface{}{"v": float64(0.1)}, - "v: 0.1\n", - }, { - map[string]interface{}{"v": -0.1}, - "v: -0.1\n", - }, { - map[string]interface{}{"v": math.Inf(+1)}, - "v: .inf\n", - }, { - map[string]interface{}{"v": math.Inf(-1)}, - "v: -.inf\n", - }, { - map[string]interface{}{"v": math.NaN()}, - "v: .nan\n", - }, { - map[string]interface{}{"v": nil}, - "v: null\n", - }, { - map[string]interface{}{"v": ""}, - "v: \"\"\n", - }, { - map[string][]string{"v": []string{"A", "B"}}, - "v:\n- A\n- B\n", - }, { - map[string][]string{"v": []string{"A", "B\nC"}}, - "v:\n- A\n- |-\n B\n C\n", - }, { - map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, - "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", - }, { - map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, - "a:\n b: c\n", - }, { - map[string]interface{}{"a": "-"}, - "a: '-'\n", - }, - - // Simple values. 
- { - &marshalIntTest, - "123\n", - }, - - // Structures - { - &struct{ Hello string }{"world"}, - "hello: world\n", - }, { - &struct { - A struct { - B string - } - }{struct{ B string }{"c"}}, - "a:\n b: c\n", - }, { - &struct { - A *struct { - B string - } - }{&struct{ B string }{"c"}}, - "a:\n b: c\n", - }, { - &struct { - A *struct { - B string - } - }{}, - "a: null\n", - }, { - &struct{ A int }{1}, - "a: 1\n", - }, { - &struct{ A []int }{[]int{1, 2}}, - "a:\n- 1\n- 2\n", - }, { - &struct { - B int "a" - }{1}, - "a: 1\n", - }, { - &struct{ A bool }{true}, - "a: true\n", - }, - - // Conditional flag - { - &struct { - A int "a,omitempty" - B int "b,omitempty" - }{1, 0}, - "a: 1\n", - }, { - &struct { - A int "a,omitempty" - B int "b,omitempty" - }{0, 0}, - "{}\n", - }, { - &struct { - A *struct{ X, y int } "a,omitempty,flow" - }{&struct{ X, y int }{1, 2}}, - "a: {x: 1}\n", - }, { - &struct { - A *struct{ X, y int } "a,omitempty,flow" - }{nil}, - "{}\n", - }, { - &struct { - A *struct{ X, y int } "a,omitempty,flow" - }{&struct{ X, y int }{}}, - "a: {x: 0}\n", - }, { - &struct { - A struct{ X, y int } "a,omitempty,flow" - }{struct{ X, y int }{1, 2}}, - "a: {x: 1}\n", - }, { - &struct { - A struct{ X, y int } "a,omitempty,flow" - }{struct{ X, y int }{0, 1}}, - "{}\n", - }, { - &struct { - A float64 "a,omitempty" - B float64 "b,omitempty" - }{1, 0}, - "a: 1\n", - }, - - // Flow flag - { - &struct { - A []int "a,flow" - }{[]int{1, 2}}, - "a: [1, 2]\n", - }, { - &struct { - A map[string]string "a,flow" - }{map[string]string{"b": "c", "d": "e"}}, - "a: {b: c, d: e}\n", - }, { - &struct { - A struct { - B, D string - } "a,flow" - }{struct{ B, D string }{"c", "e"}}, - "a: {b: c, d: e}\n", - }, - - // Unexported field - { - &struct { - u int - A int - }{0, 1}, - "a: 1\n", - }, - - // Ignored field - { - &struct { - A int - B int "-" - }{1, 2}, - "a: 1\n", - }, - - // Struct inlining - { - &struct { - A int - C inlineB `yaml:",inline"` - }{1, inlineB{2, inlineC{3}}}, - "a: 1\nb: 2\nc: 3\n", - }, - - // Map inlining - { - &struct { - A int - C map[string]int `yaml:",inline"` - }{1, map[string]int{"b": 2, "c": 3}}, - "a: 1\nb: 2\nc: 3\n", - }, - - // Duration - { - map[string]time.Duration{"a": 3 * time.Second}, - "a: 3s\n", - }, - - // Issue #24: bug in map merging logic. - { - map[string]string{"a": ""}, - "a: \n", - }, - - // Issue #34: marshal unsupported base 60 floats quoted for compatibility - // with old YAML 1.1 parsers. - { - map[string]string{"a": "1:1"}, - "a: \"1:1\"\n", - }, - - // Binary data. - { - map[string]string{"a": "\x00"}, - "a: \"\\0\"\n", - }, { - map[string]string{"a": "\x80\x81\x82"}, - "a: !!binary gIGC\n", - }, { - map[string]string{"a": strings.Repeat("\x90", 54)}, - "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", - }, - - // Ordered maps. - { - &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, - "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", - }, - - // Encode unicode as utf-8 rather than in escaped form. - { - map[string]string{"a": "你好"}, - "a: 你好\n", - }, - - // Support encoding.TextMarshaler. - { - map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, - "a: 1.2.3.4\n", - }, - { - map[string]time.Time{"a": time.Unix(1424801979, 0)}, - "a: 2015-02-24T18:19:39Z\n", - }, - - // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
- { - map[string]string{"a": "b: c"}, - "a: 'b: c'\n", - }, - - // Containing hash mark ('#') in string should be quoted - { - map[string]string{"a": "Hello #comment"}, - "a: 'Hello #comment'\n", - }, - { - map[string]string{"a": "你好 #comment"}, - "a: '你好 #comment'\n", - }, -} - -func (s *S) TestMarshal(c *C) { - defer os.Setenv("TZ", os.Getenv("TZ")) - os.Setenv("TZ", "UTC") - for _, item := range marshalTests { - data, err := yaml.Marshal(item.value) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, item.data) - } -} - -var marshalErrorTests = []struct { - value interface{} - error string - panic string -}{{ - value: &struct { - B int - inlineB ",inline" - }{1, inlineB{2, inlineC{3}}}, - panic: `Duplicated key 'b' in struct struct \{ B int; .*`, -}, { - value: &struct { - A int - B map[string]int ",inline" - }{1, map[string]int{"a": 2}}, - panic: `Can't have key "a" in inlined map; conflicts with struct field`, -}} - -func (s *S) TestMarshalErrors(c *C) { - for _, item := range marshalErrorTests { - if item.panic != "" { - c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) - } else { - _, err := yaml.Marshal(item.value) - c.Assert(err, ErrorMatches, item.error) - } - } -} - -func (s *S) TestMarshalTypeCache(c *C) { - var data []byte - var err error - func() { - type T struct{ A int } - data, err = yaml.Marshal(&T{}) - c.Assert(err, IsNil) - }() - func() { - type T struct{ B int } - data, err = yaml.Marshal(&T{}) - c.Assert(err, IsNil) - }() - c.Assert(string(data), Equals, "b: 0\n") -} - -var marshalerTests = []struct { - data string - value interface{} -}{ - {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, - {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, - {"_: 10\n", 10}, - {"_: null\n", nil}, - {"_: BAR!\n", "BAR!"}, -} - -type marshalerType struct { - value interface{} -} - -func (o marshalerType) MarshalText() ([]byte, error) { - panic("MarshalText called on type with MarshalYAML") -} - -func (o marshalerType) MarshalYAML() (interface{}, error) { - return o.value, nil -} - -type marshalerValue struct { - Field marshalerType "_" -} - -func (s *S) TestMarshaler(c *C) { - for _, item := range marshalerTests { - obj := &marshalerValue{} - obj.Field.value = item.value - data, err := yaml.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, string(item.data)) - } -} - -func (s *S) TestMarshalerWholeDocument(c *C) { - obj := &marshalerType{} - obj.value = map[string]string{"hello": "world!"} - data, err := yaml.Marshal(obj) - c.Assert(err, IsNil) - c.Assert(string(data), Equals, "hello: world!\n") -} - -type failingMarshaler struct{} - -func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { - return nil, failingErr -} - -func (s *S) TestMarshalerError(c *C) { - _, err := yaml.Marshal(&failingMarshaler{}) - c.Assert(err, Equals, failingErr) -} - -func (s *S) TestSortedOutput(c *C) { - order := []interface{}{ - false, - true, - 1, - uint(1), - 1.0, - 1.1, - 1.2, - 2, - uint(2), - 2.0, - 2.1, - "", - ".1", - ".2", - ".a", - "1", - "2", - "a!10", - "a/2", - "a/10", - "a~10", - "ab/1", - "b/1", - "b/01", - "b/2", - "b/02", - "b/3", - "b/03", - "b1", - "b01", - "b3", - "c2.10", - "c10.2", - "d1", - "d12", - "d12a", - } - m := make(map[interface{}]int) - for _, k := range order { - m[k] = 1 - } - data, err := yaml.Marshal(m) - c.Assert(err, IsNil) - out := "\n" + string(data) - last := 0 - for i, k := range order { - repr := fmt.Sprint(k) - if s, ok := k.(string); ok { - if _, err = strconv.ParseFloat(repr, 32); s == "" || err == 
nil { - repr = `"` + repr + `"` - } - } - index := strings.Index(out, "\n"+repr+":") - if index == -1 { - c.Fatalf("%#v is not in the output: %#v", k, out) - } - if index < last { - c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) - } - last = index - } -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 0a7037ad1b..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1096 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. 
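-// [Go] Editorial note: the two helpers below record the problem text and its
-// position on the parser and always return false, so call sites can abort
-// with a plain `return yaml_parser_set_parser_error(...)`.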
-func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } - return false -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected <document start>", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
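- // [Go] Editorial example, not in the original source: given the
- // directive `%TAG !e! tag:example.com,2000:`, a node written as
- // `!e!foo` arrives here as handle "!e!" plus suffix "foo" and
- // resolves to the full tag "tag:example.com,2000:foo".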
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
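-// [Go] Editorial note: this is how constructs with a missing node, such as
-// the value in `key:` or the entry after a lone `-`, still produce an event:
-// an implicit plain scalar spanning zero characters at the given mark.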
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index d5fb097277..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,391 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
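-// [Go] Editorial note: a stream beginning 0xEF 0xBB 0xBF is decoded as UTF-8
-// with the three BOM bytes skipped, 0xFF 0xFE selects UTF-16LE, 0xFE 0xFF
-// selects UTF-16BE, and a stream without any BOM defaults to UTF-8.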
-const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. 
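- // Re-slicing to the full capacity lets the decoding loop below write
- // decoded bytes directly at buffer[buffer_len:]; the slice is trimmed
- // back to the number of bytes actually written before returning.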
- parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). 
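- //
- // For example, U+1F600 lies above 0xFFFF and is therefore transmitted
- // as the surrogate pair W1=0xD83D, W2=0xDE00 (here U' = 0xF600, so
- // yyyyyyyyyy = 0x3D and xxxxxxxxxx = 0x200 in the formulas below).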
- // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - } else { - // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - } - buffer_len += width - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 93a8632743..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,203 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "strconv" - "strings" - "unicode/utf8" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: - return true - } - return false -} - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. 
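- // For example, "yes", "True" and "on" are all in the map as true with
- // tag yaml_BOOL_TAG, while "~" and "null" map to nil with yaml_NULL_TAG,
- // per resolveMapList above.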
- if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, -int(intv) - } else { - return yaml_INT_TAG, -intv - } - } - } - // XXX Handle timestamps here. - - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - if tag == yaml_BINARY_TAG { - return yaml_BINARY_TAG, in - } - if utf8.ValidString(in) { - return yaml_STR_TAG, in - } - return yaml_BINARY_TAG, encodeBase64(in) -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go deleted file mode 100644 index fe93b190c2..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go +++ /dev/null @@ -1,2710 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). 
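-//
-// (LL(1) here means the parser commits to a production after a single token
-// of lookahead, which is why the parsing functions only ever call peek_token
-// and skip_token.)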
-//
-// Actually there are two issues of Scanning that might be called "clever", the
-// rest is quite straightforward. The issues are "block collection start" and
-// "simple keys". Both issues are explained below in detail.
-//
-// Here the Scanning step is explained and implemented. We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-//      STREAM-START(encoding)          # The stream start.
-//      STREAM-END                      # The stream end.
-//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
-//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
-//      DOCUMENT-START                  # '---'
-//      DOCUMENT-END                    # '...'
-//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
-//      BLOCK-MAPPING-START             # sequence or a block mapping.
-//      BLOCK-END                       # Indentation decrease.
-//      FLOW-SEQUENCE-START             # '['
-//      FLOW-SEQUENCE-END               # ']'
-//      FLOW-MAPPING-START              # '{'
-//      FLOW-MAPPING-END                # '}'
-//      BLOCK-ENTRY                     # '-'
-//      FLOW-ENTRY                      # ','
-//      KEY                             # '?' or nothing (simple keys).
-//      VALUE                           # ':'
-//      ALIAS(anchor)                   # '*anchor'
-//      ANCHOR(anchor)                  # '&anchor'
-//      TAG(handle,suffix)              # '!handle!suffix'
-//      SCALAR(value,style)             # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-//      STREAM-START(encoding)
-//      STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
-//
-// The next two tokens are responsible for directives:
-//
-//      VERSION-DIRECTIVE(major,minor)
-//      TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-//      %YAML   1.1
-//      %TAG    !   !foo
-//      %TAG    !yaml!  tag:yaml.org,2002:
-//      ---
-//
-// The corresponding sequence of tokens:
-//
-//      STREAM-START(utf-8)
-//      VERSION-DIRECTIVE(1,1)
-//      TAG-DIRECTIVE("!","!foo")
-//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-//      DOCUMENT-START
-//      STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-//      DOCUMENT-START
-//      DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-// 1. An implicit document:
-//
-//      'a scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      STREAM-END
-//
-// 2. An explicit document:
-//
-//      ---
-//      'a scalar'
-//      ...
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      DOCUMENT-START
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-END
-//      STREAM-END
-//
-// 3. Several documents in a stream:
-//
-//      'a scalar'
-//      ---
-//      'another scalar'
-//      ---
-//      'yet another scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("another scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("yet another scalar",single-quoted)
-//      STREAM-END
-//
-// We have already introduced the SCALAR token above. The following tokens are
-// used to describe aliases, anchors, tags, and scalars:
-//
-//      ALIAS(anchor)
-//      ANCHOR(anchor)
-//      TAG(handle,suffix)
-//      SCALAR(value,style)
-//
-// The following series of examples illustrates the usage of these tokens:
-//
-// 1. A recursive sequence:
-//
-//      &A [ *A ]
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      ANCHOR("A")
-//      FLOW-SEQUENCE-START
-//      ALIAS("A")
-//      FLOW-SEQUENCE-END
-//      STREAM-END
-//
-// 2.
A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. 
Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. 
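-// The error records two positions: context_mark, where the surrounding
-// construct began (e.g. "while scanning a simple key"), and the current
-// mark, where the problem itself was detected.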
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. 
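- // A required key is one that starts at the current indentation level
- // in the block context (see yaml_parser_save_simple_key); it cannot
- // simply be discarded, because the production demands a ':' for it.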
- if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - return true -} - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // Increase the flow level. - parser.flow_level++ - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - return true -} - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each intendation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the intendation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. 
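- // Unrolling to column -1 pops every remaining indentation level,
- // emitting a BLOCK-END token for each still-open block collection.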
- if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. 
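-// FLOW-ENTRY is the ',' that separates entries inside flow sequences '[...]'
-// and flow mappings '{...}'.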
-func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if simple_key.possible { - // Create the KEY token and insert it into the queue. 
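- // The scanner only learns that the preceding node was a key when it
- // sees this ':', so the KEY token is inserted retroactively at the
- // queue position recorded when the simple key was saved.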
- token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. 
- var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. 
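- // For "%TAG !yaml! tag:yaml.org,2002:" the value field holds the
- // handle "!yaml!" and the prefix field holds "tag:yaml.org,2002:".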
-		*token = yaml_token_t{
-			typ:        yaml_TAG_DIRECTIVE_TOKEN,
-			start_mark: start_mark,
-			end_mark:   end_mark,
-			value:      handle,
-			prefix:     prefix,
-		}
-
-		// Unknown directive.
-	} else {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "found unknown directive name")
-		return false
-	}
-
-	// Eat the rest of the line including any comments.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	if parser.buffer[parser.buffer_pos] == '#' {
-		for !is_breakz(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-	}
-
-	// Check if we are at the end of the line.
-	if !is_breakz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "did not find expected comment or line break")
-		return false
-	}
-
-	// Eat a line break.
-	if is_break(parser.buffer, parser.buffer_pos) {
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-		skip_line(parser)
-	}
-
-	return true
-}
-
-// Scan the directive name.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^
-//
-func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
-	// Consume the directive name.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	var s []byte
-	for is_alpha(parser.buffer, parser.buffer_pos) {
-		s = read(parser, s)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Check if the name is empty.
-	if len(s) == 0 {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "could not find expected directive name")
-		return false
-	}
-
-	// Check for a blank character after the name.
-	if !is_blankz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "found unexpected non-alphabetical character")
-		return false
-	}
-	*name = s
-	return true
-}
-
-// Scan the value of VERSION-DIRECTIVE.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^^^
-func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
-	// Eat whitespaces.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Consume the major version number.
-	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
-		return false
-	}
-
-	// Eat '.'.
-	if parser.buffer[parser.buffer_pos] != '.' {
-		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
-			start_mark, "did not find expected digit or '.' character")
-	}
-
-	skip(parser)
-
-	// Consume the minor version number.
-	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
-		return false
-	}
-	return true
-}
-
-const max_number_length = 2
-
-// Scan the version number of VERSION-DIRECTIVE.
-// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. 
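
// Aside: the three source forms distinguished by yaml_parser_scan_tag above,
// written out as data. A hedged illustration only; the table is not part of
// this package.
//
//	var tagForms = []struct {
//		src, handle, suffix string
//	}{
//		{"!<tag:yaml.org,2002:str>", "", "tag:yaml.org,2002:str"}, // verbatim form, handle kept empty
//		{"!!str", "!!", "str"},                                    // '!handle!suffix' form
//		{"!local", "!", "local"},                                  // '!suffix' form, handle reset to "!"
//	}
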
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && !(s[0] == '!' && s[1] == 0) { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the tag is non-empty. - if len(s) == 0 { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. 
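
// Aside: a standalone sketch of the decoding loop that follows, under the same
// rules — each %XX triple contributes one octet, the first octet fixes the
// UTF-8 sequence width, and every continuation octet must look like 10xxxxxx.
// The function name is illustrative, not part of this package.
//
//	package main
//
//	import (
//		"fmt"
//		"strconv"
//	)
//
//	func decodeEscapedRune(s string) ([]byte, error) {
//		var out []byte
//		w := 0 // sequence width, unknown until the leading octet is seen
//		for len(s) >= 3 && s[0] == '%' {
//			v, err := strconv.ParseUint(s[1:3], 16, 8)
//			if err != nil {
//				return nil, fmt.Errorf("did not find URI escaped octet")
//			}
//			octet := byte(v)
//			switch {
//			case w == 0 && octet&0x80 == 0x00:
//				w = 1
//			case w == 0 && octet&0xE0 == 0xC0:
//				w = 2
//			case w == 0 && octet&0xF0 == 0xE0:
//				w = 3
//			case w == 0 && octet&0xF8 == 0xF0:
//				w = 4
//			case w == 0:
//				return nil, fmt.Errorf("incorrect leading UTF-8 octet")
//			case octet&0xC0 != 0x80:
//				return nil, fmt.Errorf("incorrect trailing UTF-8 octet")
//			}
//			out = append(out, octet)
//			if s = s[3:]; len(out) == w {
//				return out, nil
//			}
//		}
//		return nil, fmt.Errorf("truncated URI escape sequence")
//	}
//
//	func main() {
//		b, _ := decodeEscapedRune("%C3%A9")
//		fmt.Printf("%s\n", b) // é
//	}
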
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the intendation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an intendation indicator equal to 0") - return false - } - - // Get the intendation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an intendation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. 
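
// Aside: the block scalar headers scanned above ('|' literal, '>' folded; an
// optional '+'/'-' chomping indicator; an optional non-zero indentation
// digit), shown end to end through the package's exported API. A hedged
// illustration, not part of the deleted file.
//
//	package main
//
//	import (
//		"fmt"
//
//		"gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var out map[string]string
//		// '|' keeps line breaks; the '-' chomping indicator strips the final one.
//		if err := yaml.Unmarshal([]byte("s: |-\n  line 1\n  line 2\n"), &out); err != nil {
//			panic(err)
//		}
//		fmt.Printf("%q\n", out["s"]) // "line 1\nline 2"
//	}
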
- if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the intendation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following intendation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan intendation spaces and line breaks for a block scalar. Determine the -// intendation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the intendation spaces and line breaks. - max_indent := 0 - for { - // Eat the intendation spaces. 
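
// Aside: a side-by-side restatement of the indent selection performed here and
// in the loop just below (helper name illustrative, not in this package): an
// explicit indentation indicator wins; otherwise the widest leading
// indentation is used, floored at parser.indent+1 and at 1.
//
//	func blockScalarIndent(increment, parentIndent, maxIndent int) int {
//		if increment > 0 { // explicit indicator, e.g. "|2"
//			if parentIndent >= 0 {
//				return parentIndent + increment
//			}
//			return increment
//		}
//		indent := maxIndent
//		if indent < parentIndent+1 {
//			indent = parentIndent + 1
//		}
//		if indent < 1 {
//			indent = 1
//		}
//		return indent
//	}
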
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the intendation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an intendation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. - if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. 
- switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. 
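
// Aside: the hand-rolled byte arithmetic above is plain UTF-8 encoding. For
// comparison, a sketch with the standard library; the scanner avoids
// utf8.EncodeRune only because it rejects surrogates and out-of-range values
// itself first, whereas EncodeRune would silently substitute RuneError.
// Illustration only, not part of the deleted file.
//
//	package main
//
//	import (
//		"fmt"
//		"unicode/utf8"
//	)
//
//	func main() {
//		buf := make([]byte, utf8.UTFMax)
//		n := utf8.EncodeRune(buf, 0x2028) // the '\L' (LINE SEPARATOR) escape
//		fmt.Printf("% x\n", buf[:n])      // e2 80 a8, matching the 'L' case above
//	}
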
- if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) 
- whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab character that abuse intendation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violate intendation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check intendation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
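
// Aside: what the leading_break/trailing_breaks bookkeeping above produces,
// observed through the package's exported API — line breaks inside a plain
// scalar fold into single spaces. A hedged illustration, not part of the
// deleted file.
//
//	package main
//
//	import (
//		"fmt"
//
//		"gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var out map[string]string
//		if err := yaml.Unmarshal([]byte("msg: hello\n  wrapped world\n"), &out); err != nil {
//			panic(err)
//		}
//		fmt.Println(out["msg"]) // hello wrapped world
//	}
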
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 5958822f9c..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,104 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go deleted file mode 100644 index c5cf1ed4f6..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package yaml_test - -import ( - . 
"gopkg.in/check.v1" - "testing" -) - -func Test(t *testing.T) { TestingT(t) } - -type S struct{} - -var _ = Suite(&S{}) diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index 190362f25d..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,89 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - // If the output encoding is UTF-8, we don't need to recode the buffer. - if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - // Recode the buffer into the raw buffer. - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - // See the "reader.c" code for more details on UTF-8 encoding. Note - // that we assume that the buffer contains a valid UTF-8 sequence. - - // Read the next UTF-8 character. - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - pos += w - - // Write the character. - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - // Write the character using a surrogate pair (check "reader.c"). - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - // Write the raw buffer. - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] - return true -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index d133edf9d3..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,346 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. 
-type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - defer handleErr(&err) - d := newDecoder() - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only unmarshalled if they are exported (have an upper case -// first letter), and are unmarshalled using the field name lowercased as the -// default key. Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) 
yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Does not apply to zero valued structs. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int "a,omitempty" -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshal("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - - // Inline holds the field index if the field is part of an inlined struct. 
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) 
- } - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index d60a6b6b00..0000000000 --- a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,716 +0,0 @@ -package yaml - -import ( - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. 
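
// Aside: a hedged usage sketch of the tag options getStructInfo resolves above
// and the omitempty test driven by isZero (type names are illustrative only,
// not part of the deleted file):
//
//	package main
//
//	import (
//		"fmt"
//
//		"gopkg.in/yaml.v2"
//	)
//
//	type Meta struct {
//		Name string `yaml:"name"`
//	}
//
//	type Doc struct {
//		Meta  `yaml:",inline"`            // fields promoted into the outer mapping
//		Count int `yaml:"count,omitempty"` // dropped while zero, per isZero
//	}
//
//	func main() {
//		out, _ := yaml.Marshal(Doc{Meta: Meta{Name: "x"}})
//		fmt.Print(string(out)) // name: x
//	}
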
-}
-
-// Node Styles
-
-type yaml_style_t int8
-
-type yaml_scalar_style_t yaml_style_t
-
-// Scalar styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
-
-	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
-	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
-	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
-	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
-	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
-)
-
-type yaml_sequence_style_t yaml_style_t
-
-// Sequence styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
-
-	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
-	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
-)
-
-type yaml_mapping_style_t yaml_style_t
-
-// Mapping styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
-
-	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
-	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
-)
-
-// Tokens
-
-type yaml_token_type_t int
-
-// Token types.
-const (
-	// An empty token.
-	yaml_NO_TOKEN yaml_token_type_t = iota
-
-	yaml_STREAM_START_TOKEN // A STREAM-START token.
-	yaml_STREAM_END_TOKEN   // A STREAM-END token.
-
-	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
-	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
-	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
-	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
-
-	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
-	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
-	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
-
-	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
-	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
-	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
-	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
-
-	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
-	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
-	yaml_KEY_TOKEN         // A KEY token.
-	yaml_VALUE_TOKEN       // A VALUE token.
-
-	yaml_ALIAS_TOKEN  // An ALIAS token.
-	yaml_ANCHOR_TOKEN // An ANCHOR token.
-	yaml_TAG_TOKEN    // A TAG token.
-	yaml_SCALAR_TOKEN // A SCALAR token.
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
- anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. 
-type yaml_document_t struct {
-
- // The document nodes.
- nodes []yaml_node_t
-
- // The version directive.
- version_directive *yaml_version_directive_t
-
- // The list of tag directives.
- tag_directives_data []yaml_tag_directive_t
- tag_directives_start int // The beginning of the tag directives list.
- tag_directives_end int // The end of the tag directives list.
-
- start_implicit int // Is the document start indicator implicit?
- end_implicit int // Is the document end indicator implicit?
-
- // The start/end of the document.
- start_mark, end_mark yaml_mark_t
-}
-
-// The prototype of a read handler.
-//
-// The read handler is called when the parser needs more bytes from the
-// input source. It should write no more than len(buffer) bytes into the
-// buffer and return the number of bytes written; returning io.EOF
-// signals the end of the input.
-type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
-
-// This structure holds information about a potential simple key.
-type yaml_simple_key_t struct {
- possible bool // Is a simple key possible?
- required bool // Is a simple key required?
- token_number int // The number of the token.
- mark yaml_mark_t // The position mark.
-}
-
-// The states of the parser.
-type yaml_parser_state_t int
-
-const (
- yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
-
- yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
- yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
- yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
- yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
- yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
- yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
- yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
- yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
- yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
- yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
- yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
- yaml_PARSE_END_STATE // Expect nothing.
-)
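These parser states are internal; callers only ever drive them indirectly through the package's public entry points. As a minimal sketch of the code path that exercises this state machine, assuming the published gopkg.in/yaml.v2 API (the config type and document below are illustrative only):

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// An illustrative document that walks the parser through several of the
// states above: DOCUMENT-START, a block mapping, a block sequence, scalars.
const doc = `
name: prometheus
targets:
  - localhost:9090
  - localhost:9091
`

type config struct {
	Name    string   `yaml:"name"`
	Targets []string `yaml:"targets"`
}

func main() {
	var c config
	if err := yaml.Unmarshal([]byte(doc), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}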
-
-func (ps yaml_parser_state_t) String() string {
- switch ps {
- case yaml_PARSE_STREAM_START_STATE:
- return "yaml_PARSE_STREAM_START_STATE"
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_START_STATE:
- return "yaml_PARSE_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
- case yaml_PARSE_DOCUMENT_END_STATE:
- return "yaml_PARSE_DOCUMENT_END_STATE"
- case yaml_PARSE_BLOCK_NODE_STATE:
- return "yaml_PARSE_BLOCK_NODE_STATE"
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
- case yaml_PARSE_FLOW_NODE_STATE:
- return "yaml_PARSE_FLOW_NODE_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
- case yaml_PARSE_END_STATE:
- return "yaml_PARSE_END_STATE"
- }
- return ""
-}
-
-// This structure holds aliases data.
-type yaml_alias_data_t struct {
- anchor []byte // The anchor.
- index int // The node id.
- mark yaml_mark_t // The anchor mark.
-}
-
-// The parser structure.
-//
-// All members are internal. Manage the structure using the
-// yaml_parser_ family of functions.
-type yaml_parser_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
-
- problem string // Error description.
-
- // The byte about which the problem occurred.
- problem_offset int
- problem_value int
- problem_mark yaml_mark_t
-
- // The error context.
- context string
- context_mark yaml_mark_t
-
- // Reader stuff
-
- read_handler yaml_read_handler_t // Read handler.
-
- input_file io.Reader // File input data.
- input []byte // String input data.
- input_pos int // The current position in the string input.
-
- eof bool // EOF flag.
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- unread int // The number of unread characters in the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the raw buffer.
-
- encoding yaml_encoding_t // The input encoding.
-
- offset int // The offset of the current position (in bytes).
- mark yaml_mark_t // The mark of the current position.
-
- // Scanner stuff
-
- stream_start_produced bool // Have we started to scan the input stream?
- stream_end_produced bool // Have we reached the end of the input stream?
-
- flow_level int // The number of unclosed '[' and '{' indicators.
-
- tokens []yaml_token_t // The tokens queue.
- tokens_head int // The head of the tokens queue.
- tokens_parsed int // The number of tokens fetched from the queue.
- token_available bool // Does the tokens queue contain a token ready for dequeueing?
-
- indent int // The current indentation level.
- indents []int // The indentation levels stack.
-
- simple_key_allowed bool // May a simple key occur at the current position?
- simple_keys []yaml_simple_key_t // The stack of simple keys.
-
- // Parser stuff
-
- state yaml_parser_state_t // The current parser state.
- states []yaml_parser_state_t // The parser states stack.
- marks []yaml_mark_t // The stack of marks.
- tag_directives []yaml_tag_directive_t // The list of TAG directives.
-
- // Dumper stuff
-
- aliases []yaml_alias_data_t // The alias data.
-
- document *yaml_document_t // The currently parsed document.
-}
-
-// Emitter Definitions
-
-// The prototype of a write handler.
-//
-// The write handler is called when the emitter needs to flush the
-// accumulated characters to the output. It should write the entire
-// buffer and return a non-nil error on failure.
-type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
-
-type yaml_emitter_state_t int
-
-// The emitter states.
-const (
- // Expect STREAM-START.
- yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
-
- yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
- yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
- yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
- yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
- yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
- yaml_EMIT_END_STATE // Expect nothing.
-)
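The emitter states mirror the parser states on the output side. A minimal sketch of the path that exercises them, again assuming only the published gopkg.in/yaml.v2 API:

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	// Marshal drives the emitter through STREAM-START, DOCUMENT-START,
	// a block mapping, a block sequence, and the matching end states.
	out, err := yaml.Marshal(map[string][]string{
		"targets": {"localhost:9090", "localhost:9091"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
}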
-// The emitter structure.
-//
-// All members are internal. Manage the structure using the
-// yaml_emitter_ family of functions.
-type yaml_emitter_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
- problem string // Error description.
-
- // Writer stuff
-
- write_handler yaml_write_handler_t // Write handler.
-
- output_buffer *[]byte // String output data.
- output_file io.Writer // File output data.
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the raw buffer.
-
- encoding yaml_encoding_t // The stream encoding.
-
- // Emitter stuff
-
- canonical bool // Is the output in the canonical style?
- best_indent int // The number of indentation spaces.
- best_width int // The preferred width of the output lines.
- unicode bool // Allow unescaped non-ASCII characters?
- line_break yaml_break_t // The preferred line break.
-
- state yaml_emitter_state_t // The current emitter state.
- states []yaml_emitter_state_t // The stack of states.
-
- events []yaml_event_t // The event queue.
- events_head int // The head of the event queue.
-
- indents []int // The stack of indentation levels.
-
- tag_directives []yaml_tag_directive_t // The list of tag directives.
-
- indent int // The current indentation level.
-
- flow_level int // The current flow level.
-
- root_context bool // Is it the document root context?
- sequence_context bool // Is it a sequence context?
- mapping_context bool // Is it a mapping context?
- simple_key_context bool // Is it a simple mapping key context?
-
- line int // The current line.
- column int // The current column.
- whitespace bool // Was the last character a whitespace?
- indention bool // Was the last character an indentation character (' ', '-', '?', ':')?
- open_ended bool // Is an explicit document end required?
-
- // Anchor analysis.
- anchor_data struct {
- anchor []byte // The anchor value.
- alias bool // Is it an alias?
- }
-
- // Tag analysis.
- tag_data struct {
- handle []byte // The tag handle.
- suffix []byte // The tag suffix.
- }
-
- // Scalar analysis.
- scalar_data struct {
- value []byte // The scalar value.
- multiline bool // Does the scalar contain line breaks?
- flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
- block_plain_allowed bool // Can the scalar be expressed in the block plain style?
- single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
- block_allowed bool // Can the scalar be expressed in the literal or folded styles?
- style yaml_scalar_style_t // The output style.
- }
-
- // Dumper stuff
-
- opened bool // Was the stream already opened?
- closed bool // Was the stream already closed?
-
- // The information associated with the document nodes.
- anchors *struct {
- references int // The number of references.
- anchor int // The anchor id.
- serialized bool // Has the node been emitted?
- }
-
- last_anchor_id int // The last assigned anchor id.
-
- document *yaml_document_t // The currently emitted document.
-}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
deleted file mode 100644
index 8110ce3c37..0000000000
--- a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package yaml
-
-const (
- // The size of the input raw buffer.
- input_raw_buffer_size = 512
-
- // The size of the input buffer.
- // It should be possible to decode the whole raw buffer.
- input_buffer_size = input_raw_buffer_size * 3
-
- // The size of the output buffer.
- output_buffer_size = 128
-
- // The size of the output raw buffer.
- // It should be possible to encode the whole output buffer.
- output_raw_buffer_size = (output_buffer_size*2 + 2)
-
- // The size of other stacks and queues.
- initial_stack_size = 16
- initial_queue_size = 16
- initial_string_size = 16
-)
-
-// Check if the character at the specified position is an alphabetical
-// character, a digit, '_', or '-'.
-func is_alpha(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
-}
-
-// Check if the character at the specified position is a digit.
-func is_digit(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9'
-}
-
-// Get the value of a digit.
-func as_digit(b []byte, i int) int {
- return int(b[i]) - '0'
-}
-
-// Check if the character at the specified position is a hex-digit.
-func is_hex(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
-}
-
-// Get the value of a hex-digit.
-func as_hex(b []byte, i int) int {
- bi := b[i]
- if bi >= 'A' && bi <= 'F' {
- return int(bi) - 'A' + 10
- }
- if bi >= 'a' && bi <= 'f' {
- return int(bi) - 'a' + 10
- }
- return int(bi) - '0'
-}
-
-// Check if the character is ASCII.
-func is_ascii(b []byte, i int) bool {
- return b[i] <= 0x7F
-}
-
-// Check if the character at the specified position can be printed unescaped.
-func is_printable(b []byte, i int) bool {
- return ((b[i] == 0x0A) || // . == #x0A
- (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
- (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
- (b[i] > 0xC2 && b[i] < 0xED) ||
- (b[i] == 0xED && b[i+1] < 0xA0) ||
- (b[i] == 0xEE) ||
- (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
- !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
- !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
-}
-
-// Check if the character at the specified position is NUL.
-func is_z(b []byte, i int) bool {
- return b[i] == 0x00
-}
-
-// Check if the beginning of the buffer is a UTF-8 BOM (the position
-// argument is ignored).
-func is_bom(b []byte, i int) bool {
- return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
-}
-
-// Check if the character at the specified position is a space.
-func is_space(b []byte, i int) bool {
- return b[i] == ' '
-}
-
-// Check if the character at the specified position is a tab.
-func is_tab(b []byte, i int) bool {
- return b[i] == '\t'
-}
-
-// Check if the character at the specified position is blank (space or tab).
-func is_blank(b []byte, i int) bool {
- //return is_space(b, i) || is_tab(b, i)
- return b[i] == ' ' || b[i] == '\t'
-}
-
-// Check if the character at the specified position is a line break.
-func is_break(b []byte, i int) bool {
- return (b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
-}
-
-func is_crlf(b []byte, i int) bool {
- return b[i] == '\r' && b[i+1] == '\n'
-}
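These helpers classify bytes of a UTF-8 buffer without decoding runes, so multi-byte characters such as LS (U+2028) are matched byte by byte. A standalone, illustrative sketch of the same idea (isBreak and the width computation below loosely mirror is_break and width; they are not part of the package):

package main

import "fmt"

// isBreak mirrors is_break above: it recognises the YAML line-break
// characters CR, LF, NEL, LS, and PS by their UTF-8 byte sequences.
func isBreak(b []byte, i int) bool {
	return b[i] == '\r' ||
		b[i] == '\n' ||
		b[i] == 0xC2 && b[i+1] == 0x85 ||
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 ||
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9
}

func main() {
	buf := []byte("a\nb\u2028c")
	for i := 0; i < len(buf); {
		// The lead byte alone determines the width of the UTF-8
		// sequence, as in the width helper above.
		w := 1
		switch {
		case buf[i]&0xE0 == 0xC0:
			w = 2
		case buf[i]&0xF0 == 0xE0:
			w = 3
		case buf[i]&0xF8 == 0xF0:
			w = 4
		}
		fmt.Printf("offset %d: width %d, break=%v\n", i, w, isBreak(buf, i))
		i += w
	}
}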
-// Check if the character is a line break or NUL.
-func is_breakz(b []byte, i int) bool {
- //return is_break(b, i) || is_z(b, i)
- return ( // is_break:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- // is_z:
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, or NUL.
-func is_spacez(b []byte, i int) bool {
- //return is_space(b, i) || is_breakz(b, i)
- return ( // is_space:
- b[i] == ' ' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, tab, or NUL.
-func is_blankz(b []byte, i int) bool {
- //return is_blank(b, i) || is_breakz(b, i)
- return ( // is_blank:
- b[i] == ' ' || b[i] == '\t' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Determine the width of the character.
-func width(b byte) int {
- // Don't replace these by a switch without first
- // confirming that it is being inlined.
- if b&0x80 == 0x00 {
- return 1
- }
- if b&0xE0 == 0xC0 {
- return 2
- }
- if b&0xF0 == 0xE0 {
- return 3
- }
- if b&0xF8 == 0xF0 {
- return 4
- }
- return 0
-
-}
diff --git a/Makefile b/Makefile
index e07113414c..347f4d604a 100644
--- a/Makefile
+++ b/Makefile
@@ -67,16 +67,13 @@ clean:
 $(SELFLINK): $(GOPATH)
 	ln -s $(MAKEFILE_DIR) $@
 
-$(GOPATH):
-	cp -a $(MAKEFILE_DIR)/Godeps/_workspace "$(GOPATH)"
-
 dependencies: $(GOCC) | $(SELFLINK)
 
 documentation: search_index
 	godoc -http=:6060 -index -index_files='search_index'
 
 format: dependencies
-	find . -iname '*.go' | egrep -v "^\./(\.build|Godeps)/" | xargs -n1 $(GOFMT) -w -s=true
+	find . -iname '*.go' | egrep -v "^\./\.build/" | xargs -n1 $(GOFMT) -w -s=true
 
 race_condition_binary: build
 	$(GO) build -race -o prometheus.race $(BUILDFLAGS) github.com/prometheus/prometheus/cmd/prometheus
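For callers, removing the vendored sources changes nothing: plain scalars still resolve against the core tags listed in the deleted yamlh.go (!!null, !!bool, !!int, !!float, with !!str as the fallback). A small sketch against the published gopkg.in/yaml.v2 API showing that resolution:

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	var v map[string]interface{}
	in := []byte("a: null\nb: true\nc: 3\nd: 3.5\ne: hello")
	if err := yaml.Unmarshal(in, &v); err != nil {
		log.Fatal(err)
	}
	// Expected resolution: a -> nil (!!null), b -> bool, c -> int,
	// d -> float64, e -> string.
	for _, k := range []string{"a", "b", "c", "d", "e"} {
		fmt.Printf("%s: %T\n", k, v[k])
	}
}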