Merge pull request #932 from prometheus/update-deps

Update vendored dependencies.
Julius Volz 2015-07-27 17:50:46 +02:00
commit 05225a538a
101 changed files with 4143 additions and 499 deletions

Godeps/Godeps.json (generated, 51 changed lines)
View file

@ -9,8 +9,8 @@
}, },
{ {
"ImportPath": "github.com/Sirupsen/logrus", "ImportPath": "github.com/Sirupsen/logrus",
"Comment": "v0.7.3-13-g81e2611", "Comment": "v0.8.5-1-g11538ee",
"Rev": "81e2611f37acccd8cb5e4e1a5a4a5f6c9c7f4537" "Rev": "11538ee6888f72d4ab44a1aeba06b9bc4cb134a1"
}, },
{ {
"ImportPath": "github.com/beorn7/perks/quantile", "ImportPath": "github.com/beorn7/perks/quantile",
@ -18,16 +18,21 @@
}, },
{ {
"ImportPath": "github.com/golang/protobuf/proto", "ImportPath": "github.com/golang/protobuf/proto",
"Rev": "16256d3ce6929458613798ee44b7914a3f59f5c6" "Rev": "0f7a9caded1fb3c9cc5a9b4bcf2ff633cc8ae644"
},
{
"ImportPath": "github.com/golang/snappy",
"Rev": "0c7f8a7704bfec561913f4df52c832f094ef56f0"
}, },
{ {
"ImportPath": "github.com/hashicorp/consul/api", "ImportPath": "github.com/hashicorp/consul/api",
"Comment": "v0.5.2-9-g145b495", "Comment": "v0.5.2-180-ga6317f2",
"Rev": "145b495e22388832240ee78788524bd975e443ca" "Rev": "a6317f2fb2ba9d5ae695f7fa703cfb30a1c59af1"
}, },
{ {
"ImportPath": "github.com/julienschmidt/httprouter", "ImportPath": "github.com/julienschmidt/httprouter",
"Rev": "8c199fb6259ffc1af525cc3ad52ee60ba8359669" "Comment": "v1.1-3-g6aacfd5",
"Rev": "6aacfd5ab513e34f7e64ea9627ab9670371b34e7"
}, },
{ {
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
@ -35,27 +40,27 @@
}, },
{ {
"ImportPath": "github.com/miekg/dns", "ImportPath": "github.com/miekg/dns",
"Rev": "bb1103f648f811d2018d4bedcb2d4b2bce34a0f1" "Rev": "e59f851c912767b1db587dcabee6e6652e495c75"
}, },
{ {
"ImportPath": "github.com/prometheus/client_golang/extraction", "ImportPath": "github.com/prometheus/client_golang/extraction",
"Comment": "0.6.0", "Comment": "0.7.0",
"Rev": "e319516b0f97867d36151451cab8d4aefbe1786b" "Rev": "6dbab8106ed3ed77359ac85d9cf08e30290df864"
}, },
{ {
"ImportPath": "github.com/prometheus/client_golang/model", "ImportPath": "github.com/prometheus/client_golang/model",
"Comment": "0.6.0", "Comment": "0.7.0",
"Rev": "e319516b0f97867d36151451cab8d4aefbe1786b" "Rev": "6dbab8106ed3ed77359ac85d9cf08e30290df864"
}, },
{ {
"ImportPath": "github.com/prometheus/client_golang/prometheus", "ImportPath": "github.com/prometheus/client_golang/prometheus",
"Comment": "0.6.0", "Comment": "0.7.0",
"Rev": "e319516b0f97867d36151451cab8d4aefbe1786b" "Rev": "6dbab8106ed3ed77359ac85d9cf08e30290df864"
}, },
{ {
"ImportPath": "github.com/prometheus/client_golang/text", "ImportPath": "github.com/prometheus/client_golang/text",
"Comment": "0.6.0", "Comment": "0.7.0",
"Rev": "e319516b0f97867d36151451cab8d4aefbe1786b" "Rev": "6dbab8106ed3ed77359ac85d9cf08e30290df864"
}, },
{ {
"ImportPath": "github.com/prometheus/client_model/go", "ImportPath": "github.com/prometheus/client_model/go",
@ -64,27 +69,23 @@
}, },
{ {
"ImportPath": "github.com/prometheus/log", "ImportPath": "github.com/prometheus/log",
"Rev": "c1344118e003a86aefb326a436118ad1317266dd" "Rev": "439e5db48fbb50ebbaf2c816030473a62f505f55"
}, },
{ {
"ImportPath": "github.com/prometheus/procfs", "ImportPath": "github.com/prometheus/procfs",
"Rev": "ee2372b58cee877abe07cde670d04d3b3bac5ee6" "Rev": "c91d8eefde16bd047416409eb56353ea84a186e4"
}, },
{ {
"ImportPath": "github.com/samuel/go-zookeeper/zk", "ImportPath": "github.com/samuel/go-zookeeper/zk",
"Rev": "c86eba8e7e95efab81f6c0455332e49d39aed12f" "Rev": "5bb5cfc093ad18a28148c578f8632cfdb4d802e4"
}, },
{ {
"ImportPath": "github.com/syndtr/goleveldb/leveldb", "ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "315fcfb05d4d46d4354b313d146ef688dda272a9" "Rev": "183614d6b32571e867df4cf086f5480ceefbdfac"
},
{
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
}, },
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "ad9eb3904af97b912b9a242efb203c5c6782e72a" "Rev": "b71143c25f0aad5f54981684b715686d34c56d25"
}, },
{ {
"ImportPath": "gopkg.in/fsnotify.v1", "ImportPath": "gopkg.in/fsnotify.v1",
@ -93,7 +94,7 @@
}, },
{ {
"ImportPath": "gopkg.in/yaml.v2", "ImportPath": "gopkg.in/yaml.v2",
"Rev": "49c95bdc21843256fb6c4e0d370a05f24a0bf213" "Rev": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40"
} }
] ]
} }

View file

@ -1,11 +1,37 @@
# 0.8 # 0.8.5
logrus: defaults to stderr instead of stdout * logrus/core: revert #208
# 0.8.4
* formatter/text: fix data race (#218)
# 0.8.3
* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely
# 0.8.2
* logrus: fix more Fatal family functions
# 0.8.1
* logrus: fix not exiting on `Fatalf` and `Fatalln`
# 0.8.0
* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors
# 0.7.3 # 0.7.3
formatter/\*: allow configuration of timestamp layout * formatter/\*: allow configuration of timestamp layout
# 0.7.2 # 0.7.2
formatter/text: Add configuration option for time format (#158) * formatter/text: Add configuration option for time format (#158)

View file

@ -183,7 +183,7 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
import ( import (
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake" "github.com/Sirupsen/logrus/hooks/airbrake"
"github.com/Sirupsen/logrus/hooks/syslog" logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog" "log/syslog"
) )
@ -206,12 +206,18 @@ func init() {
| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | | [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | | [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | | [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | | [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | | [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | | [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | | [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | | [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | | [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
#### Level logging #### Level logging
@ -267,7 +273,7 @@ init() {
// do something here to set environment depending on an environment variable // do something here to set environment depending on an environment variable
// or command-line flag // or command-line flag
if Environment == "production" { if Environment == "production" {
log.SetFormatter(logrus.JSONFormatter) log.SetFormatter(&logrus.JSONFormatter{})
} else { } else {
// The TextFormatter is default, you don't actually have to do this. // The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{}) log.SetFormatter(&log.TextFormatter{})
@ -324,7 +330,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
#### Logger as an `io.Writer` #### Logger as an `io.Writer`
Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go ```go
w := logger.Writer() w := logger.Writer()

View file

@ -188,6 +188,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel { if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...)) entry.Fatal(fmt.Sprintf(format, args...))
} }
os.Exit(1)
} }
func (entry *Entry) Panicf(format string, args ...interface{}) { func (entry *Entry) Panicf(format string, args ...interface{}) {
@ -234,6 +235,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel { if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...)) entry.Fatal(entry.sprintlnn(args...))
} }
os.Exit(1)
} }
func (entry *Entry) Panicln(args ...interface{}) { func (entry *Entry) Panicln(args ...interface{}) {
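
The hunks above (together with the matching changes to `Logger.Fatalf`/`Fatal`/`Fatalln` further down) add an unconditional `os.Exit(1)`, which is what the 0.8.1/0.8.2 changelog entries about the Fatal family refer to. A minimal sketch of the resulting behavior, assuming the vendored logrus revision:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	// Even when the level filter suppresses the message itself,
	// Fatalf now always terminates the process via os.Exit(1).
	log.SetLevel(log.PanicLevel)
	log.Fatalf("suppressed by the level filter, but the process still exits")
	log.Info("never reached")
}
```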

View file

@ -1,6 +1,7 @@
package logrus package logrus
import ( import (
"fmt"
"testing" "testing"
"time" "time"
) )
@ -45,6 +46,15 @@ var largeFields = Fields{
"entries": "yeah", "entries": "yeah",
} }
var errorFields = Fields{
"foo": fmt.Errorf("bar"),
"baz": fmt.Errorf("qux"),
}
func BenchmarkErrorTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
}
func BenchmarkSmallTextFormatter(b *testing.B) { func BenchmarkSmallTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
} }

View file

@ -11,11 +11,11 @@ type Hook interface {
} }
// Internal type for storing the hooks on a logger instance. // Internal type for storing the hooks on a logger instance.
type levelHooks map[Level][]Hook type LevelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with // Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. // `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks levelHooks) Add(hook Hook) { func (hooks LevelHooks) Add(hook Hook) {
for _, level := range hook.Levels() { for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook) hooks[level] = append(hooks[level], hook)
} }
@ -23,7 +23,7 @@ func (hooks levelHooks) Add(hook Hook) {
// Fire all the hooks for the passed level. Used by `entry.log` to fire // Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry. // appropriate hooks for a log entry.
func (hooks levelHooks) Fire(level Level, entry *Entry) error { func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] { for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil { if err := hook.Fire(entry); err != nil {
return err return err
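
The rename above exposes the hook registry as the public `LevelHooks` type (the "expose `LevelHooks` type" entry in the 0.8.3 changelog). A hedged sketch of a caller-side custom hook; the `countingHook` type below is hypothetical:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// countingHook is a hypothetical hook that counts error-and-above entries.
type countingHook struct{ count int }

func (h *countingHook) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

func (h *countingHook) Fire(entry *log.Entry) error {
	h.count++
	return nil
}

func main() {
	logger := log.New()
	// Hooks is now the exported LevelHooks map, so loggers can also be
	// constructed literally with Hooks: make(log.LevelHooks).
	logger.Hooks.Add(&countingHook{})
	logger.Error("something went wrong")
}
```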

View file

@ -31,15 +31,32 @@ func main() {
} }
``` ```
If you wish to initialize a SentryHook with tags, you can use the `NewWithTagsSentryHook` constructor to provide default tags:
```go
tags := map[string]string{
"site": "example.com",
}
levels := []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
}
hook, err := logrus_sentry.NewWithTagsSentryHook(YOUR_DSN, tags, levels)
```
## Special fields ## Special fields
Some logrus fields have a special meaning in this hook, Some logrus fields have a special meaning in this hook,
these are server_name and logger. these are `server_name`, `logger` and `http_request`.
When logs are sent to sentry these fields are treated differently. When logs are sent to sentry these fields are treated differently.
- server_name (also known as hostname) is the name of the server which - `server_name` (also known as hostname) is the name of the server which
is logging the event (hostname.example.com) is logging the event (hostname.example.com)
- logger is the part of the application which is logging the event. - `logger` is the part of the application which is logging the event.
In go this usually means setting it to the name of the package. In go this usually means setting it to the name of the package.
- `http_request` is the in-coming request(*http.Request). The detailed request data are sent to Sentry.
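
A hedged sketch of setting these special fields from application code; the URL, field values and message are made up for illustration, and the Sentry hook still has to be registered first as shown in the constructor examples above:

```go
package main

import (
	"net/http"

	"github.com/Sirupsen/logrus"
)

func main() {
	// Hypothetical incoming request for illustration only.
	req, _ := http.NewRequest("GET", "http://example.com/orders/42", nil)

	logrus.WithFields(logrus.Fields{
		"server_name":  "hostname.example.com", // reported to Sentry as the host
		"logger":       "orders",               // reported as the logger name
		"http_request": req,                    // request details attached via raven.NewHttp
	}).Error("failed to process order")
}
```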
## Timeout ## Timeout

View file

@ -2,6 +2,7 @@ package logrus_sentry
import ( import (
"fmt" "fmt"
"net/http"
"time" "time"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
@ -36,6 +37,22 @@ func getAndDel(d logrus.Fields, key string) (string, bool) {
return val, true return val, true
} }
func getAndDelRequest(d logrus.Fields, key string) (*http.Request, bool) {
var (
ok bool
v interface{}
req *http.Request
)
if v, ok = d[key]; !ok {
return nil, false
}
if req, ok = v.(*http.Request); !ok || req == nil {
return nil, false
}
delete(d, key)
return req, true
}
// SentryHook delivers logs to a sentry server. // SentryHook delivers logs to a sentry server.
type SentryHook struct { type SentryHook struct {
// Timeout sets the time to wait for a delivery error from the sentry server. // Timeout sets the time to wait for a delivery error from the sentry server.
@ -51,7 +68,18 @@ type SentryHook struct {
// and initializes the raven client. // and initializes the raven client.
// This method sets the timeout to 100 milliseconds. // This method sets the timeout to 100 milliseconds.
func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) { func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
client, err := raven.NewClient(DSN, nil) client, err := raven.New(DSN)
if err != nil {
return nil, err
}
return &SentryHook{100 * time.Millisecond, client, levels}, nil
}
// NewWithTagsSentryHook creates a hook with tags to be added to an instance
// of logger and initializes the raven client. This method sets the timeout to
// 100 milliseconds.
func NewWithTagsSentryHook(DSN string, tags map[string]string, levels []logrus.Level) (*SentryHook, error) {
client, err := raven.NewWithTags(DSN, tags)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -61,7 +89,7 @@ func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
// Called when an event should be sent to sentry // Called when an event should be sent to sentry
// Special fields that sentry uses to give more information to the server // Special fields that sentry uses to give more information to the server
// are extracted from entry.Data (if they are found) // are extracted from entry.Data (if they are found)
// These fields are: logger and server_name // These fields are: logger, server_name and http_request
func (hook *SentryHook) Fire(entry *logrus.Entry) error { func (hook *SentryHook) Fire(entry *logrus.Entry) error {
packet := &raven.Packet{ packet := &raven.Packet{
Message: entry.Message, Message: entry.Message,
@ -78,6 +106,9 @@ func (hook *SentryHook) Fire(entry *logrus.Entry) error {
if serverName, ok := getAndDel(d, "server_name"); ok { if serverName, ok := getAndDel(d, "server_name"); ok {
packet.ServerName = serverName packet.ServerName = serverName
} }
if req, ok := getAndDelRequest(d, "http_request"); ok {
packet.Interfaces = append(packet.Interfaces, raven.NewHttp(req))
}
packet.Extra = map[string]interface{}(d) packet.Extra = map[string]interface{}(d)
_, errCh := hook.client.Capture(packet, nil) _, errCh := hook.client.Capture(packet, nil)

View file

@ -6,6 +6,7 @@ import (
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"reflect"
"strings" "strings"
"testing" "testing"
@ -61,9 +62,12 @@ func TestSpecialFields(t *testing.T) {
t.Fatal(err.Error()) t.Fatal(err.Error())
} }
logger.Hooks.Add(hook) logger.Hooks.Add(hook)
req, _ := http.NewRequest("GET", "url", nil)
logger.WithFields(logrus.Fields{ logger.WithFields(logrus.Fields{
"server_name": server_name, "server_name": server_name,
"logger": logger_name, "logger": logger_name,
"http_request": req,
}).Error(message) }).Error(message)
packet := <-pch packet := <-pch
@ -95,3 +99,34 @@ func TestSentryHandler(t *testing.T) {
} }
}) })
} }
func TestSentryTags(t *testing.T) {
WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
logger := getTestLogger()
tags := map[string]string{
"site": "test",
}
levels := []logrus.Level{
logrus.ErrorLevel,
}
hook, err := NewWithTagsSentryHook(dsn, tags, levels)
if err != nil {
t.Fatal(err.Error())
}
logger.Hooks.Add(hook)
logger.Error(message)
packet := <-pch
expected := raven.Tags{
raven.Tag{
Key: "site",
Value: "test",
},
}
if !reflect.DeepEqual(packet.Tags, expected) {
t.Errorf("message should have been %s, was %s", message, packet.Message)
}
})
}

View file

@ -24,11 +24,12 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
} }
prefixFieldClashes(data) prefixFieldClashes(data)
if f.TimestampFormat == "" { timestampFormat := f.TimestampFormat
f.TimestampFormat = DefaultTimestampFormat if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
} }
data["time"] = entry.Time.Format(f.TimestampFormat) data["time"] = entry.Time.Format(timestampFormat)
data["msg"] = entry.Message data["msg"] = entry.Message
data["level"] = entry.Level.String() data["level"] = entry.Level.String()

View file

@ -14,7 +14,7 @@ type Logger struct {
// Hooks for the logger instance. These allow firing events based on logging // Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking // levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors. // service, log to StatsD or dump the core on fatal errors.
Hooks levelHooks Hooks LevelHooks
// All log entries pass through the formatter before logged to Out. The // All log entries pass through the formatter before logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which // included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it // TextFormatter is the default. In development (when a TTY is attached) it
@ -37,7 +37,7 @@ type Logger struct {
// var log = &Logger{ // var log = &Logger{
// Out: os.Stderr, // Out: os.Stderr,
// Formatter: new(JSONFormatter), // Formatter: new(JSONFormatter),
// Hooks: make(levelHooks), // Hooks: make(LevelHooks),
// Level: logrus.DebugLevel, // Level: logrus.DebugLevel,
// } // }
// //
@ -46,7 +46,7 @@ func New() *Logger {
return &Logger{ return &Logger{
Out: os.Stderr, Out: os.Stderr,
Formatter: new(TextFormatter), Formatter: new(TextFormatter),
Hooks: make(levelHooks), Hooks: make(LevelHooks),
Level: InfoLevel, Level: InfoLevel,
} }
} }
@ -102,6 +102,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel { if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...) NewEntry(logger).Fatalf(format, args...)
} }
os.Exit(1)
} }
func (logger *Logger) Panicf(format string, args ...interface{}) { func (logger *Logger) Panicf(format string, args ...interface{}) {
@ -148,6 +149,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel { if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...) NewEntry(logger).Fatal(args...)
} }
os.Exit(1)
} }
func (logger *Logger) Panic(args ...interface{}) { func (logger *Logger) Panic(args ...interface{}) {
@ -194,6 +196,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel { if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...) NewEntry(logger).Fatalln(args...)
} }
os.Exit(1)
} }
func (logger *Logger) Panicln(args ...interface{}) { func (logger *Logger) Panicln(args ...interface{}) {

View file

@ -1,3 +1,5 @@
// +build darwin freebsd openbsd netbsd dragonfly
package logrus package logrus
import "syscall" import "syscall"

View file

@ -1,12 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

View file

@ -1,20 +0,0 @@
/*
Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
*/
package logrus
import (
"syscall"
)
const ioctlReadTermios = syscall.TIOCGETA
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}

View file

@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd // +build linux darwin freebsd openbsd netbsd dragonfly
package logrus package logrus

View file

@ -73,14 +73,15 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
isColorTerminal := isTerminal && (runtime.GOOS != "windows") isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
if f.TimestampFormat == "" { timestampFormat := f.TimestampFormat
f.TimestampFormat = DefaultTimestampFormat if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
} }
if isColored { if isColored {
f.printColored(b, entry, keys) f.printColored(b, entry, keys, timestampFormat)
} else { } else {
if !f.DisableTimestamp { if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
} }
f.appendKeyValue(b, "level", entry.Level.String()) f.appendKeyValue(b, "level", entry.Level.String())
f.appendKeyValue(b, "msg", entry.Message) f.appendKeyValue(b, "msg", entry.Message)
@ -93,7 +94,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
return b.Bytes(), nil return b.Bytes(), nil
} }
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int var levelColor int
switch entry.Level { switch entry.Level {
case DebugLevel: case DebugLevel:
@ -111,11 +112,11 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
if !f.FullTimestamp { if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
} else { } else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
} }
for _, k := range keys { for _, k := range keys {
v := entry.Data[k] v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
} }
} }
@ -131,21 +132,28 @@ func needsQuoting(text string) bool {
return true return true
} }
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
switch value.(type) {
b.WriteString(key)
b.WriteByte('=')
switch value := value.(type) {
case string: case string:
if needsQuoting(value.(string)) { if needsQuoting(value) {
fmt.Fprintf(b, "%v=%s ", key, value) b.WriteString(value)
} else { } else {
fmt.Fprintf(b, "%v=%q ", key, value) fmt.Fprintf(b, "%q", value)
} }
case error: case error:
if needsQuoting(value.(error).Error()) { errmsg := value.Error()
fmt.Fprintf(b, "%v=%s ", key, value) if needsQuoting(errmsg) {
b.WriteString(errmsg)
} else { } else {
fmt.Fprintf(b, "%v=%q ", key, value) fmt.Fprintf(b, "%q", value)
} }
default: default:
fmt.Fprintf(b, "%v=%v ", key, value) fmt.Fprint(b, value)
} }
b.WriteByte(' ')
} }
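
Two user-visible effects of the text formatter changes above: colored output renders values with `%+v` ("print structs more verbosely" in the 0.8.3 changelog), and `appendKeyValue` now writes the key and value separately instead of through a single `%v=%q` format. A hedged sketch; the `peer` struct and values are made up:

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

type peer struct {
	Addr string
	Port int
}

func main() {
	logger := log.New()
	logger.Out = os.Stderr
	// With ForceColors, struct fields now show up by name,
	// e.g. peer={Addr:10.0.0.1 Port:9090} rather than {10.0.0.1 9090}.
	logger.Formatter = &log.TextFormatter{ForceColors: true}
	logger.WithField("peer", peer{Addr: "10.0.0.1", Port: 9090}).Info("peer discovered")
}
```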

View file

@ -1925,6 +1925,18 @@ func TestMapFieldRoundTrips(t *testing.T) {
} }
} }
func TestMapFieldWithNil(t *testing.T) {
m := &MessageWithMap{
MsgMapping: map[int64]*FloatingPoint{
1: nil,
},
}
b, err := Marshal(m)
if err == nil {
t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b)
}
}
// Benchmarks // Benchmarks
func testMsg() *GoTest { func testMsg() *GoTest {

View file

@ -75,12 +75,13 @@ func Merge(dst, src Message) {
} }
func mergeStruct(out, in reflect.Value) { func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ { for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i) f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") { if strings.HasPrefix(f.Name, "XXX_") {
continue continue
} }
mergeAny(out.Field(i), in.Field(i)) mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
} }
if emIn, ok := in.Addr().Interface().(extendableProto); ok { if emIn, ok := in.Addr().Interface().(extendableProto); ok {
@ -98,7 +99,10 @@ func mergeStruct(out, in reflect.Value) {
} }
} }
func mergeAny(out, in reflect.Value) { // mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType { if in.Type() == protoMessageType {
if !in.IsNil() { if !in.IsNil() {
if out.IsNil() { if out.IsNil() {
@ -112,6 +116,9 @@ func mergeAny(out, in reflect.Value) {
switch in.Kind() { switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64: reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in) out.Set(in)
case reflect.Map: case reflect.Map:
if in.Len() == 0 { if in.Len() == 0 {
@ -127,7 +134,7 @@ func mergeAny(out, in reflect.Value) {
switch elemKind { switch elemKind {
case reflect.Ptr: case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem()) val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key)) mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice: case reflect.Slice:
val = in.MapIndex(key) val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
@ -143,13 +150,21 @@ func mergeAny(out, in reflect.Value) {
if out.IsNil() { if out.IsNil() {
out.Set(reflect.New(in.Elem().Type())) out.Set(reflect.New(in.Elem().Type()))
} }
mergeAny(out.Elem(), in.Elem()) mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice: case reflect.Slice:
if in.IsNil() { if in.IsNil() {
return return
} }
if in.Type().Elem().Kind() == reflect.Uint8 { if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field. // []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy. // Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up // Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result. // with a nil result.
@ -167,7 +182,7 @@ func mergeAny(out, in reflect.Value) {
default: default:
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem())) x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i)) mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x)) out.Set(reflect.Append(out, x))
} }
} }
@ -184,7 +199,7 @@ func mergeExtension(out, in map[int32]Extension) {
eOut := Extension{desc: eIn.desc} eOut := Extension{desc: eIn.desc}
if eIn.value != nil { if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem() v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value)) mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface() eOut.value = v.Interface()
} }
if eIn.enc != nil { if eIn.enc != nil {
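
The `viaPtr` flag and the new `isProto3Zero` helper (added in properties.go below) are what let `Merge` skip proto3 zero values while still copying explicitly set proto2 fields. A hedged sketch mirroring the new test case in clone_test.go:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
	// Data in src is a zero-length (but non-nil) proto3 bytes field, so it
	// counts as a zero value and must not overwrite dst's "texas!".
	src := &proto3pb.Message{Name: "Aaron", Data: []byte("")}
	dst := &proto3pb.Message{HeightInCm: 176, Data: []byte("texas!")}

	proto.Merge(dst, src)
	fmt.Printf("%s %d %s\n", dst.Name, dst.HeightInCm, dst.Data) // Aaron 176 texas!
}
```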

View file

@ -36,6 +36,7 @@ import (
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
pb "github.com/golang/protobuf/proto/testdata" pb "github.com/golang/protobuf/proto/testdata"
) )
@ -214,6 +215,23 @@ var mergeTests = []struct {
ByteMapping: map[bool][]byte{true: []byte("wowsa")}, ByteMapping: map[bool][]byte{true: []byte("wowsa")},
}, },
}, },
// proto3 shouldn't merge zero values,
// in the same way that proto2 shouldn't merge nils.
{
src: &proto3pb.Message{
Name: "Aaron",
Data: []byte(""), // zero value, but not nil
},
dst: &proto3pb.Message{
HeightInCm: 176,
Data: []byte("texas!"),
},
want: &proto3pb.Message{
Name: "Aaron",
HeightInCm: 176,
Data: []byte("texas!"),
},
},
} }
func TestMerge(t *testing.T) { func TestMerge(t *testing.T) {

View file

@ -675,7 +675,7 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
oi := o.index // index at the end of this map entry oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() { if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
} }

View file

@ -1084,7 +1084,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
repeated MapFieldEntry map_field = N; repeated MapFieldEntry map_field = N;
*/ */
v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
if v.Len() == 0 { if v.Len() == 0 {
return nil return nil
} }
@ -1106,6 +1106,11 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
for _, key := range keys { for _, key := range keys {
val := v.MapIndex(key) val := v.MapIndex(key)
// The only illegal map entry values are nil message pointers.
if val.Kind() == reflect.Ptr && val.IsNil() {
return errors.New("proto: map has nil element")
}
keycopy.Set(key) keycopy.Set(key)
valcopy.Set(val) valcopy.Set(val)
@ -1118,7 +1123,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
} }
func size_new_map(p *Properties, base structPointer) int { func size_new_map(p *Properties, base structPointer) int {
v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)

View file

@ -222,7 +222,7 @@ func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
} }
// GetExtension parses and returns the given extension of pb. // GetExtension parses and returns the given extension of pb.
// If the extension is not present it returns ErrMissingExtension. // If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil { if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err return nil, err
@ -231,8 +231,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
emap := pb.ExtensionMap() emap := pb.ExtensionMap()
e, ok := emap[extension.Field] e, ok := emap[extension.Field]
if !ok { if !ok {
return nil, ErrMissingExtension // defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
} }
if e.value != nil { if e.value != nil {
// Already decoded. Check the descriptor, though. // Already decoded. Check the descriptor, though.
if e.desc != extension { if e.desc != extension {
@ -258,6 +261,41 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
return e.value, nil return e.value, nil
} }
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non int32 reflect.value directly
// set it as a int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b. // decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b) o := NewBuffer(b)
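
With `defaultExtensionValue` in place, `GetExtension` falls back to an extension's declared default instead of always returning `ErrMissingExtension` when the field is unset. A hedged sketch using the `DefaultsMessage` extensions added to the testdata package later in this change:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	msg := &pb.DefaultsMessage{}

	// Unset and no default declared: still ErrMissingExtension.
	if _, err := proto.GetExtension(msg, pb.E_NoDefaultInt32); err == proto.ErrMissingExtension {
		fmt.Println("no value and no default")
	}

	// Unset, but the extension declares def=42: the default is returned.
	if v, err := proto.GetExtension(msg, pb.E_DefaultInt32); err == nil {
		fmt.Println("default:", *v.(*int32)) // 42
	}
}
```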

View file

@ -32,6 +32,8 @@
package proto_test package proto_test
import ( import (
"fmt"
"reflect"
"testing" "testing"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
@ -93,6 +95,143 @@ func TestGetExtensionStability(t *testing.T) {
} }
} }
func TestGetExtensionDefaults(t *testing.T) {
var setFloat64 float64 = 1
var setFloat32 float32 = 2
var setInt32 int32 = 3
var setInt64 int64 = 4
var setUint32 uint32 = 5
var setUint64 uint64 = 6
var setBool = true
var setBool2 = false
var setString = "Goodnight string"
var setBytes = []byte("Goodnight bytes")
var setEnum = pb.DefaultsMessage_TWO
type testcase struct {
ext *proto.ExtensionDesc // Extension we are testing.
want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail).
def interface{} // Expected value of extension after ClearExtension().
}
tests := []testcase{
{pb.E_NoDefaultDouble, setFloat64, nil},
{pb.E_NoDefaultFloat, setFloat32, nil},
{pb.E_NoDefaultInt32, setInt32, nil},
{pb.E_NoDefaultInt64, setInt64, nil},
{pb.E_NoDefaultUint32, setUint32, nil},
{pb.E_NoDefaultUint64, setUint64, nil},
{pb.E_NoDefaultSint32, setInt32, nil},
{pb.E_NoDefaultSint64, setInt64, nil},
{pb.E_NoDefaultFixed32, setUint32, nil},
{pb.E_NoDefaultFixed64, setUint64, nil},
{pb.E_NoDefaultSfixed32, setInt32, nil},
{pb.E_NoDefaultSfixed64, setInt64, nil},
{pb.E_NoDefaultBool, setBool, nil},
{pb.E_NoDefaultBool, setBool2, nil},
{pb.E_NoDefaultString, setString, nil},
{pb.E_NoDefaultBytes, setBytes, nil},
{pb.E_NoDefaultEnum, setEnum, nil},
{pb.E_DefaultDouble, setFloat64, float64(3.1415)},
{pb.E_DefaultFloat, setFloat32, float32(3.14)},
{pb.E_DefaultInt32, setInt32, int32(42)},
{pb.E_DefaultInt64, setInt64, int64(43)},
{pb.E_DefaultUint32, setUint32, uint32(44)},
{pb.E_DefaultUint64, setUint64, uint64(45)},
{pb.E_DefaultSint32, setInt32, int32(46)},
{pb.E_DefaultSint64, setInt64, int64(47)},
{pb.E_DefaultFixed32, setUint32, uint32(48)},
{pb.E_DefaultFixed64, setUint64, uint64(49)},
{pb.E_DefaultSfixed32, setInt32, int32(50)},
{pb.E_DefaultSfixed64, setInt64, int64(51)},
{pb.E_DefaultBool, setBool, true},
{pb.E_DefaultBool, setBool2, true},
{pb.E_DefaultString, setString, "Hello, string"},
{pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")},
{pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE},
}
checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error {
val, err := proto.GetExtension(msg, test.ext)
if err != nil {
if valWant != nil {
return fmt.Errorf("GetExtension(): %s", err)
}
if want := proto.ErrMissingExtension; err != want {
return fmt.Errorf("Unexpected error: got %v, want %v", err, want)
}
return nil
}
// All proto2 extension values are either a pointer to a value or a slice of values.
ty := reflect.TypeOf(val)
tyWant := reflect.TypeOf(test.ext.ExtensionType)
if got, want := ty, tyWant; got != want {
return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want)
}
tye := ty.Elem()
tyeWant := tyWant.Elem()
if got, want := tye, tyeWant; got != want {
return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want)
}
// Check the name of the type of the value.
// If it is an enum it will be type int32 with the name of the enum.
if got, want := tye.Name(), tye.Name(); got != want {
return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want)
}
// Check that value is what we expect.
// If we have a pointer in val, get the value it points to.
valExp := val
if ty.Kind() == reflect.Ptr {
valExp = reflect.ValueOf(val).Elem().Interface()
}
if got, want := valExp, valWant; !reflect.DeepEqual(got, want) {
return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want)
}
return nil
}
setTo := func(test testcase) interface{} {
setTo := reflect.ValueOf(test.want)
if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr {
setTo = reflect.New(typ).Elem()
setTo.Set(reflect.New(setTo.Type().Elem()))
setTo.Elem().Set(reflect.ValueOf(test.want))
}
return setTo.Interface()
}
for _, test := range tests {
msg := &pb.DefaultsMessage{}
name := test.ext.Name
// Check the initial value.
if err := checkVal(test, msg, test.def); err != nil {
t.Errorf("%s: %v", name, err)
}
// Set the per-type value and check value.
name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want)
if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil {
t.Errorf("%s: SetExtension(): %v", name, err)
continue
}
if err := checkVal(test, msg, test.want); err != nil {
t.Errorf("%s: %v", name, err)
continue
}
// Set and check the value.
name += " (cleared)"
proto.ClearExtension(msg, test.ext)
if err := checkVal(test, msg, test.def); err != nil {
t.Errorf("%s: %v", name, err)
}
}
}
func TestExtensionsRoundTrip(t *testing.T) { func TestExtensionsRoundTrip(t *testing.T) {
msg := &pb.MyMessage{} msg := &pb.MyMessage{}
ext1 := &pb.Ext{ ext1 := &pb.Ext{

View file

@ -668,114 +668,120 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
} }
ft := t.Field(fi).Type ft := t.Field(fi).Type
var canHaveDefault, nestedMessage bool sf, nested, err := fieldDefault(ft, prop)
switch ft.Kind() { switch {
case reflect.Ptr: case err != nil:
if ft.Elem().Kind() == reflect.Struct { log.Print(err)
nestedMessage = true case nested:
} else { dm.nested = append(dm.nested, fi)
canHaveDefault = true // proto2 scalar field case sf != nil:
} sf.index = fi
dm.scalars = append(dm.scalars, *sf)
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
} }
if !canHaveDefault {
if nestedMessage {
dm.nested = append(dm.nested, fi)
}
continue
}
sf := scalarField{
index: fi,
kind: ft.Elem().Kind(),
}
// scalar fields without defaults
if !prop.HasDefault {
dm.scalars = append(dm.scalars, sf)
continue
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
log.Printf("proto: bad default bool %q: %v", prop.Default, err)
continue
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
log.Printf("proto: bad default float32 %q: %v", prop.Default, err)
continue
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
log.Printf("proto: bad default float64 %q: %v", prop.Default, err)
continue
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
log.Printf("proto: bad default int32 %q: %v", prop.Default, err)
continue
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
log.Printf("proto: bad default int64 %q: %v", prop.Default, err)
continue
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
log.Printf("proto: bad default uint32 %q: %v", prop.Default, err)
continue
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
log.Printf("proto: bad default uint64 %q: %v", prop.Default, err)
continue
}
sf.value = x
default:
log.Printf("proto: unhandled def kind %v", ft.Elem().Kind())
continue
}
dm.scalars = append(dm.scalars, sf)
} }
return dm return dm
} }
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field can not have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// Map fields may have key types of non-float scalars, strings and enums. // Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt. // The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options, // If this turns out to be inefficient we can always consider other options,
@ -788,3 +794,20 @@ func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s mapKeys) Less(i, j int) bool { func (s mapKeys) Less(i, j int) bool {
return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
} }
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}

View file

@ -144,8 +144,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension) return structPointer_ifield(p, f).(*map[int32]Extension)
} }
// Map returns the reflect.Value for the address of a map field in the struct. // NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr() return structPointer_field(p, f).Addr()
} }

View file

@ -130,8 +130,8 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
} }
// Map returns the reflect.Value for the address of a map field in the struct. // NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
} }

View file

@ -22,6 +22,7 @@ It has these top-level messages:
OtherMessage OtherMessage
MyMessage MyMessage
Ext Ext
DefaultsMessage
MyMessageSet MyMessageSet
Empty Empty
MessageList MessageList
@ -181,6 +182,42 @@ func (x *MyMessage_Color) UnmarshalJSON(data []byte) error {
return nil return nil
} }
type DefaultsMessage_DefaultsEnum int32
const (
DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0
DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1
DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2
)
var DefaultsMessage_DefaultsEnum_name = map[int32]string{
0: "ZERO",
1: "ONE",
2: "TWO",
}
var DefaultsMessage_DefaultsEnum_value = map[string]int32{
"ZERO": 0,
"ONE": 1,
"TWO": 2,
}
func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum {
p := new(DefaultsMessage_DefaultsEnum)
*p = x
return p
}
func (x DefaultsMessage_DefaultsEnum) String() string {
return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x))
}
func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum")
if err != nil {
return err
}
*x = DefaultsMessage_DefaultsEnum(value)
return nil
}
type Defaults_Color int32 type Defaults_Color int32
const ( const (
@ -1402,6 +1439,29 @@ var E_Ext_Number = &proto.ExtensionDesc{
Tag: "varint,105,opt,name=number", Tag: "varint,105,opt,name=number",
} }
type DefaultsMessage struct {
XXX_extensions map[int32]proto.Extension `json:"-"`
XXX_unrecognized []byte `json:"-"`
}
func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} }
func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) }
func (*DefaultsMessage) ProtoMessage() {}
var extRange_DefaultsMessage = []proto.ExtensionRange{
{100, 536870911},
}
func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_DefaultsMessage
}
func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension {
if m.XXX_extensions == nil {
m.XXX_extensions = make(map[int32]proto.Extension)
}
return m.XXX_extensions
}
type MyMessageSet struct { type MyMessageSet struct {
XXX_extensions map[int32]proto.Extension `json:"-"` XXX_extensions map[int32]proto.Extension `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
@ -1934,6 +1994,262 @@ var E_Greeting = &proto.ExtensionDesc{
Tag: "bytes,106,rep,name=greeting", Tag: "bytes,106,rep,name=greeting",
} }
var E_NoDefaultDouble = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*float64)(nil),
Field: 101,
Name: "testdata.no_default_double",
Tag: "fixed64,101,opt,name=no_default_double",
}
var E_NoDefaultFloat = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*float32)(nil),
Field: 102,
Name: "testdata.no_default_float",
Tag: "fixed32,102,opt,name=no_default_float",
}
var E_NoDefaultInt32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int32)(nil),
Field: 103,
Name: "testdata.no_default_int32",
Tag: "varint,103,opt,name=no_default_int32",
}
var E_NoDefaultInt64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int64)(nil),
Field: 104,
Name: "testdata.no_default_int64",
Tag: "varint,104,opt,name=no_default_int64",
}
var E_NoDefaultUint32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*uint32)(nil),
Field: 105,
Name: "testdata.no_default_uint32",
Tag: "varint,105,opt,name=no_default_uint32",
}
var E_NoDefaultUint64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*uint64)(nil),
Field: 106,
Name: "testdata.no_default_uint64",
Tag: "varint,106,opt,name=no_default_uint64",
}
var E_NoDefaultSint32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int32)(nil),
Field: 107,
Name: "testdata.no_default_sint32",
Tag: "zigzag32,107,opt,name=no_default_sint32",
}
var E_NoDefaultSint64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int64)(nil),
Field: 108,
Name: "testdata.no_default_sint64",
Tag: "zigzag64,108,opt,name=no_default_sint64",
}
var E_NoDefaultFixed32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*uint32)(nil),
Field: 109,
Name: "testdata.no_default_fixed32",
Tag: "fixed32,109,opt,name=no_default_fixed32",
}
var E_NoDefaultFixed64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*uint64)(nil),
Field: 110,
Name: "testdata.no_default_fixed64",
Tag: "fixed64,110,opt,name=no_default_fixed64",
}
var E_NoDefaultSfixed32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int32)(nil),
Field: 111,
Name: "testdata.no_default_sfixed32",
Tag: "fixed32,111,opt,name=no_default_sfixed32",
}
var E_NoDefaultSfixed64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int64)(nil),
Field: 112,
Name: "testdata.no_default_sfixed64",
Tag: "fixed64,112,opt,name=no_default_sfixed64",
}
var E_NoDefaultBool = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*bool)(nil),
Field: 113,
Name: "testdata.no_default_bool",
Tag: "varint,113,opt,name=no_default_bool",
}
var E_NoDefaultString = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*string)(nil),
Field: 114,
Name: "testdata.no_default_string",
Tag: "bytes,114,opt,name=no_default_string",
}
var E_NoDefaultBytes = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: ([]byte)(nil),
Field: 115,
Name: "testdata.no_default_bytes",
Tag: "bytes,115,opt,name=no_default_bytes",
}
var E_NoDefaultEnum = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
Field: 116,
Name: "testdata.no_default_enum",
Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum",
}
var E_DefaultDouble = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*float64)(nil),
Field: 201,
Name: "testdata.default_double",
Tag: "fixed64,201,opt,name=default_double,def=3.1415",
}
var E_DefaultFloat = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*float32)(nil),
Field: 202,
Name: "testdata.default_float",
Tag: "fixed32,202,opt,name=default_float,def=3.14",
}
var E_DefaultInt32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int32)(nil),
Field: 203,
Name: "testdata.default_int32",
Tag: "varint,203,opt,name=default_int32,def=42",
}
var E_DefaultInt64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int64)(nil),
Field: 204,
Name: "testdata.default_int64",
Tag: "varint,204,opt,name=default_int64,def=43",
}
var E_DefaultUint32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*uint32)(nil),
Field: 205,
Name: "testdata.default_uint32",
Tag: "varint,205,opt,name=default_uint32,def=44",
}
var E_DefaultUint64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*uint64)(nil),
Field: 206,
Name: "testdata.default_uint64",
Tag: "varint,206,opt,name=default_uint64,def=45",
}
var E_DefaultSint32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int32)(nil),
Field: 207,
Name: "testdata.default_sint32",
Tag: "zigzag32,207,opt,name=default_sint32,def=46",
}
var E_DefaultSint64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int64)(nil),
Field: 208,
Name: "testdata.default_sint64",
Tag: "zigzag64,208,opt,name=default_sint64,def=47",
}
var E_DefaultFixed32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*uint32)(nil),
Field: 209,
Name: "testdata.default_fixed32",
Tag: "fixed32,209,opt,name=default_fixed32,def=48",
}
var E_DefaultFixed64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*uint64)(nil),
Field: 210,
Name: "testdata.default_fixed64",
Tag: "fixed64,210,opt,name=default_fixed64,def=49",
}
var E_DefaultSfixed32 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int32)(nil),
Field: 211,
Name: "testdata.default_sfixed32",
Tag: "fixed32,211,opt,name=default_sfixed32,def=50",
}
var E_DefaultSfixed64 = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*int64)(nil),
Field: 212,
Name: "testdata.default_sfixed64",
Tag: "fixed64,212,opt,name=default_sfixed64,def=51",
}
var E_DefaultBool = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*bool)(nil),
Field: 213,
Name: "testdata.default_bool",
Tag: "varint,213,opt,name=default_bool,def=1",
}
var E_DefaultString = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*string)(nil),
Field: 214,
Name: "testdata.default_string",
Tag: "bytes,214,opt,name=default_string,def=Hello, string",
}
var E_DefaultBytes = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: ([]byte)(nil),
Field: 215,
Name: "testdata.default_bytes",
Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes",
}
var E_DefaultEnum = &proto.ExtensionDesc{
ExtendedType: (*DefaultsMessage)(nil),
ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
Field: 216,
Name: "testdata.default_enum",
Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1",
}
var E_X201 = &proto.ExtensionDesc{ var E_X201 = &proto.ExtensionDesc{
ExtendedType: (*MyMessageSet)(nil), ExtendedType: (*MyMessageSet)(nil),
ExtensionType: (*Empty)(nil), ExtensionType: (*Empty)(nil),
@ -2338,12 +2654,45 @@ func init() {
proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value)
proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value)
proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value)
proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value)
proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value)
proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value)
proto.RegisterExtension(E_Ext_More) proto.RegisterExtension(E_Ext_More)
proto.RegisterExtension(E_Ext_Text) proto.RegisterExtension(E_Ext_Text)
proto.RegisterExtension(E_Ext_Number) proto.RegisterExtension(E_Ext_Number)
proto.RegisterExtension(E_Greeting) proto.RegisterExtension(E_Greeting)
proto.RegisterExtension(E_NoDefaultDouble)
proto.RegisterExtension(E_NoDefaultFloat)
proto.RegisterExtension(E_NoDefaultInt32)
proto.RegisterExtension(E_NoDefaultInt64)
proto.RegisterExtension(E_NoDefaultUint32)
proto.RegisterExtension(E_NoDefaultUint64)
proto.RegisterExtension(E_NoDefaultSint32)
proto.RegisterExtension(E_NoDefaultSint64)
proto.RegisterExtension(E_NoDefaultFixed32)
proto.RegisterExtension(E_NoDefaultFixed64)
proto.RegisterExtension(E_NoDefaultSfixed32)
proto.RegisterExtension(E_NoDefaultSfixed64)
proto.RegisterExtension(E_NoDefaultBool)
proto.RegisterExtension(E_NoDefaultString)
proto.RegisterExtension(E_NoDefaultBytes)
proto.RegisterExtension(E_NoDefaultEnum)
proto.RegisterExtension(E_DefaultDouble)
proto.RegisterExtension(E_DefaultFloat)
proto.RegisterExtension(E_DefaultInt32)
proto.RegisterExtension(E_DefaultInt64)
proto.RegisterExtension(E_DefaultUint32)
proto.RegisterExtension(E_DefaultUint64)
proto.RegisterExtension(E_DefaultSint32)
proto.RegisterExtension(E_DefaultSint64)
proto.RegisterExtension(E_DefaultFixed32)
proto.RegisterExtension(E_DefaultFixed64)
proto.RegisterExtension(E_DefaultSfixed32)
proto.RegisterExtension(E_DefaultSfixed64)
proto.RegisterExtension(E_DefaultBool)
proto.RegisterExtension(E_DefaultString)
proto.RegisterExtension(E_DefaultBytes)
proto.RegisterExtension(E_DefaultEnum)
proto.RegisterExtension(E_X201) proto.RegisterExtension(E_X201)
proto.RegisterExtension(E_X202) proto.RegisterExtension(E_X202)
proto.RegisterExtension(E_X203) proto.RegisterExtension(E_X203)
View file
@ -277,6 +277,51 @@ extend MyMessage {
repeated string greeting = 106; repeated string greeting = 106;
} }
message DefaultsMessage {
enum DefaultsEnum {
ZERO = 0;
ONE = 1;
TWO = 2;
};
extensions 100 to max;
}
extend DefaultsMessage {
optional double no_default_double = 101;
optional float no_default_float = 102;
optional int32 no_default_int32 = 103;
optional int64 no_default_int64 = 104;
optional uint32 no_default_uint32 = 105;
optional uint64 no_default_uint64 = 106;
optional sint32 no_default_sint32 = 107;
optional sint64 no_default_sint64 = 108;
optional fixed32 no_default_fixed32 = 109;
optional fixed64 no_default_fixed64 = 110;
optional sfixed32 no_default_sfixed32 = 111;
optional sfixed64 no_default_sfixed64 = 112;
optional bool no_default_bool = 113;
optional string no_default_string = 114;
optional bytes no_default_bytes = 115;
optional DefaultsMessage.DefaultsEnum no_default_enum = 116;
optional double default_double = 201 [default = 3.1415];
optional float default_float = 202 [default = 3.14];
optional int32 default_int32 = 203 [default = 42];
optional int64 default_int64 = 204 [default = 43];
optional uint32 default_uint32 = 205 [default = 44];
optional uint64 default_uint64 = 206 [default = 45];
optional sint32 default_sint32 = 207 [default = 46];
optional sint64 default_sint64 = 208 [default = 47];
optional fixed32 default_fixed32 = 209 [default = 48];
optional fixed64 default_fixed64 = 210 [default = 49];
optional sfixed32 default_sfixed32 = 211 [default = 50];
optional sfixed64 default_sfixed64 = 212 [default = 51];
optional bool default_bool = 213 [default = true];
optional string default_string = 214 [default = "Hello, string"];
optional bytes default_bytes = 215 [default = "Hello, bytes"];
optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE];
}
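As a point of reference, a minimal sketch of what these declarations mean at runtime, assuming the generated testdata package shown earlier is importable as pb: extensions declared with a default yield that default from proto.GetExtension even when unset, while the no_default_* ones yield an error.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	m := new(pb.DefaultsMessage)

	// default_int32 carries def=42 in its tag, so GetExtension falls back to it.
	if v, err := proto.GetExtension(m, pb.E_DefaultInt32); err == nil {
		fmt.Println(*v.(*int32)) // 42
	}

	// no_default_int32 has no default, so an unset extension is reported as an error.
	if _, err := proto.GetExtension(m, pb.E_NoDefaultInt32); err != nil {
		fmt.Println("no_default_int32: unset and no default")
	}
}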
message MyMessageSet { message MyMessageSet {
option message_set_wire_format = true; option message_set_wire_format = true;
extensions 100 to max; extensions 100 to max;
View file
@ -41,7 +41,6 @@ import (
"io" "io"
"log" "log"
"math" "math"
"os"
"reflect" "reflect"
"sort" "sort"
"strings" "strings"
@ -283,20 +282,23 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
if err := w.WriteByte('\n'); err != nil { if err := w.WriteByte('\n'); err != nil {
return err return err
} }
// value // nil values aren't legal, but we can avoid panicking because of them.
if _, err := w.WriteString("value:"); err != nil { if val.Kind() != reflect.Ptr || !val.IsNil() {
return err // value
} if _, err := w.WriteString("value:"); err != nil {
if !w.compact { return err
if err := w.WriteByte(' '); err != nil { }
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err return err
} }
}
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
} }
// close struct // close struct
w.unindent() w.unindent()
@ -315,27 +317,8 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
} }
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value // proto3 non-repeated scalar field; skip if zero value
switch fv.Kind() { if isProto3Zero(fv) {
case reflect.Bool: continue
if !fv.Bool() {
continue
}
case reflect.Int32, reflect.Int64:
if fv.Int() == 0 {
continue
}
case reflect.Uint32, reflect.Uint64:
if fv.Uint() == 0 {
continue
}
case reflect.Float32, reflect.Float64:
if fv.Float() == 0 {
continue
}
case reflect.String:
if fv.String() == "" {
continue
}
} }
} }
@ -666,10 +649,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
pb, err := GetExtension(ep, desc) pb, err := GetExtension(ep, desc)
if err != nil { if err != nil {
if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil { return fmt.Errorf("failed getting extension: %v", err)
return err
}
continue
} }
// Repeated extensions will appear as a slice. // Repeated extensions will appear as a slice.
View file
@ -174,7 +174,7 @@ func (p *textParser) advance() {
} }
unq, err := unquoteC(p.s[1:i], rune(p.s[0])) unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil { if err != nil {
p.errorf("invalid quoted string %v", p.s[0:i+1]) p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return return
} }
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
View file
@ -152,7 +152,7 @@ var unMarshalTextTests = []UnmarshalTextTest{
// Bad quoted string // Bad quoted string
{ {
in: `inner: < host: "\0" >` + "\n", in: `inner: < host: "\0" >` + "\n",
err: `line 1.15: invalid quoted string "\0"`, err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
}, },
// Number too large for int64 // Number too large for int64
View file
@ -426,6 +426,11 @@ func TestProto3Text(t *testing.T) {
&pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}}, &pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}},
`name_mapping:<key:1234 value:"Feist" >`, `name_mapping:<key:1234 value:"Feist" >`,
}, },
// map with nil value; not well-defined, but we shouldn't crash
{
&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
`msg_mapping:<key:7 >`,
},
} }
for _, test := range tests { for _, test := range tests {
got := strings.TrimSpace(test.m.String()) got := strings.TrimSpace(test.m.String())
View file
@ -8,5 +8,7 @@
# Please keep the list sorted. # Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Google Inc. Google Inc.
Jan Mercl <0xjnml@gmail.com> Jan Mercl <0xjnml@gmail.com>
Sebastien Binet <seb.binet@gmail.com>
View file
@ -26,9 +26,11 @@
# Please keep the list sorted. # Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Jan Mercl <0xjnml@gmail.com> Jan Mercl <0xjnml@gmail.com>
Kai Backman <kaib@golang.org> Kai Backman <kaib@golang.org>
Marc-Antoine Ruel <maruel@chromium.org> Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org> Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org> Rob Pike <r@golang.org>
Russ Cox <rsc@golang.org> Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>
View file
@ -0,0 +1,7 @@
The Snappy compression format in the Go programming language.
To download and install from source:
$ go get github.com/golang/snappy
Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.
View file
@ -27,7 +27,7 @@ func DecodedLen(src []byte) (int, error) {
// that the length header occupied. // that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) { func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src) v, n := binary.Uvarint(src)
if n == 0 { if n <= 0 {
return 0, 0, ErrCorrupt return 0, 0, ErrCorrupt
} }
if uint64(int(v)) != v { if uint64(int(v)) != v {
@ -56,7 +56,7 @@ func Decode(dst, src []byte) ([]byte, error) {
x := uint(src[s] >> 2) x := uint(src[s] >> 2)
switch { switch {
case x < 60: case x < 60:
s += 1 s++
case x == 60: case x == 60:
s += 2 s += 2
if s > len(src) { if s > len(src) {
@ -130,7 +130,7 @@ func Decode(dst, src []byte) ([]byte, error) {
// NewReader returns a new Reader that decompresses from r, using the framing // NewReader returns a new Reader that decompresses from r, using the framing
// format described at // format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt // https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader { func NewReader(r io.Reader) *Reader {
return &Reader{ return &Reader{
r: r, r: r,
@ -200,7 +200,7 @@ func (r *Reader) Read(p []byte) (int, error) {
} }
// The chunk types are specified at // The chunk types are specified at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt // https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType { switch chunkType {
case chunkTypeCompressedData: case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00). // Section 4.2. Compressed data (chunk type 0x00).
@ -280,13 +280,11 @@ func (r *Reader) Read(p []byte) (int, error) {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported r.err = ErrUnsupported
return 0, r.err return 0, r.err
}
} else { // Section 4.4 Padding (chunk type 0xfe).
// Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). if !r.readFull(r.buf[:chunkLen]) {
if !r.readFull(r.buf[:chunkLen]) { return 0, r.err
return 0, r.err
}
} }
} }
} }
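The decodedLen change at the top of this file is what the new TestInvalidVarint (added in the test file further down) exercises: binary.Uvarint reports an over-long varint with a negative byte count, which the old n == 0 check let through. A small standard-library-only illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Ten 0xff bytes encode a varint wider than 64 bits.
	data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00")
	v, n := binary.Uvarint(data)
	// On overflow n is negative, so the new "n <= 0" test maps this to ErrCorrupt.
	fmt.Println(v, n)
}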
View file
@ -79,7 +79,7 @@ func emitCopy(dst []byte, offset, length int) int {
// slice of dst if dst was large enough to hold the entire encoded block. // slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned. // Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst. // It is valid to pass a nil dst.
func Encode(dst, src []byte) ([]byte, error) { func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); len(dst) < n { if n := MaxEncodedLen(len(src)); len(dst) < n {
dst = make([]byte, n) dst = make([]byte, n)
} }
@ -92,7 +92,7 @@ func Encode(dst, src []byte) ([]byte, error) {
if len(src) != 0 { if len(src) != 0 {
d += emitLiteral(dst[d:], src) d += emitLiteral(dst[d:], src)
} }
return dst[:d], nil return dst[:d]
} }
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
@ -145,7 +145,7 @@ func Encode(dst, src []byte) ([]byte, error) {
if lit != len(src) { if lit != len(src) {
d += emitLiteral(dst[d:], src[lit:]) d += emitLiteral(dst[d:], src[lit:])
} }
return dst[:d], nil return dst[:d]
} }
// MaxEncodedLen returns the maximum length of a snappy block, given its // MaxEncodedLen returns the maximum length of a snappy block, given its
@ -176,7 +176,7 @@ func MaxEncodedLen(srcLen int) int {
// NewWriter returns a new Writer that compresses to w, using the framing // NewWriter returns a new Writer that compresses to w, using the framing
// format described at // format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt // https://github.com/google/snappy/blob/master/framing_format.txt
func NewWriter(w io.Writer) *Writer { func NewWriter(w io.Writer) *Writer {
return &Writer{ return &Writer{
w: w, w: w,
@ -226,11 +226,7 @@ func (w *Writer) Write(p []byte) (n int, errRet error) {
// Compress the buffer, discarding the result if the improvement // Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%. // isn't at least 12.5%.
chunkType := uint8(chunkTypeCompressedData) chunkType := uint8(chunkTypeCompressedData)
chunkBody, err := Encode(w.enc, uncompressed) chunkBody := Encode(w.enc, uncompressed)
if err != nil {
w.err = err
return n, err
}
if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 { if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
chunkType, chunkBody = chunkTypeUncompressedData, uncompressed chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
} }
@ -244,11 +240,11 @@ func (w *Writer) Write(p []byte) (n int, errRet error) {
w.buf[5] = uint8(checksum >> 8) w.buf[5] = uint8(checksum >> 8)
w.buf[6] = uint8(checksum >> 16) w.buf[6] = uint8(checksum >> 16)
w.buf[7] = uint8(checksum >> 24) w.buf[7] = uint8(checksum >> 24)
if _, err = w.w.Write(w.buf[:]); err != nil { if _, err := w.w.Write(w.buf[:]); err != nil {
w.err = err w.err = err
return n, err return n, err
} }
if _, err = w.w.Write(chunkBody); err != nil { if _, err := w.w.Write(chunkBody); err != nil {
w.err = err w.err = err
return n, err return n, err
} }
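With Encode no longer returning an error, a round trip through the vendored package reduces to the sketch below (import path as in Godeps; Decode still needs its error checked):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello, snappy")

	enc := snappy.Encode(nil, src) // nil dst: a buffer is allocated for us
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, dec)) // true
}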
View file
@ -5,7 +5,7 @@
// Package snappy implements the snappy block-based compression format. // Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression. // It aims for very high speeds and reasonable compression.
// //
// The C++ snappy implementation is at http://code.google.com/p/snappy/ // The C++ snappy implementation is at https://github.com/google/snappy
package snappy package snappy
import ( import (
@ -46,7 +46,7 @@ const (
chunkHeaderSize = 4 chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY" magicBody = "sNaPpY"
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says // https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536 bytes". // that "the uncompressed data in a chunk must be no longer than 65536 bytes".
maxUncompressedChunkLen = 65536 maxUncompressedChunkLen = 65536
) )
@ -61,7 +61,7 @@ const (
var crcTable = crc32.MakeTable(crc32.Castagnoli) var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of // crc implements the checksum specified in section 3 of
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt // https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 { func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b) c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8 return uint32(c>>15|c<<17) + 0xa282ead8
View file
@ -24,11 +24,7 @@ var (
) )
func roundtrip(b, ebuf, dbuf []byte) error { func roundtrip(b, ebuf, dbuf []byte) error {
e, err := Encode(ebuf, b) d, err := Decode(dbuf, Encode(ebuf, b))
if err != nil {
return fmt.Errorf("encoding error: %v", err)
}
d, err := Decode(dbuf, e)
if err != nil { if err != nil {
return fmt.Errorf("decoding error: %v", err) return fmt.Errorf("decoding error: %v", err)
} }
@ -82,6 +78,16 @@ func TestSmallRegular(t *testing.T) {
} }
} }
func TestInvalidVarint(t *testing.T) {
data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00")
if _, err := DecodedLen(data); err != ErrCorrupt {
t.Errorf("DecodedLen: got %v, want ErrCorrupt", err)
}
if _, err := Decode(nil, data); err != ErrCorrupt {
t.Errorf("Decode: got %v, want ErrCorrupt", err)
}
}
func cmp(a, b []byte) error { func cmp(a, b []byte) error {
if len(a) != len(b) { if len(a) != len(b) {
return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
@ -197,10 +203,7 @@ func TestWriterReset(t *testing.T) {
} }
func benchDecode(b *testing.B, src []byte) { func benchDecode(b *testing.B, src []byte) {
encoded, err := Encode(nil, src) encoded := Encode(nil, src)
if err != nil {
b.Fatal(err)
}
// Bandwidth is in amount of uncompressed data. // Bandwidth is in amount of uncompressed data.
b.SetBytes(int64(len(src))) b.SetBytes(int64(len(src)))
b.ResetTimer() b.ResetTimer()
@ -222,7 +225,7 @@ func benchEncode(b *testing.B, src []byte) {
func readFile(b testing.TB, filename string) []byte { func readFile(b testing.TB, filename string) []byte {
src, err := ioutil.ReadFile(filename) src, err := ioutil.ReadFile(filename)
if err != nil { if err != nil {
b.Fatalf("failed reading %s: %s", filename, err) b.Skipf("skipping benchmark: %v", err)
} }
if len(src) == 0 { if len(src) == 0 {
b.Fatalf("%s has zero length", filename) b.Fatalf("%s has zero length", filename)
@ -284,14 +287,14 @@ var testFiles = []struct {
// The test data files are present at this canonical URL. // The test data files are present at this canonical URL.
const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
func downloadTestdata(basename string) (errRet error) { func downloadTestdata(b *testing.B, basename string) (errRet error) {
filename := filepath.Join(*testdata, basename) filename := filepath.Join(*testdata, basename)
if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
return nil return nil
} }
if !*download { if !*download {
return fmt.Errorf("test data not found; skipping benchmark without the -download flag") b.Skipf("test data not found; skipping benchmark without the -download flag")
} }
// Download the official snappy C++ implementation reference test data // Download the official snappy C++ implementation reference test data
// files for benchmarking. // files for benchmarking.
@ -326,7 +329,7 @@ func downloadTestdata(basename string) (errRet error) {
} }
func benchFile(b *testing.B, n int, decode bool) { func benchFile(b *testing.B, n int, decode bool) {
if err := downloadTestdata(testFiles[n].filename); err != nil { if err := downloadTestdata(b, testFiles[n].filename); err != nil {
b.Fatalf("failed to download testdata: %s", err) b.Fatalf("failed to download testdata: %s", err)
} }
data := readFile(b, filepath.Join(*testdata, testFiles[n].filename)) data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
View file
@ -1,26 +1,14 @@
package api package api
import ( import (
"os"
"testing" "testing"
) )
// ROOT is a management token for the tests
var CONSUL_ROOT string
func init() {
CONSUL_ROOT = os.Getenv("CONSUL_ROOT")
}
func TestACL_CreateDestroy(t *testing.T) { func TestACL_CreateDestroy(t *testing.T) {
t.Parallel() t.Parallel()
if CONSUL_ROOT == "" { c, s := makeACLClient(t)
t.SkipNow()
}
c, s := makeClient(t)
defer s.Stop() defer s.Stop()
c.config.Token = CONSUL_ROOT
acl := c.ACL() acl := c.ACL()
ae := ACLEntry{ ae := ACLEntry{
@ -63,16 +51,12 @@ func TestACL_CreateDestroy(t *testing.T) {
func TestACL_CloneDestroy(t *testing.T) { func TestACL_CloneDestroy(t *testing.T) {
t.Parallel() t.Parallel()
if CONSUL_ROOT == "" { c, s := makeACLClient(t)
t.SkipNow()
}
c, s := makeClient(t)
defer s.Stop() defer s.Stop()
c.config.Token = CONSUL_ROOT
acl := c.ACL() acl := c.ACL()
id, wm, err := acl.Clone(CONSUL_ROOT, nil) id, wm, err := acl.Clone(c.config.Token, nil)
if err != nil { if err != nil {
t.Fatalf("err: %v", err) t.Fatalf("err: %v", err)
} }
@ -97,16 +81,12 @@ func TestACL_CloneDestroy(t *testing.T) {
func TestACL_Info(t *testing.T) { func TestACL_Info(t *testing.T) {
t.Parallel() t.Parallel()
if CONSUL_ROOT == "" { c, s := makeACLClient(t)
t.SkipNow()
}
c, s := makeClient(t)
defer s.Stop() defer s.Stop()
c.config.Token = CONSUL_ROOT
acl := c.ACL() acl := c.ACL()
ae, qm, err := acl.Info(CONSUL_ROOT, nil) ae, qm, err := acl.Info(c.config.Token, nil)
if err != nil { if err != nil {
t.Fatalf("err: %v", err) t.Fatalf("err: %v", err)
} }
@ -118,20 +98,16 @@ func TestACL_Info(t *testing.T) {
t.Fatalf("bad: %v", qm) t.Fatalf("bad: %v", qm)
} }
if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType { if ae == nil || ae.ID != c.config.Token || ae.Type != ACLManagementType {
t.Fatalf("bad: %#v", ae) t.Fatalf("bad: %#v", ae)
} }
} }
func TestACL_List(t *testing.T) { func TestACL_List(t *testing.T) {
t.Parallel() t.Parallel()
if CONSUL_ROOT == "" { c, s := makeACLClient(t)
t.SkipNow()
}
c, s := makeClient(t)
defer s.Stop() defer s.Stop()
c.config.Token = CONSUL_ROOT
acl := c.ACL() acl := c.ACL()
acls, qm, err := acl.List(nil) acls, qm, err := acl.List(nil)
View file
@ -20,6 +20,16 @@ func makeClient(t *testing.T) (*Client, *testutil.TestServer) {
return makeClientWithConfig(t, nil, nil) return makeClientWithConfig(t, nil, nil)
} }
func makeACLClient(t *testing.T) (*Client, *testutil.TestServer) {
return makeClientWithConfig(t, func(clientConfig *Config) {
clientConfig.Token = "root"
}, func(serverConfig *testutil.TestServerConfig) {
serverConfig.ACLMasterToken = "root"
serverConfig.ACLDatacenter = "dc1"
serverConfig.ACLDefaultPolicy = "deny"
})
}
func makeClientWithConfig( func makeClientWithConfig(
t *testing.T, t *testing.T,
cb1 configCallback, cb1 configCallback,
View file
@ -168,6 +168,10 @@ func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
} }
func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
if len(key) > 0 && key[0] == '/' {
return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key)
}
r := k.c.newRequest("PUT", "/v1/kv/"+key) r := k.c.newRequest("PUT", "/v1/kv/"+key)
r.setWriteOptions(q) r.setWriteOptions(q)
for param, val := range params { for param, val := range params {
View file
@ -24,9 +24,17 @@ func TestClientPutGetDelete(t *testing.T) {
t.Fatalf("unexpected value: %#v", pair) t.Fatalf("unexpected value: %#v", pair)
} }
// Put the key
value := []byte("test") value := []byte("test")
p := &KVPair{Key: key, Flags: 42, Value: value}
// Put a key that begins with a '/', this should fail
invalidKey := "/test"
p := &KVPair{Key: invalidKey, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err == nil {
t.Fatalf("Invalid key not detected: %s", invalidKey)
}
// Put the key
p = &KVPair{Key: key, Flags: 42, Value: value}
if _, err := kv.Put(p, nil); err != nil { if _, err := kv.Put(p, nil); err != nil {
t.Fatalf("err: %v", err) t.Fatalf("err: %v", err)
} }
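A sketch of what the new client-side check in kv.go means for callers; it assumes a reachable local Consul agent and the key names are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// A leading '/' is now rejected locally, before any HTTP request is sent.
	if _, err := kv.Put(&api.KVPair{Key: "/bad/key", Value: []byte("x")}, nil); err != nil {
		fmt.Println("rejected:", err)
	}

	// Keys without a leading slash behave as before.
	if _, err := kv.Put(&api.KVPair{Key: "good/key", Value: []byte("x")}, nil); err != nil {
		log.Fatal(err)
	}
}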
View file
@ -183,11 +183,23 @@ WAIT:
// Handle the case of not getting the lock // Handle the case of not getting the lock
if !locked { if !locked {
select { // Determine why the lock failed
case <-time.After(DefaultLockRetryTime): qOpts.WaitIndex = 0
pair, meta, err = kv.Get(l.opts.Key, qOpts)
if pair != nil && pair.Session != "" {
//If the session is not null, this means that a wait can safely happen
//using a long poll
qOpts.WaitIndex = meta.LastIndex
goto WAIT goto WAIT
case <-stopCh: } else {
return nil, nil // If the session is empty and the lock failed to acquire, then it means
// a lock-delay is in effect and a timed wait must be used
select {
case <-time.After(DefaultLockRetryTime):
goto WAIT
case <-stopCh:
return nil, nil
}
} }
} }
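For context, the retry path changed above sits behind the Lock helper; typical usage looks roughly like this (assuming a reachable local Consul agent and an illustrative key name):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	lock, err := client.LockKey("service/leader")
	if err != nil {
		log.Fatal(err)
	}

	// Lock blocks until acquired; under contention it takes either the
	// long-poll or the lock-delay wait shown in the diff above.
	lostCh, err := lock.Lock(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer lock.Unlock()

	// Leader-only work goes here; lostCh is closed if the lock is lost.
	_ = lostCh
}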
View file
@ -0,0 +1,8 @@
sudo: false
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- tip
View file
@ -297,7 +297,7 @@ func main() {
**NOTE: It might be required to set [Router.HandleMethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.** **NOTE: It might be required to set [Router.HandleMethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.**
You can use another [http.HandlerFunc](http://golang.org/pkg/net/http/#HandlerFunc), for example another router, to handle requests which could not be matched by this router by using the [Router.NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining. You can use another [http.Handler](http://golang.org/pkg/net/http/#Handler), for example another router, to handle requests which could not be matched by this router by using the [Router.NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining.
### Static files ### Static files
The `NotFound` handler can for example be used to serve static files from the root path `/` (like an index.html file along with other assets): The `NotFound` handler can for example be used to serve static files from the root path `/` (like an index.html file along with other assets):
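Since NotFound is now a plain http.Handler, the static-file pattern mentioned here needs no wrapper function; a sketch with illustrative paths and port:

package main

import (
	"log"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

func main() {
	router := httprouter.New()
	router.GET("/api/ping", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
		w.Write([]byte("pong"))
	})

	// Anything the router cannot match falls through to a plain file server.
	router.NotFound = http.FileServer(http.Dir("./public"))

	log.Fatal(http.ListenAndServe(":8080", router))
}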
View file
@ -55,7 +55,7 @@
// //
// Catch-all parameters match anything until the path end, including the // Catch-all parameters match anything until the path end, including the
// directory index (the '/' before the catch-all). Since they match anything // directory index (the '/' before the catch-all). Since they match anything
// until the end, catch-all paramerters must always be the final path element. // until the end, catch-all parameters must always be the final path element.
// Path: /files/*filepath // Path: /files/*filepath
// //
// Requests: // Requests:
@ -138,14 +138,14 @@ type Router struct {
// handler. // handler.
HandleMethodNotAllowed bool HandleMethodNotAllowed bool
// Configurable http.HandlerFunc which is called when no matching route is // Configurable http.Handler which is called when no matching route is
// found. If it is not set, http.NotFound is used. // found. If it is not set, http.NotFound is used.
NotFound http.HandlerFunc NotFound http.Handler
// Configurable http.HandlerFunc which is called when a request // Configurable http.Handler which is called when a request
// cannot be routed and HandleMethodNotAllowed is true. // cannot be routed and HandleMethodNotAllowed is true.
// If it is not set, http.Error with http.StatusMethodNotAllowed is used. // If it is not set, http.Error with http.StatusMethodNotAllowed is used.
MethodNotAllowed http.HandlerFunc MethodNotAllowed http.Handler
// Function to handle panics recovered from http handlers. // Function to handle panics recovered from http handlers.
// It should be used to generate a error page and return the http error code // It should be used to generate a error page and return the http error code
@ -342,7 +342,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
handle, _, _ := r.trees[method].getValue(req.URL.Path) handle, _, _ := r.trees[method].getValue(req.URL.Path)
if handle != nil { if handle != nil {
if r.MethodNotAllowed != nil { if r.MethodNotAllowed != nil {
r.MethodNotAllowed(w, req) r.MethodNotAllowed.ServeHTTP(w, req)
} else { } else {
http.Error(w, http.Error(w,
http.StatusText(http.StatusMethodNotAllowed), http.StatusText(http.StatusMethodNotAllowed),
@ -356,7 +356,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// Handle 404 // Handle 404
if r.NotFound != nil { if r.NotFound != nil {
r.NotFound(w, req) r.NotFound.ServeHTTP(w, req)
} else { } else {
http.NotFound(w, req) http.NotFound(w, req)
} }
View file
@ -174,6 +174,48 @@ func TestRouterRoot(t *testing.T) {
} }
} }
func TestRouterChaining(t *testing.T) {
router1 := New()
router2 := New()
router1.NotFound = router2
fooHit := false
router1.POST("/foo", func(w http.ResponseWriter, req *http.Request, _ Params) {
fooHit = true
w.WriteHeader(http.StatusOK)
})
barHit := false
router2.POST("/bar", func(w http.ResponseWriter, req *http.Request, _ Params) {
barHit = true
w.WriteHeader(http.StatusOK)
})
r, _ := http.NewRequest("POST", "/foo", nil)
w := httptest.NewRecorder()
router1.ServeHTTP(w, r)
if !(w.Code == http.StatusOK && fooHit) {
t.Errorf("Regular routing failed with router chaining.")
t.FailNow()
}
r, _ = http.NewRequest("POST", "/bar", nil)
w = httptest.NewRecorder()
router1.ServeHTTP(w, r)
if !(w.Code == http.StatusOK && barHit) {
t.Errorf("Chained routing failed with router chaining.")
t.FailNow()
}
r, _ = http.NewRequest("POST", "/qax", nil)
w = httptest.NewRecorder()
router1.ServeHTTP(w, r)
if !(w.Code == http.StatusNotFound) {
t.Errorf("NotFound behavior failed with router chaining.")
t.FailNow()
}
}
func TestRouterNotAllowed(t *testing.T) { func TestRouterNotAllowed(t *testing.T) {
handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {} handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {}
@ -190,10 +232,10 @@ func TestRouterNotAllowed(t *testing.T) {
w = httptest.NewRecorder() w = httptest.NewRecorder()
responseText := "custom method" responseText := "custom method"
router.MethodNotAllowed = func(w http.ResponseWriter, req *http.Request) { router.MethodNotAllowed = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusTeapot) w.WriteHeader(http.StatusTeapot)
w.Write([]byte(responseText)) w.Write([]byte(responseText))
} })
router.ServeHTTP(w, r) router.ServeHTTP(w, r)
if got := w.Body.String(); !(got == responseText) { if got := w.Body.String(); !(got == responseText) {
t.Errorf("unexpected response got %q want %q", got, responseText) t.Errorf("unexpected response got %q want %q", got, responseText)
@ -237,10 +279,10 @@ func TestRouterNotFound(t *testing.T) {
// Test custom not found handler // Test custom not found handler
var notFound bool var notFound bool
router.NotFound = func(rw http.ResponseWriter, r *http.Request) { router.NotFound = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.WriteHeader(404) rw.WriteHeader(404)
notFound = true notFound = true
} })
r, _ := http.NewRequest("GET", "/nope", nil) r, _ := http.NewRequest("GET", "/nope", nil)
w := httptest.NewRecorder() w := httptest.NewRecorder()
router.ServeHTTP(w, r) router.ServeHTTP(w, r)
View file
@ -37,6 +37,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/StalkR/dns-reverse-proxy * https://github.com/StalkR/dns-reverse-proxy
* https://github.com/tianon/rawdns * https://github.com/tianon/rawdns
* https://mesosphere.github.io/mesos-dns/ * https://mesosphere.github.io/mesos-dns/
* https://pulse.turbobytes.com/
Send pull request if you want to be listed here. Send pull request if you want to be listed here.
@ -118,6 +119,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 6605 - ECDSA * 6605 - ECDSA
* 6725 - IANA Registry Update * 6725 - IANA Registry Update
* 6742 - ILNP DNS * 6742 - ILNP DNS
* 6844 - CAA record
* 6891 - EDNS0 update * 6891 - EDNS0 update
* 6895 - DNS IANA considerations * 6895 - DNS IANA considerations
* 6975 - Algorithm Understanding in DNSSEC * 6975 - Algorithm Understanding in DNSSEC
@ -138,6 +140,5 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* privatekey.Precompute() when signing? * privatekey.Precompute() when signing?
* Last remaining RRs: APL, ATMA, A6 and NXT and IPSECKEY; * Last remaining RRs: APL, ATMA, A6 and NXT and IPSECKEY;
* Missing in parsing: ISDN, UNSPEC, ATMA; * Missing in parsing: ISDN, UNSPEC, ATMA;
* CAA parsing is broken;
* NSEC(3) cover/match/closest enclose; * NSEC(3) cover/match/closest enclose;
* Replies with TC bit are not parsed to the end; * Replies with TC bit are not parsed to the end;
View file
@ -189,26 +189,15 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
// If the received message contains a TSIG record the transaction // If the received message contains a TSIG record the transaction
// signature is verified. // signature is verified.
func (co *Conn) ReadMsg() (*Msg, error) { func (co *Conn) ReadMsg() (*Msg, error) {
var p []byte p, err := co.ReadMsgHeader(nil)
m := new(Msg) if err != nil {
if _, ok := co.Conn.(*net.TCPConn); ok {
p = make([]byte, MaxMsgSize)
} else {
if co.UDPSize >= 512 {
p = make([]byte, co.UDPSize)
} else {
p = make([]byte, MinMsgSize)
}
}
n, err := co.Read(p)
if err != nil && n == 0 {
return nil, err return nil, err
} }
p = p[:n]
m := new(Msg)
if err := m.Unpack(p); err != nil { if err := m.Unpack(p); err != nil {
return nil, err return nil, err
} }
co.rtt = time.Since(co.t)
if t := m.IsTsig(); t != nil { if t := m.IsTsig(); t != nil {
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
return m, ErrSecret return m, ErrSecret
@ -219,6 +208,81 @@ func (co *Conn) ReadMsg() (*Msg, error) {
return m, err return m, err
} }
// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
// Returns message as a byte slice to be parsed with Msg.Unpack later on.
// Note that error handling on the message body is not possible as only the header is parsed.
func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
var (
p []byte
n int
err error
)
if t, ok := co.Conn.(*net.TCPConn); ok {
// First two bytes specify the length of the entire message.
l, err := tcpMsgLen(t)
if err != nil {
return nil, err
}
p = make([]byte, l)
n, err = tcpRead(t, p)
} else {
if co.UDPSize > MinMsgSize {
p = make([]byte, co.UDPSize)
} else {
p = make([]byte, MinMsgSize)
}
n, err = co.Read(p)
}
if err != nil {
return nil, err
} else if n < headerSize {
return nil, ErrShortRead
}
p = p[:n]
if hdr != nil {
if _, err = UnpackStruct(hdr, p, 0); err != nil {
return nil, err
}
}
return p, err
}
// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length.
func tcpMsgLen(t *net.TCPConn) (int, error) {
p := []byte{0, 0}
n, err := t.Read(p)
if err != nil {
return 0, err
}
if n != 2 {
return 0, ErrShortRead
}
l, _ := unpackUint16(p, 0)
if l == 0 {
return 0, ErrShortRead
}
return int(l), nil
}
// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
func tcpRead(t *net.TCPConn, p []byte) (int, error) {
n, err := t.Read(p)
if err != nil {
return n, err
}
for n < len(p) {
j, err := t.Read(p[n:])
if err != nil {
return n, err
}
n += j
}
return n, err
}
// Read implements the net.Conn read method. // Read implements the net.Conn read method.
func (co *Conn) Read(p []byte) (n int, err error) { func (co *Conn) Read(p []byte) (n int, err error) {
if co.Conn == nil { if co.Conn == nil {
@ -228,37 +292,22 @@ func (co *Conn) Read(p []byte) (n int, err error) {
return 0, io.ErrShortBuffer return 0, io.ErrShortBuffer
} }
if t, ok := co.Conn.(*net.TCPConn); ok { if t, ok := co.Conn.(*net.TCPConn); ok {
n, err = t.Read(p[0:2]) l, err := tcpMsgLen(t)
if err != nil || n != 2 { if err != nil {
return n, err return 0, err
} }
l, _ := unpackUint16(p[0:2], 0) if l > len(p) {
if l == 0 {
return 0, ErrShortRead
}
if int(l) > len(p) {
return int(l), io.ErrShortBuffer return int(l), io.ErrShortBuffer
} }
n, err = t.Read(p[:l]) return tcpRead(t, p[:l])
if err != nil {
return n, err
}
i := n
for i < int(l) {
j, err := t.Read(p[i:int(l)])
if err != nil {
return i, err
}
i += j
}
n = i
return n, err
} }
// UDP connection // UDP connection
n, err = co.Conn.Read(p) n, err = co.Conn.Read(p)
if err != nil { if err != nil {
return n, err return n, err
} }
co.rtt = time.Since(co.t)
return n, err return n, err
} }
@ -327,21 +376,3 @@ func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, er
} }
return conn, nil return conn, nil
} }
// Close implements the net.Conn Close method.
func (co *Conn) Close() error { return co.Conn.Close() }
// LocalAddr implements the net.Conn LocalAddr method.
func (co *Conn) LocalAddr() net.Addr { return co.Conn.LocalAddr() }
// RemoteAddr implements the net.Conn RemoteAddr method.
func (co *Conn) RemoteAddr() net.Addr { return co.Conn.RemoteAddr() }
// SetDeadline implements the net.Conn SetDeadline method.
func (co *Conn) SetDeadline(t time.Time) error { return co.Conn.SetDeadline(t) }
// SetReadDeadline implements the net.Conn SetReadDeadline method.
func (co *Conn) SetReadDeadline(t time.Time) error { return co.Conn.SetReadDeadline(t) }
// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
func (co *Conn) SetWriteDeadline(t time.Time) error { return co.Conn.SetWriteDeadline(t) }
View file
@ -32,7 +32,7 @@ func TestClientSync(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("failed to exchange: %v", err) t.Errorf("failed to exchange: %v", err)
} }
if r != nil && r.Rcode != RcodeSuccess { if r == nil || r.Rcode != RcodeSuccess {
t.Errorf("failed to get an valid answer\n%v", r) t.Errorf("failed to get an valid answer\n%v", r)
} }
} }
@ -235,3 +235,52 @@ func ExampleUpdateLeaseTSIG(t *testing.T) {
t.Error(err) t.Error(err)
} }
} }
func TestClientConn(t *testing.T) {
HandleFunc("miek.nl.", HelloServer)
defer HandleRemove("miek.nl.")
// This uses TCP just to make it slightly different than TestClientSync
s, addrstr, err := RunLocalTCPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
m := new(Msg)
m.SetQuestion("miek.nl.", TypeSOA)
cn, err := Dial("tcp", addrstr)
if err != nil {
t.Errorf("failed to dial %s: %v", addrstr, err)
}
err = cn.WriteMsg(m)
if err != nil {
t.Errorf("failed to exchange: %v", err)
}
r, err := cn.ReadMsg()
if r == nil || r.Rcode != RcodeSuccess {
t.Errorf("failed to get an valid answer\n%v", r)
}
err = cn.WriteMsg(m)
if err != nil {
t.Errorf("failed to exchange: %v", err)
}
h := new(Header)
buf, err := cn.ReadMsgHeader(h)
if buf == nil {
t.Errorf("failed to get an valid answer\n%v", r)
}
if int(h.Bits&0xF) != RcodeSuccess {
t.Errorf("failed to get an valid answer in ReadMsgHeader\n%v", r)
}
if h.Ancount != 0 || h.Qdcount != 1 || h.Nscount != 0 || h.Arcount != 1 {
t.Errorf("expected to have question and additional in response; got something else: %+v", h)
}
if err = r.Unpack(buf); err != nil {
t.Errorf("unable to unpack message fully: %v", err)
}
}
View file
@ -429,9 +429,6 @@ func TestToRFC3597(t *testing.T) {
func TestNoRdataPack(t *testing.T) { func TestNoRdataPack(t *testing.T) {
data := make([]byte, 1024) data := make([]byte, 1024)
for typ, fn := range typeToRR { for typ, fn := range typeToRR {
if typ == TypeCAA {
continue // TODO(miek): known omission
}
r := fn() r := fn()
*r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 3600} *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 3600}
_, err := PackRR(r, data, 0, nil, false) _, err := PackRR(r, data, 0, nil, false)
View file
@ -202,6 +202,9 @@ RFC 6895 sets aside a range of type codes for private use. This range
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
can be used, before requesting an official type code from IANA. can be used, before requesting an official type code from IANA.
see http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more
information.
EDNS0 EDNS0
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
File diff suppressed because it is too large
View file
@ -3,9 +3,10 @@ package idn
import ( import (
"bytes" "bytes"
"github.com/miekg/dns"
"strings" "strings"
"unicode" "unicode"
"github.com/miekg/dns"
) )
// Implementation idea from RFC itself and from from IDNA::Punycode created by // Implementation idea from RFC itself and from from IDNA::Punycode created by
@ -26,8 +27,8 @@ const (
) )
// ToPunycode converts unicode domain names to DNS-appropriate punycode names. // ToPunycode converts unicode domain names to DNS-appropriate punycode names.
// This function would return incorrect result for strings for non-canonical // This function would return an empty string result for domain names with
// unicode strings. // invalid unicode strings. This function expects domain names in lowercase.
func ToPunycode(s string) string { func ToPunycode(s string) string {
tokens := dns.SplitDomainName(s) tokens := dns.SplitDomainName(s)
switch { switch {
@ -40,7 +41,11 @@ func ToPunycode(s string) string {
} }
for i := range tokens { for i := range tokens {
tokens[i] = string(encode([]byte(tokens[i]))) t := encode([]byte(tokens[i]))
if t == nil {
return ""
}
tokens[i] = string(t)
} }
return strings.Join(tokens, ".") return strings.Join(tokens, ".")
} }
@ -138,12 +143,18 @@ func tfunc(k, bias rune) rune {
return k - bias return k - bias
} }
// encode transforms Unicode input bytes (that represent DNS label) into punycode bytestream // encode transforms Unicode input bytes (that represent DNS label) into
// punycode bytestream. This function would return nil if there's an invalid
// character in the label.
func encode(input []byte) []byte { func encode(input []byte) []byte {
n, bias := _N, _BIAS n, bias := _N, _BIAS
b := bytes.Runes(input) b := bytes.Runes(input)
for i := range b { for i := range b {
if !isValidRune(b[i]) {
return nil
}
b[i] = preprune(b[i]) b[i] = preprune(b[i])
} }
@ -267,3 +278,34 @@ func decode(b []byte) []byte {
} }
return ret.Bytes() return ret.Bytes()
} }
// isValidRune checks if the character is valid. We will look for the
// character property in the code points list. For now we aren't checking special
// rules in case of contextual property
func isValidRune(r rune) bool {
return findProperty(r) == propertyPVALID
}
// findProperty will try to check the code point property of the given
// character. It will use a binary search algorithm as we have a slice of
// ordered ranges (average case performance O(log n))
func findProperty(r rune) property {
imin, imax := 0, len(codePoints)
for imax >= imin {
imid := (imin + imax) / 2
codePoint := codePoints[imid]
if (codePoint.start == r && codePoint.end == 0) || (codePoint.start <= r && codePoint.end >= r) {
return codePoint.state
}
if (codePoint.end > 0 && codePoint.end < r) || (codePoint.end == 0 && codePoint.start < r) {
imin = imid + 1
} else {
imax = imid - 1
}
}
return propertyUnknown
}
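A quick sketch of the stricter behavior documented above; the import path follows the vendored layout and the expected outputs come from the test table below:

package main

import (
	"fmt"

	"github.com/miekg/dns/idn"
)

func main() {
	fmt.Println(idn.ToPunycode("испытание"))       // "xn--80akhbyknj4f"
	fmt.Println(idn.ToPunycode("Испытание") == "") // true: uppercase runes are not PVALID
}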
View file
@ -13,13 +13,13 @@ var testcases = [][2]string{
{"AbC", "abc"}, {"AbC", "abc"},
{"я", "xn--41a"}, {"я", "xn--41a"},
{"zя", "xn--z-0ub"}, {"zя", "xn--z-0ub"},
{"ЯZ", "xn--z-zub"}, {"яZ", "xn--z-zub"},
{"а-я", "xn----7sb8g"}, {"а-я", "xn----7sb8g"},
{"إختبار", "xn--kgbechtv"}, {"إختبار", "xn--kgbechtv"},
{"آزمایشی", "xn--hgbk6aj7f53bba"}, {"آزمایشی", "xn--hgbk6aj7f53bba"},
{"测试", "xn--0zwm56d"}, {"测试", "xn--0zwm56d"},
{"測試", "xn--g6w251d"}, {"測試", "xn--g6w251d"},
{"Испытание", "xn--80akhbyknj4f"}, {"испытание", "xn--80akhbyknj4f"},
{"परीक्षा", "xn--11b5bs3a9aj6g"}, {"परीक्षा", "xn--11b5bs3a9aj6g"},
{"δοκιμή", "xn--jxalpdlp"}, {"δοκιμή", "xn--jxalpdlp"},
{"테스트", "xn--9t4b11yi5a"}, {"테스트", "xn--9t4b11yi5a"},
@ -27,6 +27,7 @@ var testcases = [][2]string{
{"テスト", "xn--zckzah"}, {"テスト", "xn--zckzah"},
{"பரிட்சை", "xn--hlcj6aya9esc7a"}, {"பரிட்சை", "xn--hlcj6aya9esc7a"},
{"mamão-com-açúcar", "xn--mamo-com-acar-yeb1e6q"}, {"mamão-com-açúcar", "xn--mamo-com-acar-yeb1e6q"},
{"σ", "xn--4xa"},
} }
func TestEncodeDecodePunycode(t *testing.T) { func TestEncodeDecodePunycode(t *testing.T) {
@ -81,17 +82,34 @@ func TestEncodeDecodeFinalPeriod(t *testing.T) {
} }
} }
var invalid = []string{ var invalidACEs = []string{
"xn--*", "xn--*",
"xn--", "xn--",
"xn---", "xn---",
} }
func TestInvalidPunycode(t *testing.T) { func TestInvalidPunycode(t *testing.T) {
for _, d := range invalid { for _, d := range invalidACEs {
s := FromPunycode(d) s := FromPunycode(d)
if s != d { if s != d {
t.Errorf("Changed invalid name %s to %#v", d, s) t.Errorf("Changed invalid name %s to %#v", d, s)
} }
} }
} }
// You can verify the labels that are valid or not comparing to the Verisign
// website: http://mct.verisign-grs.com/
var invalidUnicodes = []string{
"Σ",
"ЯZ",
"Испытание",
}
func TestInvalidUnicodes(t *testing.T) {
for _, d := range invalidUnicodes {
s := ToPunycode(d)
if s != "" {
t.Errorf("Changed invalid name %s to %#v", d, s)
}
}
}
View file
@ -543,6 +543,36 @@ func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
return offset, nil return offset, nil
} }
func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
if offset >= len(msg) {
return offset, ErrBuf
}
bs := tmp[:len(s)]
copy(bs, s)
for i := 0; i < len(bs); i++ {
if len(msg) <= offset {
return offset, ErrBuf
}
if bs[i] == '\\' {
i++
if i == len(bs) {
break
}
// check for \DDD
if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
msg[offset] = dddToByte(bs[i:])
i += 2
} else {
msg[offset] = bs[i]
}
} else {
msg[offset] = bs[i]
}
offset++
}
return offset, nil
}
func unpackTxt(msg []byte, offset, rdend int) ([]string, int, error) { func unpackTxt(msg []byte, offset, rdend int) ([]string, int, error) {
var err error var err error
var ss []string var ss []string
@ -890,6 +920,12 @@ func packStructValue(val reflect.Value, msg []byte, off int, compression map[str
// length of string. String is RAW (not encoded in hex, nor base64) // length of string. String is RAW (not encoded in hex, nor base64)
copy(msg[off:off+len(s)], s) copy(msg[off:off+len(s)], s)
off += len(s) off += len(s)
case `dns:"octet"`:
bytesTmp := make([]byte, 256)
off, err = packOctetString(fv.String(), msg, off, bytesTmp)
if err != nil {
return lenmsg, err
}
case `dns:"txt"`: case `dns:"txt"`:
fallthrough fallthrough
case "": case "":
@ -1254,6 +1290,13 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
switch val.Type().Field(i).Tag { switch val.Type().Field(i).Tag {
default: default:
return lenmsg, &Error{"bad tag unpacking string: " + val.Type().Field(i).Tag.Get("dns")} return lenmsg, &Error{"bad tag unpacking string: " + val.Type().Field(i).Tag.Get("dns")}
case `dns:"octet"`:
strend := lenrd
if strend > lenmsg {
return lenmsg, &Error{err: "overflow unpacking octet"}
}
s = string(msg[off:strend])
off = strend
case `dns:"hex"`: case `dns:"hex"`:
hexend := lenrd hexend := lenrd
if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) { if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) {
View file
@ -1455,3 +1455,54 @@ func TestParseHINFO(t *testing.T) {
} }
} }
} }
func TestParseCAA(t *testing.T) {
lt := map[string]string{
"example.net. CAA 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"",
"example.net. CAA 0 issuewild \"symantec.com; stuff\"": "example.net.\t3600\tIN\tCAA\t0 issuewild \"symantec.com; stuff\"",
"example.net. CAA 128 tbs \"critical\"": "example.net.\t3600\tIN\tCAA\t128 tbs \"critical\"",
"example.net. CAA 2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"": "example.net.\t3600\tIN\tCAA\t2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"",
"example.net. TYPE257 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"",
}
for i, o := range lt {
rr, err := NewRR(i)
if err != nil {
t.Error("failed to parse RR: ", err)
continue
}
if rr.String() != o {
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
}
}
func TestPackCAA(t *testing.T) {
m := new(Msg)
record := new(CAA)
record.Hdr = RR_Header{Name: "example.com.", Rrtype: TypeCAA, Class: ClassINET, Ttl: 0}
record.Tag = "issue"
record.Value = "symantec.com"
record.Flag = 1
m.Answer = append(m.Answer, record)
bytes, err := m.Pack()
if err != nil {
t.Fatalf("failed to pack msg: %v", err)
}
if err := m.Unpack(bytes); err != nil {
t.Fatalf("failed to unpack msg: %v", err)
}
if len(m.Answer) != 1 {
t.Fatalf("incorrect number of answers unpacked")
}
rr := m.Answer[0].(*CAA)
if rr.Tag != "issue" {
t.Fatalf("invalid tag for unpacked answer")
} else if rr.Value != "symantec.com" {
t.Fatalf("invalid value for unpacked answer")
} else if rr.Flag != 1 {
t.Fatalf("invalid flag for unpacked answer")
}
}
View file
@ -158,6 +158,8 @@ type Header struct {
} }
const ( const (
headerSize = 12
// Header.Bits // Header.Bits
_QR = 1 << 15 // query/response (response=1) _QR = 1 << 15 // query/response (response=1)
_AA = 1 << 10 // authoritative _AA = 1 << 10 // authoritative
@ -501,6 +503,34 @@ func sprintName(s string) string {
return string(dst) return string(dst)
} }
func sprintCAAValue(s string) string {
src := []byte(s)
dst := make([]byte, 0, len(src))
dst = append(dst, '"')
for i := 0; i < len(src); {
if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
dst = append(dst, src[i:i+2]...)
i += 2
} else {
b, n := nextByte(src, i)
if n == 0 {
i++ // dangling back slash
} else if b == '.' {
dst = append(dst, b)
} else {
if b < ' ' || b > '~' {
dst = appendByte(dst, b)
} else {
dst = append(dst, b)
}
}
i += n
}
}
dst = append(dst, '"')
return string(dst)
}
func sprintTxt(txt []string) string { func sprintTxt(txt []string) string {
var out []byte var out []byte
for i, s := range txt { for i, s := range txt {
@ -543,21 +573,24 @@ func appendTXTStringByte(s []byte, b byte) []byte {
return append(s, '\\', b) return append(s, '\\', b)
} }
if b < ' ' || b > '~' { if b < ' ' || b > '~' {
var buf [3]byte return appendByte(s, b)
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
s = append(s, '\\')
for i := 0; i < 3-len(bufs); i++ {
s = append(s, '0')
}
for _, r := range bufs {
s = append(s, r)
}
return s
} }
return append(s, b) return append(s, b)
} }
func appendByte(s []byte, b byte) []byte {
var buf [3]byte
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
s = append(s, '\\')
for i := 0; i < 3-len(bufs); i++ {
s = append(s, '0')
}
for _, r := range bufs {
s = append(s, r)
}
return s
}
func nextByte(b []byte, offset int) (byte, int) { func nextByte(b []byte, offset int) (byte, int) {
if offset >= len(b) { if offset >= len(b) {
return 0, 0 return 0, 0
@ -1527,8 +1560,6 @@ func (rr *EUI64) copy() RR { return &EUI64{*rr.Hdr.copyHeader(), rr.Ad
func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) }
func (rr *EUI64) len() int { return rr.Hdr.len() + 8 } func (rr *EUI64) len() int { return rr.Hdr.len() + 8 }
// Support in incomplete - just handle it as unknown record
/*
type CAA struct { type CAA struct {
Hdr RR_Header Hdr RR_Header
Flag uint8 Flag uint8
@ -1538,14 +1569,10 @@ type CAA struct {
func (rr *CAA) Header() *RR_Header { return &rr.Hdr } func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
func (rr *CAA) copy() RR { return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} } func (rr *CAA) copy() RR { return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} }
func (rr *CAA) len() int { return rr.Hdr.len() + 1 + len(rr.Tag) + 1 + len(rr.Value) } func (rr *CAA) len() int { return rr.Hdr.len() + 2 + len(rr.Tag) + len(rr.Value) }
func (rr *CAA) String() string { func (rr *CAA) String() string {
s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Flag), 10) + " " + rr.Tag return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintCAAValue(rr.Value)
s += strconv.QuoteToASCII(rr.Value)
return s
} }
*/
type UID struct { type UID struct {
Hdr RR_Header Hdr RR_Header
@ -1668,10 +1695,10 @@ func copyIP(ip net.IP) net.IP {
// Map of constructors for each RR type. // Map of constructors for each RR type.
var typeToRR = map[uint16]func() RR{ var typeToRR = map[uint16]func() RR{
TypeA: func() RR { return new(A) }, TypeA: func() RR { return new(A) },
TypeAAAA: func() RR { return new(AAAA) }, TypeAAAA: func() RR { return new(AAAA) },
TypeAFSDB: func() RR { return new(AFSDB) }, TypeAFSDB: func() RR { return new(AFSDB) },
// TypeCAA: func() RR { return new(CAA) }, TypeCAA: func() RR { return new(CAA) },
TypeCDS: func() RR { return new(CDS) }, TypeCDS: func() RR { return new(CDS) },
TypeCERT: func() RR { return new(CERT) }, TypeCERT: func() RR { return new(CERT) },
TypeCNAME: func() RR { return new(CNAME) }, TypeCNAME: func() RR { return new(CNAME) },

View file
func TestDynamicUpdateParsing(t *testing.T) { func TestDynamicUpdateParsing(t *testing.T) {
prefix := "example.com. IN " prefix := "example.com. IN "
for _, typ := range TypeToString { for _, typ := range TypeToString {
if typ == "CAA" || typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" || if typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" ||
typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" { typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" {
continue continue
} }
View file
@ -107,7 +107,7 @@ func (t *Transfer) inIxfr(id uint16, c chan *Envelope) {
t.SetReadDeadline(time.Now().Add(timeout)) t.SetReadDeadline(time.Now().Add(timeout))
in, err := t.ReadMsg() in, err := t.ReadMsg()
if err != nil { if err != nil {
c <- &Envelope{in.Answer, err} c <- &Envelope{nil, err}
return return
} }
if id != in.Id { if id != in.Id {
View file
@ -2170,10 +2170,44 @@ func setIPSECKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string)
return rr, nil, c1 return rr, nil, c1
} }
func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(CAA)
rr.Hdr = h
l := <-c
if l.length == 0 {
return rr, nil, l.comment
}
i, err := strconv.Atoi(l.token)
if err != nil {
return nil, &ParseError{f, "bad CAA Flag", l}, ""
}
rr.Flag = uint8(i)
<-c // zBlank
l = <-c // zString
if l.value != zString {
return nil, &ParseError{f, "bad CAA Tag", l}, ""
}
rr.Tag = l.token
<-c // zBlank
s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f)
if e != nil {
return nil, e, ""
}
if len(s) > 1 {
return nil, &ParseError{f, "bad CAA Value", l}, ""
} else {
rr.Value = s[0]
}
return rr, nil, c1
}
var typeToparserFunc = map[uint16]parserFunc{ var typeToparserFunc = map[uint16]parserFunc{
TypeAAAA: parserFunc{setAAAA, false}, TypeAAAA: parserFunc{setAAAA, false},
TypeAFSDB: parserFunc{setAFSDB, false}, TypeAFSDB: parserFunc{setAFSDB, false},
TypeA: parserFunc{setA, false}, TypeA: parserFunc{setA, false},
TypeCAA: parserFunc{setCAA, true},
TypeCDS: parserFunc{setCDS, true}, TypeCDS: parserFunc{setCDS, true},
TypeCDNSKEY: parserFunc{setCDNSKEY, true}, TypeCDNSKEY: parserFunc{setCDNSKEY, true},
TypeCERT: parserFunc{setCERT, true}, TypeCERT: parserFunc{setCERT, true},
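With setCAA registered in typeToparserFunc (and TypeCAA uncommented in typeToRR above), CAA records parse from zone-file syntax again. A minimal sketch, assuming the github.com/miekg/dns import path; the record contents are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// NewRR goes through typeToparserFunc, so CAA now parses like any other type.
	rr, err := dns.NewRR(`example.org. 3600 IN CAA 0 issue "ca.example.net"`)
	if err != nil {
		log.Fatal(err)
	}
	caa := rr.(*dns.CAA)
	fmt.Println(caa.Flag, caa.Tag, caa.Value) // 0 issue ca.example.net
}
```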

@ -21,8 +21,8 @@ import (
) )
const ( const (
// ExportedLabelPrefix is the label name prefix to prepend if a // ExportedLabelPrefix is the prefix to prepend to the label names present in
// synthetic label is already present in the exported metrics. // exported metrics if a label of the same name is added by the server.
ExportedLabelPrefix LabelName = "exported_" ExportedLabelPrefix LabelName = "exported_"
// MetricNameLabel is the label name indicating the metric name of a // MetricNameLabel is the label name indicating the metric name of a

@ -70,9 +70,9 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
if err := json.Unmarshal(b, &m); err != nil { if err := json.Unmarshal(b, &m); err != nil {
return err return err
} }
// encoding/json only unmarshals maps of the form map[string]T. It does not // encoding/json only unmarshals maps of the form map[string]T. It treats
// detect that LabelName is a string and does not call its UnmarshalJSON method. // LabelName as a string and does not call its UnmarshalJSON method.
// Thus we have to replicate the behavior here. // Thus, we have to replicate the behavior here.
for ln := range m { for ln := range m {
if !LabelNameRE.MatchString(string(ln)) { if !LabelNameRE.MatchString(string(ln)) {
return fmt.Errorf("%q is not a valid label name", ln) return fmt.Errorf("%q is not a valid label name", ln)
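The reworded comment explains why the validation loop exists: encoding/json treats LabelName map keys as plain strings and never calls their UnmarshalJSON, so LabelSet.UnmarshalJSON has to re-check them itself. A minimal sketch of that behaviour, assuming the vendored github.com/prometheus/client_golang/model import path:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/client_golang/model"
)

func main() {
	var ls model.LabelSet

	// A valid label set decodes as expected.
	fmt.Println(json.Unmarshal([]byte(`{"job":"api"}`), &ls)) // <nil>

	// encoding/json would accept any string key, so the method itself
	// rejects names that do not match LabelNameRE.
	fmt.Println(json.Unmarshal([]byte(`{"0bad":"x"}`), &ls))
}
```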

@ -121,7 +121,7 @@ func (m Metric) Fingerprint() Fingerprint {
return metricToFingerprint(m) return metricToFingerprint(m)
} }
// Fingerprint returns a Metric's Fingerprint calculated by a faster hashing // FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
// algorithm, which is, however, more susceptible to hash collisions. // algorithm, which is, however, more susceptible to hash collisions.
func (m Metric) FastFingerprint() Fingerprint { func (m Metric) FastFingerprint() Fingerprint {
return metricToFastFingerprint(m) return metricToFastFingerprint(m)
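The comment now names the right method: FastFingerprint, which uses a faster but more collision-prone hash than Fingerprint. A minimal sketch comparing the two on the same Metric, assuming the vendored model package; the label values are made up:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"job":                 "api",
	}
	// Same metric, two hashes: the fast variant trades collision
	// resistance for speed.
	fmt.Println(m.Fingerprint())
	fmt.Println(m.FastFingerprint())
}
```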

@ -61,12 +61,13 @@
// It also exports some stats about the HTTP usage of the /metrics // It also exports some stats about the HTTP usage of the /metrics
// endpoint. (See the Handler function for more detail.) // endpoint. (See the Handler function for more detail.)
// //
// A more advanced metric type is the Summary. // Two more advanced metric types are the Summary and Histogram.
// //
// In addition to the fundamental metric types Gauge, Counter, and Summary, a // In addition to the fundamental metric types Gauge, Counter, Summary, and
// very important part of the Prometheus data model is the partitioning of // Histogram, a very important part of the Prometheus data model is the
// samples along dimensions called labels, which results in metric vectors. The // partitioning of samples along dimensions called labels, which results in
// fundamental types are GaugeVec, CounterVec, and SummaryVec. // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// and HistogramVec.
// //
// Those are all the parts needed for basic usage. Detailed documentation and // Those are all the parts needed for basic usage. Detailed documentation and
// examples are provided below. // examples are provided below.
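The package documentation now lists Histogram and HistogramVec alongside the other fundamental types. A minimal HistogramVec sketch under the API at this revision; the metric name, buckets, and listen address are illustrative only:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A HistogramVec counts observations into buckets, partitioned by labels.
	reqDur := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "request_duration_seconds",
			Help:    "Request duration distribution.",
			Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
		},
		[]string{"handler"},
	)
	prometheus.MustRegister(reqDur)

	reqDur.WithLabelValues("index").Observe(0.21)

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```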

@ -392,6 +392,9 @@ func ExampleSummaryVec() {
temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10) temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)
} }
// Create a Summary without any observations.
temps.WithLabelValues("leiopelma-hochstetteri")
// Just for demonstration, let's check the state of the summary vector // Just for demonstration, let's check the state of the summary vector
// by (ab)using its Collect method and the Write method of its elements // by (ab)using its Collect method and the Write method of its elements
// (which is usually only used by Prometheus internally - code like the // (which is usually only used by Prometheus internally - code like the
@ -414,6 +417,26 @@ func ExampleSummaryVec() {
// Output: // Output:
// [label: < // [label: <
// name: "species" // name: "species"
// value: "leiopelma-hochstetteri"
// >
// summary: <
// sample_count: 0
// sample_sum: 0
// quantile: <
// quantile: 0.5
// value: nan
// >
// quantile: <
// quantile: 0.9
// value: nan
// >
// quantile: <
// quantile: 0.99
// value: nan
// >
// >
// label: <
// name: "species"
// value: "lithobates-catesbeianus" // value: "lithobates-catesbeianus"
// > // >
// summary: < // summary: <

@ -14,6 +14,9 @@
package prometheus package prometheus
import ( import (
"bufio"
"io"
"net"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
@ -141,7 +144,18 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
urlLen = len(r.URL.String()) urlLen = len(r.URL.String())
} }
go computeApproximateRequestSize(r, out, urlLen) go computeApproximateRequestSize(r, out, urlLen)
handlerFunc(delegate, r)
_, cn := w.(http.CloseNotifier)
_, fl := w.(http.Flusher)
_, hj := w.(http.Hijacker)
_, rf := w.(io.ReaderFrom)
var rw http.ResponseWriter
if cn && fl && hj && rf {
rw = &fancyResponseWriterDelegator{delegate}
} else {
rw = delegate
}
handlerFunc(rw, r)
elapsed := float64(time.Since(now)) / float64(time.Microsecond) elapsed := float64(time.Since(now)) / float64(time.Microsecond)
@ -178,7 +192,7 @@ type responseWriterDelegator struct {
handler, method string handler, method string
status int status int
written int written int64
wroteHeader bool wroteHeader bool
} }
@ -193,7 +207,32 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
r.WriteHeader(http.StatusOK) r.WriteHeader(http.StatusOK)
} }
n, err := r.ResponseWriter.Write(b) n, err := r.ResponseWriter.Write(b)
r.written += n r.written += int64(n)
return n, err
}
type fancyResponseWriterDelegator struct {
*responseWriterDelegator
}
func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (f *fancyResponseWriterDelegator) Flush() {
f.ResponseWriter.(http.Flusher).Flush()
}
func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return f.ResponseWriter.(http.Hijacker).Hijack()
}
func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
if !f.wroteHeader {
f.WriteHeader(http.StatusOK)
}
n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
f.written += n
return n, err return n, err
} }
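The instrumented handler now wraps the ResponseWriter in a fancyResponseWriterDelegator only when the underlying writer implements CloseNotifier, Flusher, Hijacker, and io.ReaderFrom, so the wrapper does not hide those optional interfaces from downstream handlers. A standalone sketch of the same pattern using only the standard library; the type names are illustrative, not the library's, and only Flusher is shown for brevity:

```go
package main

import (
	"io"
	"net/http"
)

// countingWriter records bytes written, like responseWriterDelegator above.
type countingWriter struct {
	http.ResponseWriter
	written int64
}

func (c *countingWriter) Write(b []byte) (int, error) {
	n, err := c.ResponseWriter.Write(b)
	c.written += int64(n)
	return n, err
}

// flushingWriter additionally forwards Flush to the underlying writer.
type flushingWriter struct{ *countingWriter }

func (f *flushingWriter) Flush() { f.ResponseWriter.(http.Flusher).Flush() }

// wrap returns the richer wrapper only when the underlying writer supports
// it, mirroring the cn/fl/hj/rf checks in the instrumented handler.
func wrap(w http.ResponseWriter) http.ResponseWriter {
	cw := &countingWriter{ResponseWriter: w}
	if _, ok := w.(http.Flusher); ok {
		return &flushingWriter{cw}
	}
	return cw
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		ww := wrap(w)
		io.WriteString(ww, "hello\n")
		if fl, ok := ww.(http.Flusher); ok {
			fl.Flush() // still reachable because wrap preserved the interface
		}
	})
	http.ListenAndServe(":8080", nil)
}
```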

@ -170,6 +170,11 @@ func Unregister(c Collector) bool {
// checks are performed, but no further consistency checks (which would require // checks are performed, but no further consistency checks (which would require
// knowledge of a metric descriptor). // knowledge of a metric descriptor).
// //
// Sorting concerns: The caller is responsible for sorting the label pairs in
// each metric. However, the order of metrics will be sorted by the registry as
// it is required anyway after merging with the metric families collected
// conventionally.
//
// The function must be callable at any time and concurrently. // The function must be callable at any time and concurrently.
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
defRegistry.metricFamilyInjectionHook = hook defRegistry.metricFamilyInjectionHook = hook
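The added paragraph splits the sorting duties: the hook's caller must sort the label pairs inside each metric, while the registry sorts the metric families itself after merging. A minimal hook sketch that sorts its label pairs with LabelPairSorter, assuming the vendored client_golang, client_model, and protobuf import paths; the injected metric is invented for illustration:

```go
package main

import (
	"sort"

	"github.com/golang/protobuf/proto"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	prometheus.SetMetricFamilyInjectionHook(func() []*dto.MetricFamily {
		m := &dto.Metric{
			Label: []*dto.LabelPair{
				{Name: proto.String("zone"), Value: proto.String("eu")},
				{Name: proto.String("app"), Value: proto.String("web")},
			},
			Untyped: &dto.Untyped{Value: proto.Float64(42)},
		}
		// Label pairs are the hook's responsibility; the registry only
		// sorts the metric families after merging.
		sort.Sort(prometheus.LabelPairSorter(m.Label))
		return []*dto.MetricFamily{{
			Name:   proto.String("injected_metric"),
			Help:   proto.String("An invented, externally produced metric."),
			Type:   dto.MetricType_UNTYPED.Enum(),
			Metric: []*dto.Metric{m},
		}}
	})
	// A subsequent scrape of the default registry merges this family in.
}
```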
@ -520,10 +525,11 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q is not a %s", "collected metric %s %s is not a %s",
dtoMetric, metricFamily.Type, metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
) )
} }
@ -533,6 +539,11 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
buf.WriteString(metricFamily.GetName()) buf.WriteString(metricFamily.GetName())
buf.WriteByte(model.SeparatorByte) buf.WriteByte(model.SeparatorByte)
h.Write(buf.Bytes()) h.Write(buf.Bytes())
// Make sure label pairs are sorted. We depend on it for the consistency
// check. Label pairs must be sorted by contract. But the point of this
// method is to check for contract violations. So we better do the sort
// now.
sort.Sort(LabelPairSorter(dtoMetric.Label))
for _, lp := range dtoMetric.Label { for _, lp := range dtoMetric.Label {
buf.Reset() buf.Reset()
buf.WriteString(lp.GetValue()) buf.WriteString(lp.GetValue())
@ -542,8 +553,8 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
metricHash := h.Sum64() metricHash := h.Sum64()
if _, exists := metricHashes[metricHash]; exists { if _, exists := metricHashes[metricHash]; exists {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q was collected before with the same name and label values", "collected metric %s %s was collected before with the same name and label values",
dtoMetric, metricFamily.GetName(), dtoMetric,
) )
} }
metricHashes[metricHash] = struct{}{} metricHashes[metricHash] = struct{}{}
@ -555,14 +566,14 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
// Desc consistency with metric family. // Desc consistency with metric family.
if metricFamily.GetName() != desc.fqName { if metricFamily.GetName() != desc.fqName {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q has name %q but should have %q", "collected metric %s %s has name %q but should have %q",
dtoMetric, metricFamily.GetName(), desc.fqName, metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName,
) )
} }
if metricFamily.GetHelp() != desc.help { if metricFamily.GetHelp() != desc.help {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q has help %q but should have %q", "collected metric %s %s has help %q but should have %q",
dtoMetric, metricFamily.GetHelp(), desc.help, metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
) )
} }
@ -576,8 +587,8 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
} }
if len(lpsFromDesc) != len(dtoMetric.Label) { if len(lpsFromDesc) != len(dtoMetric.Label) {
return fmt.Errorf( return fmt.Errorf(
"labels in collected metric %q are inconsistent with descriptor %s", "labels in collected metric %s %s are inconsistent with descriptor %s",
dtoMetric, desc, metricFamily.GetName(), dtoMetric, desc,
) )
} }
sort.Sort(LabelPairSorter(lpsFromDesc)) sort.Sort(LabelPairSorter(lpsFromDesc))
@ -586,8 +597,8 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
if lpFromDesc.GetName() != lpFromMetric.GetName() || if lpFromDesc.GetName() != lpFromMetric.GetName() ||
lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
return fmt.Errorf( return fmt.Errorf(
"labels in collected metric %q are inconsistent with descriptor %s", "labels in collected metric %s %s are inconsistent with descriptor %s",
dtoMetric, desc, metricFamily.GetName(), dtoMetric, desc,
) )
} }
} }
@ -597,7 +608,10 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
// Is the desc registered? // Is the desc registered?
if _, exist := r.descIDs[desc.id]; !exist { if _, exist := r.descIDs[desc.id]; !exist {
return fmt.Errorf("collected metric %q with unregistered descriptor %s", dtoMetric, desc) return fmt.Errorf(
"collected metric %s %s with unregistered descriptor %s",
metricFamily.GetName(), dtoMetric, desc,
)
} }
return nil return nil

@ -68,14 +68,14 @@ func testHandler(t testing.TB) {
Metric: []*dto.Metric{ Metric: []*dto.Metric{
{ {
Label: []*dto.LabelPair{ Label: []*dto.LabelPair{
{
Name: proto.String("externallabelname"),
Value: proto.String("externalval1"),
},
{ {
Name: proto.String("externalconstname"), Name: proto.String("externalconstname"),
Value: proto.String("externalconstvalue"), Value: proto.String("externalconstvalue"),
}, },
{
Name: proto.String("externallabelname"),
Value: proto.String("externalval1"),
},
}, },
Counter: &dto.Counter{ Counter: &dto.Counter{
Value: proto.Float64(1), Value: proto.Float64(1),
@ -100,27 +100,27 @@ func testHandler(t testing.TB) {
externalMetricFamilyAsBytes := externalBuf.Bytes() externalMetricFamilyAsBytes := externalBuf.Bytes()
externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring
# TYPE externalname counter # TYPE externalname counter
externalname{externallabelname="externalval1",externalconstname="externalconstvalue"} 1 externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1
`) `)
externalMetricFamilyAsProtoText := []byte(`name: "externalname" externalMetricFamilyAsProtoText := []byte(`name: "externalname"
help: "externaldocstring" help: "externaldocstring"
type: COUNTER type: COUNTER
metric: < metric: <
label: <
name: "externallabelname"
value: "externalval1"
>
label: < label: <
name: "externalconstname" name: "externalconstname"
value: "externalconstvalue" value: "externalconstvalue"
> >
label: <
name: "externallabelname"
value: "externalval1"
>
counter: < counter: <
value: 1 value: 1
> >
> >
`) `)
externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externallabelname" value:"externalval1" > label:<name:"externalconstname" value:"externalconstvalue" > counter:<value:1 > > externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externalconstname" value:"externalconstvalue" > label:<name:"externallabelname" value:"externalval1" > counter:<value:1 > >
`) `)
expectedMetricFamily := &dto.MetricFamily{ expectedMetricFamily := &dto.MetricFamily{

@ -21,7 +21,7 @@ import "hash/fnv"
// An Untyped metric works the same as a Gauge. The only difference is that to // An Untyped metric works the same as a Gauge. The only difference is that to
// no type information is implied. // no type information is implied.
// //
// To create Gauge instances, use NewUntyped. // To create Untyped instances, use NewUntyped.
type Untyped interface { type Untyped interface {
Metric Metric
Collector Collector
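The corrected sentence points at NewUntyped rather than Gauge. A minimal sketch, assuming the vendored prometheus package; the metric name is made up:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Untyped values come from NewUntyped, just like Gauges come from NewGauge.
	u := prometheus.NewUntyped(prometheus.UntypedOpts{
		Name: "mirrored_external_value",
		Help: "A value relayed as-is, with no type implied.",
	})
	prometheus.MustRegister(u)
	u.Set(3.14)
}
```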

@ -58,6 +58,11 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
// GetMetricWithLabelValues returns the Metric for the given slice of label // GetMetricWithLabelValues returns the Metric for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of // values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Metric is created. // label values is accessed for the first time, a new Metric is created.
//
// It is possible to call this method without using the returned Metric to only
// create the new Metric but leave it at its start value (e.g. a Summary or
// Histogram without any observations). See also the SummaryVec example.
//
// Keeping the Metric for later use is possible (and should be considered if // Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and // performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Metric from the MetricVec. In that case, the // Delete can be used to delete the Metric from the MetricVec. In that case, the
@ -87,8 +92,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// GetMetricWith returns the Metric for the given Labels map (the label names // GetMetricWith returns the Metric for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is // must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Metric is created. Implications of keeping // accessed for the first time, a new Metric is created. Implications of
// the Metric are the same as for GetMetricWithLabelValues. // creating a Metric without using it and keeping the Metric for later use are
// the same as for GetMetricWithLabelValues.
// //
// An error is returned if the number and names of the Labels are inconsistent // An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc. // with those of the VariableLabels in Desc.
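The new paragraph notes that calling the method without using the returned Metric is a legitimate way to pre-create a child at its start value, e.g. a Summary with zero observations. A minimal sketch, assuming the vendored prometheus package; metric and label names are illustrative:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latencies := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "rpc_latency_seconds",
			Help: "RPC latency by method.",
		},
		[]string{"method"},
	)
	prometheus.MustRegister(latencies)

	// Create the "get" child without observing anything, so it is exported
	// immediately with zero observations (cf. the SummaryVec example).
	if _, err := latencies.GetMetricWithLabelValues("get"); err != nil {
		log.Fatal(err)
	}

	// The "put" child is created lazily on its first observation.
	latencies.WithLabelValues("put").Observe(0.042)
}
```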

@ -79,7 +79,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
case dto.MetricType_COUNTER: case dto.MetricType_COUNTER:
if metric.Counter == nil { if metric.Counter == nil {
return written, fmt.Errorf( return written, fmt.Errorf(
"expected counter in metric %s", metric, "expected counter in metric %s %s", name, metric,
) )
} }
n, err = writeSample( n, err = writeSample(
@ -90,7 +90,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
case dto.MetricType_GAUGE: case dto.MetricType_GAUGE:
if metric.Gauge == nil { if metric.Gauge == nil {
return written, fmt.Errorf( return written, fmt.Errorf(
"expected gauge in metric %s", metric, "expected gauge in metric %s %s", name, metric,
) )
} }
n, err = writeSample( n, err = writeSample(
@ -101,7 +101,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
case dto.MetricType_UNTYPED: case dto.MetricType_UNTYPED:
if metric.Untyped == nil { if metric.Untyped == nil {
return written, fmt.Errorf( return written, fmt.Errorf(
"expected untyped in metric %s", metric, "expected untyped in metric %s %s", name, metric,
) )
} }
n, err = writeSample( n, err = writeSample(
@ -112,7 +112,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
case dto.MetricType_SUMMARY: case dto.MetricType_SUMMARY:
if metric.Summary == nil { if metric.Summary == nil {
return written, fmt.Errorf( return written, fmt.Errorf(
"expected summary in metric %s", metric, "expected summary in metric %s %s", name, metric,
) )
} }
for _, q := range metric.Summary.Quantile { for _, q := range metric.Summary.Quantile {
@ -144,7 +144,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
case dto.MetricType_HISTOGRAM: case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil { if metric.Histogram == nil {
return written, fmt.Errorf( return written, fmt.Errorf(
"expected summary in metric %s", metric, "expected histogram in metric %s %s", name, metric,
) )
} }
infSeen := false infSeen := false
@ -191,7 +191,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
) )
default: default:
return written, fmt.Errorf( return written, fmt.Errorf(
"unexpected type in metric %s", metric, "unexpected type in metric %s %s", name, metric,
) )
} }
written += n written += n

@ -83,10 +83,17 @@ type Parser struct {
// and exactly the same label set), the resulting MetricFamily will contain // and exactly the same label set), the resulting MetricFamily will contain
// duplicate Metric proto messages. Similar is true for duplicate label // duplicate Metric proto messages. Similar is true for duplicate label
// names. Checks for duplicates have to be performed separately, if required. // names. Checks for duplicates have to be performed separately, if required.
// Also note that neither the metrics within each MetricFamily are sorted nor
// the label pairs within each Metric. Sorting is not required for the most
// frequent use of this method, which is sample ingestion in the Prometheus
// server. However, for presentation purposes, you might want to sort the
// metrics, and in some cases, you must sort the labels, e.g. for consumption by
// the metric family injection hook of the Prometheus registry.
// //
// Summaries are a rather special beast. You would probably not use them in the // Summaries and histograms are rather special beasts. You would probably not
// simple text format anyway. This method can deal with summaries if they are // use them in the simple text format anyway. This method can deal with
// presented in exactly the way the text.Create function creates them. // summaries and histograms if they are presented in exactly the way the
// text.Create function creates them.
// //
// This method must not be called concurrently. If you want to parse different // This method must not be called concurrently. If you want to parse different
// input concurrently, instantiate a separate Parser for each goroutine. // input concurrently, instantiate a separate Parser for each goroutine.
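The extended comment warns that the parser sorts neither metrics nor label pairs, and that it only understands summaries and histograms in the exact shape text.Create emits. A minimal parsing sketch, assuming the vendored github.com/prometheus/client_golang/text package and its Parser.TextToMetricFamilies method at this revision:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/client_golang/text"
)

func main() {
	in := `# HELP http_requests_total Total HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="get",code="200"} 1027
http_requests_total{method="get",code="400"} 3
`
	var parser text.Parser
	families, err := parser.TextToMetricFamilies(strings.NewReader(in))
	if err != nil {
		log.Fatal(err)
	}
	for name, mf := range families {
		// Neither the metrics nor their label pairs come back sorted;
		// sort them if a consumer such as the injection hook requires it.
		fmt.Println(name, len(mf.Metric))
	}
}
```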

@ -95,6 +95,21 @@ func Infof(format string, args ...interface{}) {
fileLineEntry().Infof(format, args...) fileLineEntry().Infof(format, args...)
} }
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
fileLineEntry().Info(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
fileLineEntry().Infoln(args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
fileLineEntry().Infof(format, args...)
}
// Warn logs a message at level Warn on the standard logger. // Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) { func Warn(args ...interface{}) {
fileLineEntry().Warn(args...) fileLineEntry().Warn(args...)
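The new Print, Println, and Printf helpers log at Info level, so github.com/prometheus/log can be dropped in where the standard library logger was used. A minimal sketch; the messages are placeholders:

```go
package main

import (
	"github.com/prometheus/log"
)

func main() {
	// The Print family logs at Info level, matching the standard library's
	// call signatures.
	log.Print("starting up")
	log.Printf("listening on %s", ":8080")
	log.Println("ready")
}
```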

@ -0,0 +1 @@
../../symlinktargets/abc

@ -0,0 +1 @@
../../symlinktargets/def

@ -0,0 +1 @@
../../symlinktargets/xyz

@ -0,0 +1 @@
../../symlinktargets/ghi

@ -0,0 +1 @@
../../symlinktargets/uvw

@ -0,0 +1 @@
../../symlinktargets/abc

@ -0,0 +1 @@
../../symlinktargets/def

@ -0,0 +1 @@
../../symlinktargets/ghi

@ -0,0 +1 @@
../../symlinktargets/uvw

@ -0,0 +1 @@
../../symlinktargets/xyz

@ -0,0 +1,2 @@
This directory contains some empty files that the symlinks in the "fd" directory point to.
They are otherwise ignored by the tests.

Some files were not shown because too many files have changed in this diff.