Merge pull request #372 from prometheus/superq/vendoring

Update vendoring
Ben Kochie 2016-12-19 14:49:50 +01:00 committed by GitHub
commit af4c98618f
222 changed files with 41063 additions and 5321 deletions


@@ -1,10 +1,21 @@
-# 0.9.0 (Unreleased)
+# 0.10.0
* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)
# 0.9.0
* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support
# 0.8.7


@@ -1,4 +1,4 @@
-# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
@@ -12,7 +12,7 @@ plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
-With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
@@ -32,7 +32,7 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
-With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
@@ -218,9 +218,22 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to an ELK stack (through Redis) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to an AMQP broker (like RabbitMQ) |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
#### Level logging
@@ -298,14 +311,10 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
-* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
-```go
-logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
-```
Third party logging formatters:
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
@@ -361,5 +370,56 @@ entries. It should not be a feature of the application-level logger.
| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hook and formatter from a config file, so the logger is generated with a different config for each environment.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |

-[godoc]: https://godoc.org/github.com/Sirupsen/logrus
+#### Testing

Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:

* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):

```go
logger, hook := NewNullLogger()
logger.Error("Hello error")

assert.Equal(1, len(hook.Entries))
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)

hook.Reset()
assert.Nil(hook.LastEntry())
```

#### Fatal handlers

Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.

```
...
handler := func() {
  // gracefully shutdown something...
}
logrus.RegisterExitHandler(handler)
...
```

#### Thread safety

By default the Logger is protected by a mutex for concurrent writes; this mutex is also held while calling hooks and writing the log.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.

Situations where locking is not needed include:

* You have no hooks registered, or calling the hooks is already thread-safe.
* Writing to logger.Out is already thread-safe, for example:
  1) logger.Out is protected by locks.
  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
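
As a rough illustration of that no-lock pattern (the log file path and error handling are assumptions, not part of the vendored code), a logger writing to an `O_APPEND` file with no hooks could skip the mutex like this:

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Hypothetical log file; O_APPEND keeps sub-4k writes atomic on Linux.
	f, err := os.OpenFile("/tmp/app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		logger.Fatal(err)
	}
	defer f.Close()

	logger.Out = f
	// No hooks registered and the output is append-only, so the mutex can be skipped.
	logger.SetNoLock()

	logger.WithField("pid", os.Getpid()).Info("lock-free logging")
}
```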

vendor/github.com/Sirupsen/logrus/alt_exit.go (generated, vendored, new file, 64 lines)

@ -0,0 +1,64 @@
package logrus
// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import (
"fmt"
"os"
)
var handlers = []func(){}
func runHandler(handler func()) {
defer func() {
if err := recover(); err != nil {
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
}
}()
handler()
}
func runHandlers() {
for _, handler := range handlers {
runHandler(handler)
}
}
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
runHandlers()
os.Exit(code)
}
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to shut down gracefully. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
handlers = append(handlers, handler)
}
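
A small usage sketch for the handler registry above (the database handle is hypothetical); `Exit` runs all registered handlers before terminating, and the `Fatal*` functions now route through it with code 1:

```go
package main

import (
	"database/sql"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// Hypothetical resource that needs a clean shutdown.
	var db *sql.DB

	log.RegisterExitHandler(func() {
		if db != nil {
			db.Close()
		}
	})

	// Runs every registered handler, then calls os.Exit(3).
	log.Exit(3)
}
```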


@ -3,11 +3,21 @@ package logrus
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"io"
"os" "os"
"sync"
"time" "time"
) )
var bufferPool *sync.Pool
func init() {
bufferPool = &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
}
// Defines the key when adding errors using WithError. // Defines the key when adding errors using WithError.
var ErrorKey = "error" var ErrorKey = "error"
@ -29,6 +39,9 @@ type Entry struct {
// Message passed to Debug, Info, Warn, Error, Fatal or Panic // Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string Message string
// When the formatter is called in entry.log(), a Buffer may be set on the entry
Buffer *bytes.Buffer
} }
func NewEntry(logger *Logger) *Entry { func NewEntry(logger *Logger) *Entry {
@ -39,21 +52,15 @@ func NewEntry(logger *Logger) *Entry {
} }
} }
// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
return bytes.NewBuffer(serialized), err
}
// Returns the string representation from the reader and ultimately the // Returns the string representation from the reader and ultimately the
// formatter. // formatter.
func (entry *Entry) String() (string, error) { func (entry *Entry) String() (string, error) {
reader, err := entry.Reader() serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil { if err != nil {
return "", err return "", err
} }
str := string(serialized)
return reader.String(), err return str, nil
} }
// Add an error as single field (using the key defined in ErrorKey) to the Entry. // Add an error as single field (using the key defined in ErrorKey) to the Entry.
@ -68,7 +75,7 @@ func (entry *Entry) WithField(key string, value interface{}) *Entry {
// Add a map of fields to the Entry. // Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry { func (entry *Entry) WithFields(fields Fields) *Entry {
data := Fields{} data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data { for k, v := range entry.Data {
data[k] = v data[k] = v
} }
@ -81,6 +88,7 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
// This function is not declared with a pointer value because otherwise // This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines // race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) { func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
entry.Time = time.Now() entry.Time = time.Now()
entry.Level = level entry.Level = level
entry.Message = msg entry.Message = msg
@ -90,20 +98,23 @@ func (entry Entry) log(level Level, msg string) {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock() entry.Logger.mu.Unlock()
} }
buffer = bufferPool.Get().(*bytes.Buffer)
reader, err := entry.Reader() buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
serialized, err := entry.Logger.Formatter.Format(&entry)
entry.Buffer = nil
if err != nil { if err != nil {
entry.Logger.mu.Lock() entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock() entry.Logger.mu.Unlock()
} } else {
entry.Logger.mu.Lock()
entry.Logger.mu.Lock() _, err = entry.Logger.Out.Write(serialized)
defer entry.Logger.mu.Unlock() if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
_, err = io.Copy(entry.Logger.Out, reader) }
if err != nil { entry.Logger.mu.Unlock()
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
} }
// To avoid Entry#log() returning a value that only would make sense for // To avoid Entry#log() returning a value that only would make sense for
@ -150,7 +161,7 @@ func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel { if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...)) entry.log(FatalLevel, fmt.Sprint(args...))
} }
os.Exit(1) Exit(1)
} }
func (entry *Entry) Panic(args ...interface{}) { func (entry *Entry) Panic(args ...interface{}) {
@ -198,7 +209,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel { if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...)) entry.Fatal(fmt.Sprintf(format, args...))
} }
os.Exit(1) Exit(1)
} }
func (entry *Entry) Panicf(format string, args ...interface{}) { func (entry *Entry) Panicf(format string, args ...interface{}) {
@ -245,7 +256,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel { if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...)) entry.Fatal(entry.sprintlnn(args...))
} }
os.Exit(1) Exit(1)
} }
func (entry *Entry) Panicln(args ...interface{}) { func (entry *Entry) Panicln(args ...interface{}) {


@ -31,18 +31,15 @@ type Formatter interface {
// It's not exported because it's still using Data in an opinionated way. It's to // It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters. // avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) { func prefixFieldClashes(data Fields) {
_, ok := data["time"] if t, ok := data["time"]; ok {
if ok { data["fields.time"] = t
data["fields.time"] = data["time"]
} }
_, ok = data["msg"] if m, ok := data["msg"]; ok {
if ok { data["fields.msg"] = m
data["fields.msg"] = data["msg"]
} }
_, ok = data["level"] if l, ok := data["level"]; ok {
if ok { data["fields.level"] = l
data["fields.level"] = data["level"]
} }
} }


@ -26,8 +26,31 @@ type Logger struct {
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in // logged. `logrus.Debug` is useful in
Level Level Level Level
// Used to sync writing to the log. // Used to sync writing to the log. Locking is enabled by Default
mu sync.Mutex mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
}
type MutexWrap struct {
lock sync.Mutex
disabled bool
}
func (mw *MutexWrap) Lock() {
if !mw.disabled {
mw.lock.Lock()
}
}
func (mw *MutexWrap) Unlock() {
if !mw.disabled {
mw.lock.Unlock()
}
}
func (mw *MutexWrap) Disable() {
mw.disabled = true
} }
// Creates a new logger. Configuration should be set by changing `Formatter`, // Creates a new logger. Configuration should be set by changing `Formatter`,
@ -51,162 +74,235 @@ func New() *Logger {
} }
} }
// Adds a field to the log entry, note that you it doesn't log until you call func (logger *Logger) newEntry() *Entry {
entry, ok := logger.entryPool.Get().(*Entry)
if ok {
return entry
}
return NewEntry(logger)
}
func (logger *Logger) releaseEntry(entry *Entry) {
logger.entryPool.Put(entry)
}
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. // Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`. // If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry { func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value) entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithField(key, value)
} }
// Adds a struct of fields to the log entry. All it does is call `WithField` for // Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`. // each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry { func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields) entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithFields(fields)
} }
// Add an error as single field to the log entry. All it does is call // Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`. // `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry { func (logger *Logger) WithError(err error) *Entry {
return NewEntry(logger).WithError(err) entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithError(err)
} }
func (logger *Logger) Debugf(format string, args ...interface{}) { func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel { if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...) entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Infof(format string, args ...interface{}) { func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel { if logger.Level >= InfoLevel {
NewEntry(logger).Infof(format, args...) entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Printf(format string, args ...interface{}) { func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...) entry := logger.newEntry()
entry.Printf(format, args...)
logger.releaseEntry(entry)
} }
func (logger *Logger) Warnf(format string, args ...interface{}) { func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel { if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...) entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Warningf(format string, args ...interface{}) { func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel { if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...) entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Errorf(format string, args ...interface{}) { func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel { if logger.Level >= ErrorLevel {
NewEntry(logger).Errorf(format, args...) entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Fatalf(format string, args ...interface{}) { func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel { if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...) entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
} }
os.Exit(1) Exit(1)
} }
func (logger *Logger) Panicf(format string, args ...interface{}) { func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel { if logger.Level >= PanicLevel {
NewEntry(logger).Panicf(format, args...) entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Debug(args ...interface{}) { func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel { if logger.Level >= DebugLevel {
NewEntry(logger).Debug(args...) entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Info(args ...interface{}) { func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel { if logger.Level >= InfoLevel {
NewEntry(logger).Info(args...) entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Print(args ...interface{}) { func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...) entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
} }
func (logger *Logger) Warn(args ...interface{}) { func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel { if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...) entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Warning(args ...interface{}) { func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel { if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...) entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Error(args ...interface{}) { func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel { if logger.Level >= ErrorLevel {
NewEntry(logger).Error(args...) entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Fatal(args ...interface{}) { func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel { if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...) entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
} }
os.Exit(1) Exit(1)
} }
func (logger *Logger) Panic(args ...interface{}) { func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel { if logger.Level >= PanicLevel {
NewEntry(logger).Panic(args...) entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Debugln(args ...interface{}) { func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel { if logger.Level >= DebugLevel {
NewEntry(logger).Debugln(args...) entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Infoln(args ...interface{}) { func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel { if logger.Level >= InfoLevel {
NewEntry(logger).Infoln(args...) entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Println(args ...interface{}) { func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...) entry := logger.newEntry()
entry.Println(args...)
logger.releaseEntry(entry)
} }
func (logger *Logger) Warnln(args ...interface{}) { func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel { if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...) entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Warningln(args ...interface{}) { func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel { if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...) entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Errorln(args ...interface{}) { func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel { if logger.Level >= ErrorLevel {
NewEntry(logger).Errorln(args...) entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Fatalln(args ...interface{}) { func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel { if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...) entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
} }
os.Exit(1) Exit(1)
} }
func (logger *Logger) Panicln(args ...interface{}) { func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel { if logger.Level >= PanicLevel {
NewEntry(logger).Panicln(args...) entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
} }
} }
// When the file is opened in append mode, it is safe to
// write to it concurrently (for messages under 4k on Linux).
// In these cases the user can choose to disable the lock.
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}


@ -3,6 +3,7 @@ package logrus
import ( import (
"fmt" "fmt"
"log" "log"
"strings"
) )
// Fields type, used to pass to `WithFields`. // Fields type, used to pass to `WithFields`.
@ -33,7 +34,7 @@ func (level Level) String() string {
// ParseLevel takes a string level and returns the Logrus log level constant. // ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) { func ParseLevel(lvl string) (Level, error) {
switch lvl { switch strings.ToLower(lvl) {
case "panic": case "panic":
return PanicLevel, nil return PanicLevel, nil
case "fatal": case "fatal":
@ -52,6 +53,16 @@ func ParseLevel(lvl string) (Level, error) {
return l, fmt.Errorf("not a valid logrus Level: %q", lvl) return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
} }
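
Since the lookup above now lower-cases its input, mixed-case level names from flags or environment variables parse cleanly; a minimal sketch (the literal level string is illustrative):

```go
package main

import log "github.com/Sirupsen/logrus"

func main() {
	// The level string would typically come from a flag or environment variable.
	lvl, err := log.ParseLevel("WARNING") // mixed case accepted as of 0.10.0
	if err != nil {
		lvl = log.InfoLevel
	}
	log.SetLevel(lvl)
	log.Warn("level configured")
}
```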
// A constant exposing all logging levels
var AllLevels = []Level{
PanicLevel,
FatalLevel,
ErrorLevel,
WarnLevel,
InfoLevel,
DebugLevel,
}
// These are the different logging levels. You can set the logging level to log // These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`. // on your instance of logger, obtained with `logrus.New()`.
const ( const (
@ -96,3 +107,37 @@ type StdLogger interface {
Panicf(string, ...interface{}) Panicf(string, ...interface{})
Panicln(...interface{}) Panicln(...interface{})
} }
// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
WithField(key string, value interface{}) *Entry
WithFields(fields Fields) *Entry
WithError(err error) *Entry
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Printf(format string, args ...interface{})
Warnf(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Panicf(format string, args ...interface{})
Debug(args ...interface{})
Info(args ...interface{})
Print(args ...interface{})
Warn(args ...interface{})
Warning(args ...interface{})
Error(args ...interface{})
Fatal(args ...interface{})
Panic(args ...interface{})
Debugln(args ...interface{})
Infoln(args ...interface{})
Println(args ...interface{})
Warnln(args ...interface{})
Warningln(args ...interface{})
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
}
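
Because both `*logrus.Logger` and `*logrus.Entry` satisfy this interface, library code can accept a `FieldLogger` and let callers pass either one; a minimal sketch:

```go
package main

import log "github.com/Sirupsen/logrus"

// process depends only on the FieldLogger interface, not on a concrete type.
func process(l log.FieldLogger, id string) {
	l.WithField("id", id).Info("processing")
}

func main() {
	process(log.StandardLogger(), "job-1")                 // a *Logger
	process(log.WithField("component", "worker"), "job-2") // an *Entry
}
```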


@ -0,0 +1,8 @@
// +build appengine
package logrus
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
return true
}


@@ -1,4 +1,5 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus


@@ -3,6 +3,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package logrus
import "syscall"


@@ -4,6 +4,7 @@
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus


@@ -1,4 +1,4 @@
-// +build solaris
+// +build solaris,!appengine
package logrus


@@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build windows
+// +build windows,!appengine
package logrus


@ -57,6 +57,7 @@ type TextFormatter struct {
} }
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
var keys []string = make([]string, 0, len(entry.Data)) var keys []string = make([]string, 0, len(entry.Data))
for k := range entry.Data { for k := range entry.Data {
keys = append(keys, k) keys = append(keys, k)
@ -65,8 +66,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
if !f.DisableSorting { if !f.DisableSorting {
sort.Strings(keys) sort.Strings(keys)
} }
if entry.Buffer != nil {
b := &bytes.Buffer{} b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
prefixFieldClashes(entry.Data) prefixFieldClashes(entry.Data)
@ -118,7 +122,8 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
} }
for _, k := range keys { for _, k := range keys {
v := entry.Data[k] v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
} }
} }
@ -128,34 +133,36 @@ func needsQuoting(text string) bool {
(ch >= 'A' && ch <= 'Z') || (ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') || (ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') { ch == '-' || ch == '.') {
return false return true
} }
} }
return true return false
} }
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
b.WriteString(key) b.WriteString(key)
b.WriteByte('=') b.WriteByte('=')
f.appendValue(b, value)
b.WriteByte(' ')
}
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
switch value := value.(type) { switch value := value.(type) {
case string: case string:
if needsQuoting(value) { if !needsQuoting(value) {
b.WriteString(value) b.WriteString(value)
} else { } else {
fmt.Fprintf(b, "%q", value) fmt.Fprintf(b, "%q", value)
} }
case error: case error:
errmsg := value.Error() errmsg := value.Error()
if needsQuoting(errmsg) { if !needsQuoting(errmsg) {
b.WriteString(errmsg) b.WriteString(errmsg)
} else { } else {
fmt.Fprintf(b, "%q", value) fmt.Fprintf(b, "%q", errmsg)
} }
default: default:
fmt.Fprint(b, value) fmt.Fprint(b, value)
} }
b.WriteByte(' ')
} }
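
The new `entry.Buffer` field used by `TextFormatter` above is also available to third-party formatters; a sketch of a custom formatter that reuses the pooled buffer when present (the output layout is arbitrary and only illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"time"

	log "github.com/Sirupsen/logrus"
)

type kvFormatter struct{}

func (f *kvFormatter) Format(entry *log.Entry) ([]byte, error) {
	var b *bytes.Buffer
	if entry.Buffer != nil {
		b = entry.Buffer // reuse the buffer handed over by entry.log()
	} else {
		b = &bytes.Buffer{}
	}
	fmt.Fprintf(b, "%s level=%s msg=%q", entry.Time.Format(time.RFC3339), entry.Level, entry.Message)
	for k, v := range entry.Data {
		fmt.Fprintf(b, " %s=%v", k, v)
	}
	b.WriteByte('\n')
	return b.Bytes(), nil
}

func main() {
	log.SetFormatter(&kvFormatter{})
	log.WithField("answer", 42).Info("formatted with a pooled buffer")
}
```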


@ -7,18 +7,40 @@ import (
) )
func (logger *Logger) Writer() *io.PipeWriter { func (logger *Logger) Writer() *io.PipeWriter {
return logger.WriterLevel(InfoLevel)
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe() reader, writer := io.Pipe()
go logger.writerScanner(reader) var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = logger.Debug
case InfoLevel:
printFunc = logger.Info
case WarnLevel:
printFunc = logger.Warn
case ErrorLevel:
printFunc = logger.Error
case FatalLevel:
printFunc = logger.Fatal
case PanicLevel:
printFunc = logger.Panic
default:
printFunc = logger.Print
}
go logger.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer) runtime.SetFinalizer(writer, writerFinalizer)
return writer return writer
} }
func (logger *Logger) writerScanner(reader *io.PipeReader) { func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader) scanner := bufio.NewScanner(reader)
for scanner.Scan() { for scanner.Scan() {
logger.Print(scanner.Text()) printFunc(scanner.Text())
} }
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err) logger.Errorf("Error while reading from Writer: %s", err)
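
A common use of the new `WriterLevel` is wiring a standard-library logger (for example `http.Server.ErrorLog`) into logrus at a chosen level; a sketch, with the listen address being an assumption:

```go
package main

import (
	"log"
	"net/http"

	logrus "github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Everything written to w is re-logged line by line at Error level.
	w := logger.WriterLevel(logrus.ErrorLevel)
	defer w.Close()

	srv := &http.Server{
		Addr:     ":8080", // assumed address
		ErrorLog: log.New(w, "", 0),
	}
	logger.Fatal(srv.ListenAndServe())
}
```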

vendor/github.com/beevik/ntp/ntp.go (generated, vendored, 100 lines changed)

@ -16,7 +16,7 @@ import (
"time" "time"
) )
type mode byte type mode uint8
const ( const (
reserved mode = 0 + iota reserved mode = 0 + iota
@ -39,40 +39,53 @@ var (
ntpEpoch = time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) ntpEpoch = time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)
) )
type ntpTime struct { // An ntpTime is a 64-bit fixed-point (Q32.32) representation of the number of
Seconds uint32 // seconds elapsed since the NTP epoch.
Fraction uint32 type ntpTime uint64
// Duration interprets the fixed-point ntpTime as a number of elapsed seconds
// and returns the corresponding time.Duration value.
func (t ntpTime) Duration() time.Duration {
sec := (t >> 32) * nanoPerSec
frac := (t & 0xffffffff) * nanoPerSec >> 32
return time.Duration(sec + frac)
} }
// Time interprets the fixed-point ntpTime as a an absolute time and returns
// the corresponding time.Time value.
func (t ntpTime) Time() time.Time { func (t ntpTime) Time() time.Time {
return ntpEpoch.Add(t.sinceEpoch()) return ntpEpoch.Add(t.Duration())
} }
// sinceEpoch converts the ntpTime record t into a duration since the NTP // toNtpTime converts the time.Time value t into its 64-bit fixed-point
// epoch time (Jan 1, 1900). // ntpTime representation.
func (t ntpTime) sinceEpoch() time.Duration {
sec := time.Duration(t.Seconds) * time.Second
frac := time.Duration(uint64(t.Fraction) * nanoPerSec >> 32)
return sec + frac
}
// toNtpTime converts the time value t into an ntpTime representation.
func toNtpTime(t time.Time) ntpTime { func toNtpTime(t time.Time) ntpTime {
nsec := uint64(t.Sub(ntpEpoch)) nsec := uint64(t.Sub(ntpEpoch))
return ntpTime{ sec := nsec / nanoPerSec
Seconds: uint32(nsec / nanoPerSec), frac := (nsec - sec*nanoPerSec) << 32 / nanoPerSec
Fraction: uint32((nsec % nanoPerSec) << 32 / nanoPerSec), return ntpTime(sec<<32 | frac)
} }
// An ntpTimeShort is a 32-bit fixed-point (Q16.16) representation of the
// number of seconds elapsed since the NTP epoch.
type ntpTimeShort uint32
// Duration interprets the fixed-point ntpTimeShort as a number of elapsed
// seconds and returns the corresponding time.Duration value.
func (t ntpTimeShort) Duration() time.Duration {
sec := (t >> 16) * nanoPerSec
frac := (t & 0xffff) * nanoPerSec >> 16
return time.Duration(sec + frac)
} }
// msg is an internal representation of an NTP packet. // msg is an internal representation of an NTP packet.
type msg struct { type msg struct {
LiVnMode byte // Leap Indicator (2) + Version (3) + Mode (3) LiVnMode uint8 // Leap Indicator (2) + Version (3) + Mode (3)
Stratum byte Stratum uint8
Poll byte Poll int8
Precision byte Precision int8
RootDelay uint32 RootDelay ntpTimeShort
RootDispersion uint32 RootDispersion ntpTimeShort
ReferenceID uint32 ReferenceID uint32
ReferenceTime ntpTime ReferenceTime ntpTime
OriginTime ntpTime OriginTime ntpTime
@ -87,16 +100,21 @@ func (m *msg) setVersion(v int) {
// setMode sets the NTP protocol mode on the message. // setMode sets the NTP protocol mode on the message.
func (m *msg) setMode(md mode) { func (m *msg) setMode(md mode) {
m.LiVnMode = (m.LiVnMode & 0xf8) | byte(md) m.LiVnMode = (m.LiVnMode & 0xf8) | uint8(md)
} }
// A Response contains time data, some of which is returned by the NTP server // A Response contains time data, some of which is returned by the NTP server
// and some of which is calculated by the client. // and some of which is calculated by the client.
type Response struct { type Response struct {
Time time.Time // receive time reported by the server Time time.Time // receive time reported by the server
RTT time.Duration // round-trip time between client and server RTT time.Duration // round-trip time between client and server
ClockOffset time.Duration // local clock offset relative to server ClockOffset time.Duration // local clock offset relative to server
Stratum uint8 // stratum level of NTP server's clock Poll time.Duration // maximum polling interval
Precision time.Duration // precision of server's system clock
Stratum uint8 // stratum level of NTP server's clock
ReferenceID uint32 // server's reference ID
RootDelay time.Duration // server's RTT to the reference clock
RootDispersion time.Duration // server's dispersion to the reference clock
} }
// Query returns information from the remote NTP server specified as host. NTP
@ -109,10 +127,15 @@ func Query(host string, version int) (*Response, error) {
} }
r := &Response{ r := &Response{
Time: m.ReceiveTime.Time(), Time: m.ReceiveTime.Time(),
RTT: rtt(m.OriginTime, m.ReceiveTime, m.TransmitTime, now), RTT: rtt(m.OriginTime, m.ReceiveTime, m.TransmitTime, now),
ClockOffset: offset(m.OriginTime, m.ReceiveTime, m.TransmitTime, now), ClockOffset: offset(m.OriginTime, m.ReceiveTime, m.TransmitTime, now),
Stratum: m.Stratum, Poll: toInterval(m.Poll),
Precision: toInterval(m.Precision),
Stratum: m.Stratum,
ReferenceID: m.ReferenceID,
RootDelay: m.RootDelay.Duration(),
RootDispersion: m.RootDispersion.Duration(),
} }
// https://tools.ietf.org/html/rfc5905#section-7.3 // https://tools.ietf.org/html/rfc5905#section-7.3
@ -204,3 +227,14 @@ func offset(t1, t2, t3, t4 ntpTime) time.Duration {
b := t3.Time().Sub(t4.Time()) b := t3.Time().Sub(t4.Time())
return (a + b) / time.Duration(2) return (a + b) / time.Duration(2)
} }
func toInterval(t int8) time.Duration {
switch {
case t > 0:
return time.Duration(uint64(time.Second) << uint(t))
case t < 0:
return time.Duration(uint64(time.Second) >> uint(-t))
default:
return time.Second
}
}
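
With the expanded `Response` above, a caller gets poll interval, precision, and root statistics alongside the clock offset; a sketch using a public pool server (the host choice is an assumption):

```go
package main

import (
	"fmt"
	"log"

	"github.com/beevik/ntp"
)

func main() {
	// NTP version 4 query against an assumed public pool server.
	r, err := ntp.Query("0.pool.ntp.org", 4)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("server time:    ", r.Time)
	fmt.Println("clock offset:   ", r.ClockOffset)
	fmt.Println("round trip:     ", r.RTT)
	fmt.Println("stratum:        ", r.Stratum)
	fmt.Println("root delay:     ", r.RootDelay)
	fmt.Println("root dispersion:", r.RootDispersion)
	fmt.Println("poll interval:  ", r.Poll)
}
```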

vendor/github.com/beorn7/perks/LICENSE (generated, vendored, new file, 20 lines)

@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -133,7 +133,7 @@ func (s *Stream) Query(q float64) float64 {
if l == 0 {
return 0
}
-i := int(float64(l) * q)
+i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
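
The switch from truncation to `math.Ceil` moves the selected sample up by one whenever `l*q` is not an integer, which matters most at high quantiles over few samples; a small sketch of the index arithmetic (values chosen purely for illustration):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	l, q := 10, 0.99 // 10 samples, 99th percentile

	oldIdx := int(float64(l) * q)            // 9  -> index 8 after the "-1" adjustment
	newIdx := int(math.Ceil(float64(l) * q)) // 10 -> index 9 after the "-1" adjustment
	if oldIdx > 0 {
		oldIdx--
	}
	if newIdx > 0 {
		newIdx--
	}
	fmt.Println(oldIdx, newIdx) // 8 9: the fix selects the last (largest) sample here
}
```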


@ -83,10 +83,20 @@ type Conn struct {
} }
} }
// New establishes a connection to the system bus and authenticates. // New establishes a connection to any available bus and authenticates.
// Callers should call Close() when done with the connection. // Callers should call Close() when done with the connection.
func New() (*Conn, error) { func New() (*Conn, error) {
return newConnection(func() (*dbus.Conn, error) { conn, err := NewSystemConnection()
if err != nil && os.Geteuid() == 0 {
return NewSystemdConnection()
}
return conn, err
}
// NewSystemConnection establishes a connection to the system bus and authenticates.
// Callers should call Close() when done with the connection
func NewSystemConnection() (*Conn, error) {
return NewConnection(func() (*dbus.Conn, error) {
return dbusAuthHelloConnection(dbus.SystemBusPrivate) return dbusAuthHelloConnection(dbus.SystemBusPrivate)
}) })
} }
@ -95,7 +105,7 @@ func New() (*Conn, error) {
// authenticates. This can be used to connect to systemd user instances. // authenticates. This can be used to connect to systemd user instances.
// Callers should call Close() when done with the connection. // Callers should call Close() when done with the connection.
func NewUserConnection() (*Conn, error) { func NewUserConnection() (*Conn, error) {
return newConnection(func() (*dbus.Conn, error) { return NewConnection(func() (*dbus.Conn, error) {
return dbusAuthHelloConnection(dbus.SessionBusPrivate) return dbusAuthHelloConnection(dbus.SessionBusPrivate)
}) })
} }
@ -104,7 +114,7 @@ func NewUserConnection() (*Conn, error) {
// This can be used for communicating with systemd without a dbus daemon. // This can be used for communicating with systemd without a dbus daemon.
// Callers should call Close() when done with the connection. // Callers should call Close() when done with the connection.
func NewSystemdConnection() (*Conn, error) { func NewSystemdConnection() (*Conn, error) {
return newConnection(func() (*dbus.Conn, error) { return NewConnection(func() (*dbus.Conn, error) {
// We skip Hello when talking directly to systemd. // We skip Hello when talking directly to systemd.
return dbusAuthConnection(func() (*dbus.Conn, error) { return dbusAuthConnection(func() (*dbus.Conn, error) {
return dbus.Dial("unix:path=/run/systemd/private") return dbus.Dial("unix:path=/run/systemd/private")
@ -118,13 +128,18 @@ func (c *Conn) Close() {
c.sigconn.Close() c.sigconn.Close()
} }
func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) { // NewConnection establishes a connection to a bus using a caller-supplied function.
sysconn, err := createBus() // This allows connecting to remote buses through a user-supplied mechanism.
// The supplied function may be called multiple times, and should return independent connections.
// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded,
// and any authentication should be handled by the function.
func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) {
sysconn, err := dialBus()
if err != nil { if err != nil {
return nil, err return nil, err
} }
sigconn, err := createBus() sigconn, err := dialBus()
if err != nil { if err != nil {
sysconn.Close() sysconn.Close()
return nil, err return nil, err
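
The newly exported `NewConnection` lets callers supply their own bus dialer, for example to reach a remote systemd instance; a sketch (the TCP address is purely illustrative, and per the comment above the dialer must complete authentication and the Hello call itself):

```go
package main

import (
	"log"

	sd "github.com/coreos/go-systemd/dbus"
	godbus "github.com/godbus/dbus"
)

func main() {
	conn, err := sd.NewConnection(func() (*godbus.Conn, error) {
		// Illustrative remote address; NewConnection may call this more than once.
		c, err := godbus.Dial("tcp:host=10.0.0.5,port=8672")
		if err != nil {
			return nil, err
		}
		if err := c.Auth(nil); err != nil {
			c.Close()
			return nil, err
		}
		if err := c.Hello(); err != nil {
			c.Close()
			return nil, err
		}
		return c, nil
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```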


@ -199,6 +199,11 @@ func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, err
return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
} }
// GetServiceProperty returns property for given service name and property name
func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName)
}
// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. // GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope // Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit // return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
@ -234,12 +239,11 @@ type UnitStatus struct {
JobPath dbus.ObjectPath // The job object path JobPath dbus.ObjectPath // The job object path
} }
// ListUnits returns an array with all currently loaded units. Note that type storeFunc func(retvalues ...interface{}) error
// units may be known by multiple names at the same time, and hence there might
// be more unit names loaded than actual units behind them. func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
func (c *Conn) ListUnits() ([]UnitStatus, error) {
result := make([][]interface{}, 0) result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result) err := f(&result)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -263,15 +267,43 @@ func (c *Conn) ListUnits() ([]UnitStatus, error) {
return status, nil return status, nil
} }
// ListUnits returns an array with all currently loaded units. Note that
// units may be known by multiple names at the same time, and hence there might
// be more unit names loaded than actual units behind them.
func (c *Conn) ListUnits() ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
}
// ListUnitsFiltered returns an array with units filtered by state.
// It takes a list of units' statuses to filter.
func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
}
// ListUnitsByPatterns returns an array with units.
// It takes a list of units' statuses and names to filter.
// Note that units may be known by multiple names at the same time,
// and hence there might be more unit names loaded than actual units behind them.
func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
}
// ListUnitsByNames returns an array with units. It takes a list of units'
// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns
// method, this method returns statuses even for inactive or non-existing
// units. Input array should contain exact unit names, but not patterns.
func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
}
type UnitFile struct { type UnitFile struct {
Path string Path string
Type string Type string
} }
// ListUnitFiles returns an array of all available units on disk. func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
result := make([][]interface{}, 0) result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store(&result) err := f(&result)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -295,6 +327,16 @@ func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
return files, nil return files, nil
} }
// ListUnitFiles returns an array of all available units on disk.
func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
}
// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns.
func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
}
type LinkUnitFileChange EnableUnitFileChange type LinkUnitFileChange EnableUnitFileChange
// LinkUnitFiles() links unit files (that are located outside of the // LinkUnitFiles() links unit files (that are located outside of the
@ -431,6 +473,87 @@ type DisableUnitFileChange struct {
Destination string // Destination of the symlink Destination string // Destination of the symlink
} }
// MaskUnitFiles masks one or more units in the system
//
// It takes three arguments:
// * list of units to mask (either just file names or full
// absolute paths if the unit files are residing outside
// the usual unit search paths)
// * runtime to specify whether the unit was enabled for runtime
// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
// * force flag
func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]MaskUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return nil, err
}
return changes, nil
}
type MaskUnitFileChange struct {
Type string // Type of the change (one of symlink or unlink)
Filename string // File name of the symlink
Destination string // Destination of the symlink
}
// UnmaskUnitFiles unmasks one or more units in the system
//
// It takes two arguments:
// * list of unit files to unmask (either just file names or full
// absolute paths if the unit files are residing outside
// the usual unit search paths)
// * runtime to specify whether the unit was enabled for runtime
// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
result := make([][]interface{}, 0)
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
if err != nil {
return nil, err
}
resultInterface := make([]interface{}, len(result))
for i := range result {
resultInterface[i] = result[i]
}
changes := make([]UnmaskUnitFileChange, len(result))
changesInterface := make([]interface{}, len(changes))
for i := range changes {
changesInterface[i] = &changes[i]
}
err = dbus.Store(resultInterface, changesInterface...)
if err != nil {
return nil, err
}
return changes, nil
}
type UnmaskUnitFileChange struct {
Type string // Type of the change (one of symlink or unlink)
Filename string // File name of the symlink
Destination string // Destination of the symlink
}
// Reload instructs systemd to scan for and reload unit files. This is // Reload instructs systemd to scan for and reload unit files. This is
// equivalent to a 'systemctl daemon-reload'. // equivalent to a 'systemctl daemon-reload'.
func (c *Conn) Reload() error { func (c *Conn) Reload() error {
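
A sketch of the new list and mask calls from a client's perspective (the unit names, states, and patterns are illustrative only):

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Only active/failed units whose names match the given shell-style patterns.
	units, err := conn.ListUnitsByPatterns([]string{"active", "failed"}, []string{"ssh*", "docker.service"})
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range units {
		fmt.Println(u.Name, u.ActiveState)
	}

	// Mask a unit persistently (runtime=false), overwriting existing symlinks (force=true).
	changes, err := conn.MaskUnitFiles([]string{"foo.service"}, false, true)
	if err != nil {
		log.Fatal(err)
	}
	for _, ch := range changes {
		fmt.Println(ch.Type, ch.Filename, "->", ch.Destination)
	}
}
```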


@ -78,6 +78,15 @@ func PropRemainAfterExit(b bool) Property {
} }
} }
// PropType sets the Type service property. See
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
func PropType(t string) Property {
return Property{
Name: "Type",
Value: dbus.MakeVariant(t),
}
}
// PropDescription sets the Description unit property. See // PropDescription sets the Description unit property. See
// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= // http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
func PropDescription(desc string) Property { func PropDescription(desc string) Property {
@ -216,3 +225,13 @@ func PropSlice(slice string) Property {
Value: dbus.MakeVariant(slice), Value: dbus.MakeVariant(slice),
} }
} }
// PropPids sets the PIDs field of scope units used in the initial construction
// of the scope only and specifies the initial PIDs to add to the scope object.
// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties
func PropPids(pids ...uint32) Property {
return Property{
Name: "PIDs",
Value: dbus.MakeVariant(pids),
}
}
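
The new property helpers are typically combined with transient units; a sketch that places the current process into a transient scope (`StartTransientUnit` is assumed to be the existing call in this package, and the unit name is an assumption):

```go
package main

import (
	"log"
	"os"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	props := []dbus.Property{
		dbus.PropDescription("example transient scope"),
		dbus.PropPids(uint32(os.Getpid())), // initial PIDs for the scope
	}
	if _, err := conn.StartTransientUnit("example.scope", "replace", props, nil); err != nil {
		log.Fatal(err)
	}
}
```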


@@ -1,2 +1,3 @@
Brandon Philips <brandon@ifup.org> (@philips)
Brian Waldon <brian@waldon.cc> (@bcwaldon)
John Southworth <jsouthwo@brocade.com> (@jsouthworth)


@@ -1,3 +1,5 @@
[![Build Status](https://travis-ci.org/godbus/dbus.svg?branch=master)](https://travis-ci.org/godbus/dbus)
dbus
----

vendor/github.com/godbus/dbus/conn.go (generated, vendored, 167 lines changed)

@ -16,6 +16,7 @@ var (
systemBusLck sync.Mutex systemBusLck sync.Mutex
sessionBus *Conn sessionBus *Conn
sessionBusLck sync.Mutex sessionBusLck sync.Mutex
sessionEnvLck sync.Mutex
) )
// ErrClosed is the error returned by calls on a closed connection. // ErrClosed is the error returned by calls on a closed connection.
@ -46,15 +47,13 @@ type Conn struct {
calls map[uint32]*Call calls map[uint32]*Call
callsLck sync.RWMutex callsLck sync.RWMutex
handlers map[ObjectPath]map[string]exportWithMapping handler Handler
handlersLck sync.RWMutex
out chan *Message out chan *Message
closed bool closed bool
outLck sync.RWMutex outLck sync.RWMutex
signals []chan<- *Signal signalHandler SignalHandler
signalsLck sync.Mutex
eavesdropped chan<- *Message eavesdropped chan<- *Message
eavesdroppedLck sync.Mutex eavesdroppedLck sync.Mutex
@ -89,14 +88,33 @@ func SessionBus() (conn *Conn, err error) {
return return
} }
// SessionBusPrivate returns a new private connection to the session bus. func getSessionBusAddress() (string, error) {
func SessionBusPrivate() (*Conn, error) { sessionEnvLck.Lock()
defer sessionEnvLck.Unlock()
address := os.Getenv("DBUS_SESSION_BUS_ADDRESS") address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
if address != "" && address != "autolaunch:" { if address != "" && address != "autolaunch:" {
return Dial(address) return address, nil
}
return getSessionBusPlatformAddress()
}
// SessionBusPrivate returns a new private connection to the session bus.
func SessionBusPrivate() (*Conn, error) {
address, err := getSessionBusAddress()
if err != nil {
return nil, err
} }
return sessionBusPlatform() return Dial(address)
}
// SessionBusPrivate returns a new private connection to the session bus.
func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
address, err := getSessionBusAddress()
if err != nil {
return nil, err
}
return DialHandler(address, handler, signalHandler)
} }
// SystemBus returns a shared connection to the system bus, connecting to it if // SystemBus returns a shared connection to the system bus, connecting to it if
@ -128,13 +146,22 @@ func SystemBus() (conn *Conn, err error) {
return return
} }
// SystemBusPrivate returns a new private connection to the system bus. func getSystemBusAddress() string {
func SystemBusPrivate() (*Conn, error) {
address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
if address != "" { if address != "" {
return Dial(address) return address
} }
return Dial(defaultSystemBusAddress) return defaultSystemBusAddress
}
// SystemBusPrivate returns a new private connection to the system bus.
func SystemBusPrivate() (*Conn, error) {
return Dial(getSystemBusAddress())
}
// SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers.
func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
return DialHandler(getSystemBusAddress(), handler, signalHandler)
} }
// Dial establishes a new private connection to the message bus specified by address. // Dial establishes a new private connection to the message bus specified by address.
@ -143,21 +170,36 @@ func Dial(address string) (*Conn, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newConn(tr) return newConn(tr, newDefaultHandler(), newDefaultSignalHandler())
}
// DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers.
func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) {
tr, err := getTransport(address)
if err != nil {
return nil, err
}
return newConn(tr, handler, signalHandler)
} }
// NewConn creates a new private *Conn from an already established connection. // NewConn creates a new private *Conn from an already established connection.
func NewConn(conn io.ReadWriteCloser) (*Conn, error) { func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
return newConn(genericTransport{conn}) return NewConnHandler(conn, newDefaultHandler(), newDefaultSignalHandler())
}
// NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers.
func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) {
return newConn(genericTransport{conn}, handler, signalHandler)
} }
// newConn creates a new *Conn from a transport. // newConn creates a new *Conn from a transport.
func newConn(tr transport) (*Conn, error) { func newConn(tr transport, handler Handler, signalHandler SignalHandler) (*Conn, error) {
conn := new(Conn) conn := new(Conn)
conn.transport = tr conn.transport = tr
conn.calls = make(map[uint32]*Call) conn.calls = make(map[uint32]*Call)
conn.out = make(chan *Message, 10) conn.out = make(chan *Message, 10)
conn.handlers = make(map[ObjectPath]map[string]exportWithMapping) conn.handler = handler
conn.signalHandler = signalHandler
conn.nextSerial = 1 conn.nextSerial = 1
conn.serialUsed = map[uint32]bool{0: true} conn.serialUsed = map[uint32]bool{0: true}
conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
@ -185,16 +227,21 @@ func (conn *Conn) Close() error {
close(conn.out) close(conn.out)
conn.closed = true conn.closed = true
conn.outLck.Unlock() conn.outLck.Unlock()
conn.signalsLck.Lock()
for _, ch := range conn.signals { if term, ok := conn.signalHandler.(Terminator); ok {
close(ch) term.Terminate()
} }
conn.signalsLck.Unlock()
if term, ok := conn.handler.(Terminator); ok {
term.Terminate()
}
conn.eavesdroppedLck.Lock() conn.eavesdroppedLck.Lock()
if conn.eavesdropped != nil { if conn.eavesdropped != nil {
close(conn.eavesdropped) close(conn.eavesdropped)
} }
conn.eavesdroppedLck.Unlock() conn.eavesdroppedLck.Unlock()
return conn.transport.Close() return conn.transport.Close()
} }
@ -331,17 +378,7 @@ func (conn *Conn) inWorker() {
conn.namesLck.Unlock() conn.namesLck.Unlock()
} }
} }
signal := &Signal{ go conn.handleSignal(msg)
Sender: sender,
Path: msg.Headers[FieldPath].value.(ObjectPath),
Name: iface + "." + member,
Body: msg.Body,
}
conn.signalsLck.Lock()
for _, ch := range conn.signals {
ch <- signal
}
conn.signalsLck.Unlock()
case TypeMethodCall: case TypeMethodCall:
go conn.handleCall(msg) go conn.handleCall(msg)
} }
@ -362,6 +399,21 @@ func (conn *Conn) inWorker() {
} }
} }
func (conn *Conn) handleSignal(msg *Message) {
iface := msg.Headers[FieldInterface].value.(string)
member := msg.Headers[FieldMember].value.(string)
// as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
// sender is optional for signals.
sender, _ := msg.Headers[FieldSender].value.(string)
signal := &Signal{
Sender: sender,
Path: msg.Headers[FieldPath].value.(ObjectPath),
Name: iface + "." + member,
Body: msg.Body,
}
conn.signalHandler.DeliverSignal(iface, member, signal)
}
// Names returns the list of all names that are currently owned by this // Names returns the list of all names that are currently owned by this
// connection. The slice is always at least one element long, the first element // connection. The slice is always at least one element long, the first element
// being the unique name of the connection. // being the unique name of the connection.
@ -452,7 +504,19 @@ func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
// sendError creates an error message corresponding to the parameters and sends // sendError creates an error message corresponding to the parameters and sends
// it to conn.out. // it to conn.out.
func (conn *Conn) sendError(e Error, dest string, serial uint32) { func (conn *Conn) sendError(err error, dest string, serial uint32) {
var e *Error
switch em := err.(type) {
case Error:
e = &em
case *Error:
e = em
case DBusError:
name, body := em.DBusError()
e = NewError(name, body)
default:
e = MakeFailedError(err)
}
msg := new(Message) msg := new(Message)
msg.Type = TypeError msg.Type = TypeError
msg.serial = conn.getSerial() msg.serial = conn.getSerial()
@ -495,6 +559,14 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
conn.outLck.RUnlock() conn.outLck.RUnlock()
} }
func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) {
if !isDefaultSignalHandler(conn.signalHandler) {
return
}
handler := conn.signalHandler.(*defaultSignalHandler)
fn(handler, ch)
}
// Signal registers the given channel to be passed all received signal messages. // Signal registers the given channel to be passed all received signal messages.
// The caller has to make sure that ch is sufficiently buffered; if a message // The caller has to make sure that ch is sufficiently buffered; if a message
// arrives when a write to c is not possible, it is discarded. // arrives when a write to c is not possible, it is discarded.
@ -505,22 +577,12 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
 // channel for eavesdropped messages, this channel receives all signals, and
 // none of the channels passed to Signal will receive any signals.
 func (conn *Conn) Signal(ch chan<- *Signal) {
-	conn.signalsLck.Lock()
-	conn.signals = append(conn.signals, ch)
-	conn.signalsLck.Unlock()
+	conn.defaultSignalAction((*defaultSignalHandler).addSignal, ch)
 }
 
 // RemoveSignal removes the given channel from the list of the registered channels.
 func (conn *Conn) RemoveSignal(ch chan<- *Signal) {
-	conn.signalsLck.Lock()
-	for i := len(conn.signals) - 1; i >= 0; i-- {
-		if ch == conn.signals[i] {
-			copy(conn.signals[i:], conn.signals[i+1:])
-			conn.signals[len(conn.signals)-1] = nil
-			conn.signals = conn.signals[:len(conn.signals)-1]
-		}
-	}
-	conn.signalsLck.Unlock()
+	conn.defaultSignalAction((*defaultSignalHandler).removeSignal, ch)
 }
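
For the default signal handler this keeps the long-standing channel-based workflow. A small sketch of the usual pattern, assuming a session bus and an arbitrary match rule:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}

	// Ask the bus to route NameOwnerChanged signals to this connection.
	call := conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'")
	if call.Err != nil {
		panic(call.Err)
	}

	// The channel must be buffered: signals that cannot be written are discarded.
	ch := make(chan *dbus.Signal, 16)
	conn.Signal(ch)

	for sig := range ch {
		fmt.Println(sig.Name, sig.Body)
	}
}
```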
// SupportsUnixFDs returns whether the underlying transport supports passing of // SupportsUnixFDs returns whether the underlying transport supports passing of
@ -621,16 +683,11 @@ func dereferenceAll(vs []interface{}) []interface{} {
// getKey gets a key from a the list of keys. Returns "" on error / not found... // getKey gets a key from a the list of keys. Returns "" on error / not found...
func getKey(s, key string) string { func getKey(s, key string) string {
i := strings.Index(s, key) for _, keyEqualsValue := range strings.Split(s, ",") {
if i == -1 { keyValue := strings.SplitN(keyEqualsValue, "=", 2)
return "" if len(keyValue) == 2 && keyValue[0] == key {
return keyValue[1]
}
} }
if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' { return ""
return ""
}
j := strings.Index(s, ",")
if j == -1 {
j = len(s)
}
return s[i+len(key)+1 : j]
} }
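
The rewritten getKey treats the key list as comma-separated key=value pairs instead of scanning with strings.Index. Since the helper is unexported, here is a standalone re-implementation of the same parsing with a few illustrative inputs:

```go
package main

import (
	"fmt"
	"strings"
)

// getKey mirrors the rewritten helper: the input is a comma-separated list of
// key=value pairs, and the value for the requested key is returned ("" if absent).
func getKey(s, key string) string {
	for _, keyEqualsValue := range strings.Split(s, ",") {
		keyValue := strings.SplitN(keyEqualsValue, "=", 2)
		if len(keyValue) == 2 && keyValue[0] == key {
			return keyValue[1]
		}
	}
	return ""
}

func main() {
	keys := "host=localhost,port=12345,family=ipv4"
	fmt.Println(getKey(keys, "port"))    // 12345
	fmt.Println(getKey(keys, "family"))  // ipv4
	fmt.Println(getKey(keys, "missing")) // ""
}
```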


@ -5,17 +5,17 @@ import (
"os/exec" "os/exec"
) )
func sessionBusPlatform() (*Conn, error) { func getSessionBusPlatformAddress() (string, error) {
cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET") cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
b, err := cmd.CombinedOutput() b, err := cmd.CombinedOutput()
if err != nil { if err != nil {
return nil, err return "", err
} }
if len(b) == 0 { if len(b) == 0 {
return nil, errors.New("dbus: couldn't determine address of session bus") return "", errors.New("dbus: couldn't determine address of session bus")
} }
return Dial("unix:path=" + string(b[:len(b)-1])) return "unix:path=" + string(b[:len(b)-1]), nil
} }


@ -5,23 +5,27 @@ package dbus
import ( import (
"bytes" "bytes"
"errors" "errors"
"os"
"os/exec" "os/exec"
) )
func sessionBusPlatform() (*Conn, error) { func getSessionBusPlatformAddress() (string, error) {
cmd := exec.Command("dbus-launch") cmd := exec.Command("dbus-launch")
b, err := cmd.CombinedOutput() b, err := cmd.CombinedOutput()
if err != nil { if err != nil {
return nil, err return "", err
} }
i := bytes.IndexByte(b, '=') i := bytes.IndexByte(b, '=')
j := bytes.IndexByte(b, '\n') j := bytes.IndexByte(b, '\n')
if i == -1 || j == -1 { if i == -1 || j == -1 {
return nil, errors.New("dbus: couldn't determine address of session bus") return "", errors.New("dbus: couldn't determine address of session bus")
} }
return Dial(string(b[i+1 : j])) env, addr := string(b[0:i]), string(b[i+1:j])
os.Setenv(env, addr)
return addr, nil
} }

vendor/github.com/godbus/dbus/dbus.go

@ -58,67 +58,164 @@ func store(src, dest interface{}) error {
reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src)) reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
return nil return nil
} else if hasStruct(dest) { } else if hasStruct(dest) {
rv := reflect.ValueOf(dest).Elem() return storeStruct(src, dest)
switch rv.Kind() { } else if hasInterface(dest) {
case reflect.Struct: return storeInterfaceContainer(src, dest)
vs, ok := src.([]interface{})
if !ok {
return errors.New("dbus.Store: type mismatch")
}
t := rv.Type()
ndest := make([]interface{}, 0, rv.NumField())
for i := 0; i < rv.NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
ndest = append(ndest, rv.Field(i).Addr().Interface())
}
}
if len(vs) != len(ndest) {
return errors.New("dbus.Store: type mismatch")
}
err := Store(vs, ndest...)
if err != nil {
return errors.New("dbus.Store: type mismatch")
}
case reflect.Slice:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Slice {
return errors.New("dbus.Store: type mismatch")
}
rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
for i := 0; i < sv.Len(); i++ {
if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
return err
}
}
case reflect.Map:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Map {
return errors.New("dbus.Store: type mismatch")
}
keys := sv.MapKeys()
rv.Set(reflect.MakeMap(sv.Type()))
for _, key := range keys {
v := reflect.New(sv.Type().Elem())
if err := store(v, sv.MapIndex(key).Interface()); err != nil {
return err
}
rv.SetMapIndex(key, v.Elem())
}
default:
return errors.New("dbus.Store: type mismatch")
}
return nil
} else { } else {
return errors.New("dbus.Store: type mismatch") return errors.New("dbus.Store: type mismatch")
} }
} }
func storeStruct(src, dest interface{}) error {
rv := reflect.ValueOf(dest)
if rv.Kind() == reflect.Interface || rv.Kind() == reflect.Ptr {
rv = rv.Elem()
}
switch rv.Kind() {
case reflect.Struct:
vs, ok := src.([]interface{})
if !ok {
return errors.New("dbus.Store: type mismatch")
}
t := rv.Type()
ndest := make([]interface{}, 0, rv.NumField())
for i := 0; i < rv.NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
ndest = append(ndest, rv.Field(i).Addr().Interface())
}
}
if len(vs) != len(ndest) {
return errors.New("dbus.Store: type mismatch")
}
err := Store(vs, ndest...)
if err != nil {
return errors.New("dbus.Store: type mismatch")
}
case reflect.Slice:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Slice {
return errors.New("dbus.Store: type mismatch")
}
rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
for i := 0; i < sv.Len(); i++ {
if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
return err
}
}
case reflect.Map:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Map {
return errors.New("dbus.Store: type mismatch")
}
keys := sv.MapKeys()
rv.Set(reflect.MakeMap(sv.Type()))
for _, key := range keys {
v := reflect.New(sv.Type().Elem())
if err := store(v, sv.MapIndex(key).Interface()); err != nil {
return err
}
rv.SetMapIndex(key, v.Elem())
}
default:
return errors.New("dbus.Store: type mismatch")
}
return nil
}
func storeInterfaceContainer(src, dest interface{}) error {
rv := reflect.ValueOf(dest).Elem()
switch rv.Kind() {
case reflect.Interface:
return storeInterfaceValue(src, dest)
case reflect.Slice:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Slice {
return errors.New("dbus.Store: type mismatch")
}
rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
for i := 0; i < sv.Len(); i++ {
v := newInterfaceImplValue(sv.Index(i))
err := store(getVariantValue(sv.Index(i)),
v.Interface())
if err != nil {
return err
}
rv.Index(i).Set(v.Elem())
}
case reflect.Map:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Map {
return errors.New("dbus.Store: type mismatch")
}
keys := sv.MapKeys()
rv.Set(reflect.MakeMap(rv.Type()))
for _, key := range keys {
elemv := sv.MapIndex(key)
v := newInterfaceImplValue(elemv)
err := store(getVariantValue(elemv),
v.Interface())
if err != nil {
return err
}
rv.SetMapIndex(key, v.Elem())
}
default:
return errors.New("dbus.Store: type mismatch")
}
return nil
}
func getVariantValue(in reflect.Value) interface{} {
if in.Type() == variantType {
return in.Interface().(Variant).Value()
}
return in.Interface()
}
func newInterfaceImplValue(val reflect.Value) reflect.Value {
ifaceType := reflect.TypeOf((*interface{})(nil)).Elem()
if !hasVariant(val.Type()) {
return reflect.New(val.Type())
}
switch val.Kind() {
case reflect.Map:
return reflect.New(reflect.MapOf(val.Type().Key(),
ifaceType))
case reflect.Slice:
return reflect.New(reflect.SliceOf(ifaceType))
default:
return newInterfaceImplValue(
reflect.ValueOf(
val.Interface().(Variant).Value()))
}
}
func storeInterfaceValue(src, dest interface{}) error {
sv := reflect.ValueOf(src)
if sv.Type() == variantType {
store(src.(Variant).Value(), dest)
} else {
reflect.ValueOf(dest).Elem().Set(
reflect.ValueOf(src))
}
return nil
}
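
These new code paths let Store unwrap Variants into interface{}-typed destinations (the common a{sv} shape). A minimal sketch, assuming the public dbus.Store and dbus.MakeVariant API; the values are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// A single Variant can be stored into a plain interface{}: the Variant is
	// unwrapped and the inner value is assigned.
	var answer interface{}
	if err := dbus.Store([]interface{}{dbus.MakeVariant(int32(42))}, &answer); err != nil {
		panic(err)
	}
	fmt.Println(answer) // 42

	// A map of Variants (the usual shape of a{sv} message bodies) can be
	// stored into a map with interface{} values.
	src := map[string]dbus.Variant{
		"Name":  dbus.MakeVariant("example"),
		"Count": dbus.MakeVariant(uint32(3)),
	}
	var dest map[string]interface{}
	if err := dbus.Store([]interface{}{src}, &dest); err != nil {
		panic(err)
	}
	fmt.Println(dest["Name"], dest["Count"]) // example 3
}
```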
func hasStruct(v interface{}) bool { func hasStruct(v interface{}) bool {
return hasKind(v, reflect.Struct)
}
func hasInterface(v interface{}) bool {
return hasKind(v, reflect.Interface)
}
func hasKind(v interface{}, kind reflect.Kind) bool {
t := reflect.TypeOf(v) t := reflect.TypeOf(v)
for { for {
switch t.Kind() { switch t.Kind() {
case reflect.Struct: case kind:
return true return true
case reflect.Slice, reflect.Ptr, reflect.Map: case reflect.Slice, reflect.Ptr, reflect.Map:
t = t.Elem() t = t.Elem()
@ -128,6 +225,20 @@ func hasStruct(v interface{}) bool {
} }
} }
func hasVariant(t reflect.Type) bool {
for {
if t == variantType {
return true
}
switch t.Kind() {
case reflect.Slice, reflect.Ptr, reflect.Map:
t = t.Elem()
default:
return false
}
}
}
// An ObjectPath is an object path as defined by the D-Bus spec. // An ObjectPath is an object path as defined by the D-Bus spec.
type ObjectPath string type ObjectPath string
@ -177,8 +288,8 @@ func alignment(t reflect.Type) int {
 		return 4
 	case signatureType:
 		return 1
-	case interfacesType: // sometimes used for structs
-		return 8
+	case interfacesType:
+		return 4
 	}
 	switch t.Kind() {
 	case reflect.Uint8:

vendor/github.com/godbus/dbus/default_handler.go

@ -0,0 +1,283 @@
package dbus
import (
"bytes"
"reflect"
"strings"
"sync"
)
func newIntrospectIntf(h *defaultHandler) *exportedIntf {
methods := make(map[string]Method)
methods["Introspect"] = exportedMethod{
reflect.ValueOf(func(msg Message) (string, *Error) {
path := msg.Headers[FieldPath].value.(ObjectPath)
return h.introspectPath(path), nil
}),
}
return newExportedIntf(methods, true)
}
func newDefaultHandler() *defaultHandler {
h := &defaultHandler{
objects: make(map[ObjectPath]*exportedObj),
defaultIntf: make(map[string]*exportedIntf),
}
h.defaultIntf["org.freedesktop.DBus.Introspectable"] = newIntrospectIntf(h)
return h
}
type defaultHandler struct {
sync.RWMutex
objects map[ObjectPath]*exportedObj
defaultIntf map[string]*exportedIntf
}
func (h *defaultHandler) PathExists(path ObjectPath) bool {
_, ok := h.objects[path]
return ok
}
func (h *defaultHandler) introspectPath(path ObjectPath) string {
subpath := make(map[string]struct{})
var xml bytes.Buffer
xml.WriteString("<node>")
for obj, _ := range h.objects {
p := string(path)
if p != "/" {
p += "/"
}
if strings.HasPrefix(string(obj), p) {
node_name := strings.Split(string(obj[len(p):]), "/")[0]
subpath[node_name] = struct{}{}
}
}
for s, _ := range subpath {
xml.WriteString("\n\t<node name=\"" + s + "\"/>")
}
xml.WriteString("\n</node>")
return xml.String()
}
func (h *defaultHandler) LookupObject(path ObjectPath) (ServerObject, bool) {
h.RLock()
defer h.RUnlock()
object, ok := h.objects[path]
if ok {
return object, ok
}
// If an object wasn't found for this exact path,
// look for a matching subtree registration
subtreeObject := newExportedObject()
path = path[:strings.LastIndex(string(path), "/")]
for len(path) > 0 {
object, ok = h.objects[path]
if ok {
for name, iface := range object.interfaces {
// Only include this handler if it registered for the subtree
if iface.isFallbackInterface() {
subtreeObject.interfaces[name] = iface
}
}
break
}
path = path[:strings.LastIndex(string(path), "/")]
}
for name, intf := range h.defaultIntf {
if _, exists := subtreeObject.interfaces[name]; exists {
continue
}
subtreeObject.interfaces[name] = intf
}
return subtreeObject, true
}
func (h *defaultHandler) AddObject(path ObjectPath, object *exportedObj) {
h.Lock()
h.objects[path] = object
h.Unlock()
}
func (h *defaultHandler) DeleteObject(path ObjectPath) {
h.Lock()
delete(h.objects, path)
h.Unlock()
}
type exportedMethod struct {
reflect.Value
}
func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) {
t := m.Type()
params := make([]reflect.Value, len(args))
for i := 0; i < len(args); i++ {
params[i] = reflect.ValueOf(args[i]).Elem()
}
ret := m.Value.Call(params)
err := ret[t.NumOut()-1].Interface().(*Error)
ret = ret[:t.NumOut()-1]
out := make([]interface{}, len(ret))
for i, val := range ret {
out[i] = val.Interface()
}
if err == nil {
//concrete type to interface nil is a special case
return out, nil
}
return out, err
}
func (m exportedMethod) NumArguments() int {
return m.Value.Type().NumIn()
}
func (m exportedMethod) ArgumentValue(i int) interface{} {
return reflect.Zero(m.Type().In(i)).Interface()
}
func (m exportedMethod) NumReturns() int {
return m.Value.Type().NumOut()
}
func (m exportedMethod) ReturnValue(i int) interface{} {
return reflect.Zero(m.Type().Out(i)).Interface()
}
func newExportedObject() *exportedObj {
return &exportedObj{
interfaces: make(map[string]*exportedIntf),
}
}
type exportedObj struct {
interfaces map[string]*exportedIntf
}
func (obj *exportedObj) LookupInterface(name string) (Interface, bool) {
if name == "" {
return obj, true
}
intf, exists := obj.interfaces[name]
return intf, exists
}
func (obj *exportedObj) AddInterface(name string, iface *exportedIntf) {
obj.interfaces[name] = iface
}
func (obj *exportedObj) DeleteInterface(name string) {
delete(obj.interfaces, name)
}
func (obj *exportedObj) LookupMethod(name string) (Method, bool) {
for _, intf := range obj.interfaces {
method, exists := intf.LookupMethod(name)
if exists {
return method, exists
}
}
return nil, false
}
func (obj *exportedObj) isFallbackInterface() bool {
return false
}
func newExportedIntf(methods map[string]Method, includeSubtree bool) *exportedIntf {
return &exportedIntf{
methods: methods,
includeSubtree: includeSubtree,
}
}
type exportedIntf struct {
methods map[string]Method
// Whether or not this export is for the entire subtree
includeSubtree bool
}
func (obj *exportedIntf) LookupMethod(name string) (Method, bool) {
out, exists := obj.methods[name]
return out, exists
}
func (obj *exportedIntf) isFallbackInterface() bool {
return obj.includeSubtree
}
func newDefaultSignalHandler() *defaultSignalHandler {
return &defaultSignalHandler{}
}
func isDefaultSignalHandler(handler SignalHandler) bool {
_, ok := handler.(*defaultSignalHandler)
return ok
}
type defaultSignalHandler struct {
sync.RWMutex
closed bool
signals []chan<- *Signal
}
func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) {
sh.RLock()
defer sh.RUnlock()
if sh.closed {
return
}
for _, ch := range sh.signals {
ch <- signal
}
}
func (sh *defaultSignalHandler) Init() error {
sh.Lock()
sh.signals = make([]chan<- *Signal, 0)
sh.Unlock()
return nil
}
func (sh *defaultSignalHandler) Terminate() {
sh.Lock()
sh.closed = true
for _, ch := range sh.signals {
close(ch)
}
sh.signals = nil
sh.Unlock()
}
func (sh *defaultSignalHandler) addSignal(ch chan<- *Signal) {
sh.Lock()
defer sh.Unlock()
if sh.closed {
return
}
sh.signals = append(sh.signals, ch)
}
func (sh *defaultSignalHandler) removeSignal(ch chan<- *Signal) {
sh.Lock()
defer sh.Unlock()
if sh.closed {
return
}
for i := len(sh.signals) - 1; i >= 0; i-- {
if ch == sh.signals[i] {
copy(sh.signals[i:], sh.signals[i+1:])
sh.signals[len(sh.signals)-1] = nil
sh.signals = sh.signals[:len(sh.signals)-1]
}
}
}


@ -202,6 +202,8 @@ func (enc *encoder) encode(v reflect.Value, depth int) {
panic(err) panic(err)
} }
enc.pos += length enc.pos += length
case reflect.Interface:
enc.encode(reflect.ValueOf(MakeVariant(v.Interface())), depth)
default: default:
panic(InvalidTypeError{v.Type()}) panic(InvalidTypeError{v.Type()})
} }


@ -8,167 +8,73 @@ import (
) )
var ( var (
errmsgInvalidArg = Error{ ErrMsgInvalidArg = Error{
"org.freedesktop.DBus.Error.InvalidArgs", "org.freedesktop.DBus.Error.InvalidArgs",
[]interface{}{"Invalid type / number of args"}, []interface{}{"Invalid type / number of args"},
} }
errmsgNoObject = Error{ ErrMsgNoObject = Error{
"org.freedesktop.DBus.Error.NoSuchObject", "org.freedesktop.DBus.Error.NoSuchObject",
[]interface{}{"No such object"}, []interface{}{"No such object"},
} }
errmsgUnknownMethod = Error{ ErrMsgUnknownMethod = Error{
"org.freedesktop.DBus.Error.UnknownMethod", "org.freedesktop.DBus.Error.UnknownMethod",
[]interface{}{"Unknown / invalid method"}, []interface{}{"Unknown / invalid method"},
} }
ErrMsgUnknownInterface = Error{
"org.freedesktop.DBus.Error.UnknownInterface",
[]interface{}{"Object does not implement the interface"},
}
) )
// exportWithMapping represents an exported struct along with a method name func MakeFailedError(err error) *Error {
// mapping to allow for exporting lower-case methods, etc. return &Error{
type exportWithMapping struct { "org.freedesktop.DBus.Error.Failed",
export interface{} []interface{}{err.Error()},
}
// Method name mapping; key -> struct method, value -> dbus method.
mapping map[string]string
// Whether or not this export is for the entire subtree
includeSubtree bool
} }
// Sender is a type which can be used in exported methods to receive the message // Sender is a type which can be used in exported methods to receive the message
// sender. // sender.
type Sender string type Sender string
func exportedMethod(export exportWithMapping, name string) reflect.Value { func computeMethodName(name string, mapping map[string]string) string {
if export.export == nil { newname, ok := mapping[name]
return reflect.Value{}
}
// If a mapping was included in the export, check the map to see if we
// should be looking for a different method in the export.
if export.mapping != nil {
for key, value := range export.mapping {
if value == name {
name = key
break
}
// Catch the case where a method is aliased but the client is calling
// the original, e.g. the "Foo" method was exported mapped to
// "foo," and dbus client called the original "Foo."
if key == name {
return reflect.Value{}
}
}
}
value := reflect.ValueOf(export.export)
m := value.MethodByName(name)
// Catch the case of attempting to call an unexported method
method, ok := value.Type().MethodByName(name)
if !m.IsValid() || !ok || method.PkgPath != "" {
return reflect.Value{}
}
t := m.Type()
if t.NumOut() == 0 ||
t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) {
return reflect.Value{}
}
return m
}
// searchHandlers will look through all registered handlers looking for one
// to handle the given path. If a verbatim one isn't found, it will check for
// a subtree registration for the path as well.
func (conn *Conn) searchHandlers(path ObjectPath) (map[string]exportWithMapping, bool) {
conn.handlersLck.RLock()
defer conn.handlersLck.RUnlock()
handlers, ok := conn.handlers[path]
if ok { if ok {
return handlers, ok name = newname
} }
return name
// If handlers weren't found for this exact path, look for a matching subtree
// registration
handlers = make(map[string]exportWithMapping)
path = path[:strings.LastIndex(string(path), "/")]
for len(path) > 0 {
var subtreeHandlers map[string]exportWithMapping
subtreeHandlers, ok = conn.handlers[path]
if ok {
for iface, handler := range subtreeHandlers {
// Only include this handler if it registered for the subtree
if handler.includeSubtree {
handlers[iface] = handler
}
}
break
}
path = path[:strings.LastIndex(string(path), "/")]
}
return handlers, ok
} }
// handleCall handles the given method call (i.e. looks if it's one of the func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Value {
// pre-implemented ones and searches for a corresponding handler if not). if in == nil {
func (conn *Conn) handleCall(msg *Message) { return nil
name := msg.Headers[FieldMember].value.(string) }
path := msg.Headers[FieldPath].value.(ObjectPath) methods := make(map[string]reflect.Value)
ifaceName, hasIface := msg.Headers[FieldInterface].value.(string) val := reflect.ValueOf(in)
sender, hasSender := msg.Headers[FieldSender].value.(string) typ := val.Type()
serial := msg.serial for i := 0; i < typ.NumMethod(); i++ {
if ifaceName == "org.freedesktop.DBus.Peer" { methtype := typ.Method(i)
switch name { method := val.Method(i)
case "Ping": t := method.Type()
conn.sendReply(sender, serial) // only track valid methods must return *Error as last arg
case "GetMachineId": // and must be exported
conn.sendReply(sender, serial, conn.uuid) if t.NumOut() == 0 ||
default: t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) ||
conn.sendError(errmsgUnknownMethod, sender, serial) methtype.PkgPath != "" {
continue
} }
return // map names while building table
} methods[computeMethodName(methtype.Name, mapping)] = method
if len(name) == 0 {
conn.sendError(errmsgUnknownMethod, sender, serial)
} }
return methods
}
// Find the exported handler (if any) for this path func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) {
handlers, ok := conn.searchHandlers(path) pointers := make([]interface{}, m.NumArguments())
if !ok { decode := make([]interface{}, 0, len(body))
conn.sendError(errmsgNoObject, sender, serial)
return
}
var m reflect.Value for i := 0; i < m.NumArguments(); i++ {
if hasIface { tp := reflect.TypeOf(m.ArgumentValue(i))
iface := handlers[ifaceName]
m = exportedMethod(iface, name)
} else {
for _, v := range handlers {
m = exportedMethod(v, name)
if m.IsValid() {
break
}
}
}
if !m.IsValid() {
conn.sendError(errmsgUnknownMethod, sender, serial)
return
}
t := m.Type()
vs := msg.Body
pointers := make([]interface{}, t.NumIn())
decode := make([]interface{}, 0, len(vs))
for i := 0; i < t.NumIn(); i++ {
tp := t.In(i)
val := reflect.New(tp) val := reflect.New(tp)
pointers[i] = val.Interface() pointers[i] = val.Interface()
if tp == reflect.TypeOf((*Sender)(nil)).Elem() { if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
@ -180,26 +86,73 @@ func (conn *Conn) handleCall(msg *Message) {
} }
} }
if len(decode) != len(vs) { if len(decode) != len(body) {
conn.sendError(errmsgInvalidArg, sender, serial) return nil, ErrMsgInvalidArg
}
if err := Store(body, decode...); err != nil {
return nil, ErrMsgInvalidArg
}
return pointers, nil
}
func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]interface{}, error) {
if decoder, ok := m.(ArgumentDecoder); ok {
return decoder.DecodeArguments(conn, sender, msg, msg.Body)
}
return standardMethodArgumentDecode(m, sender, msg, msg.Body)
}
// handleCall handles the given method call (i.e. looks if it's one of the
// pre-implemented ones and searches for a corresponding handler if not).
func (conn *Conn) handleCall(msg *Message) {
name := msg.Headers[FieldMember].value.(string)
path := msg.Headers[FieldPath].value.(ObjectPath)
ifaceName, _ := msg.Headers[FieldInterface].value.(string)
sender, hasSender := msg.Headers[FieldSender].value.(string)
serial := msg.serial
if ifaceName == "org.freedesktop.DBus.Peer" {
switch name {
case "Ping":
conn.sendReply(sender, serial)
case "GetMachineId":
conn.sendReply(sender, serial, conn.uuid)
default:
conn.sendError(ErrMsgUnknownMethod, sender, serial)
}
return
}
if len(name) == 0 {
conn.sendError(ErrMsgUnknownMethod, sender, serial)
}
object, ok := conn.handler.LookupObject(path)
if !ok {
conn.sendError(ErrMsgNoObject, sender, serial)
return return
} }
if err := Store(vs, decode...); err != nil { iface, exists := object.LookupInterface(ifaceName)
conn.sendError(errmsgInvalidArg, sender, serial) if !exists {
conn.sendError(ErrMsgUnknownInterface, sender, serial)
return return
} }
// Extract parameters m, exists := iface.LookupMethod(name)
params := make([]reflect.Value, len(pointers)) if !exists {
for i := 0; i < len(pointers); i++ { conn.sendError(ErrMsgUnknownMethod, sender, serial)
params[i] = reflect.ValueOf(pointers[i]).Elem() return
}
args, err := conn.decodeArguments(m, sender, msg)
if err != nil {
conn.sendError(err, sender, serial)
return
} }
// Call method ret, err := m.Call(args...)
ret := m.Call(params) if err != nil {
if em := ret[t.NumOut()-1].Interface().(*Error); em != nil { conn.sendError(err, sender, serial)
conn.sendError(*em, sender, serial)
return return
} }
@ -212,13 +165,11 @@ func (conn *Conn) handleCall(msg *Message) {
reply.Headers[FieldDestination] = msg.Headers[FieldSender] reply.Headers[FieldDestination] = msg.Headers[FieldSender]
} }
reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
reply.Body = make([]interface{}, len(ret)-1) reply.Body = make([]interface{}, len(ret))
for i := 0; i < len(ret)-1; i++ { for i := 0; i < len(ret); i++ {
reply.Body[i] = ret[i].Interface() reply.Body[i] = ret[i]
}
if len(ret) != 1 {
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
} }
reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
conn.outLck.RLock() conn.outLck.RLock()
if !conn.closed { if !conn.closed {
conn.out <- reply conn.out <- reply
@ -303,7 +254,7 @@ func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
// The keys in the map are the real method names (exported on the struct), and // The keys in the map are the real method names (exported on the struct), and
// the values are the method names to be exported on DBus. // the values are the method names to be exported on DBus.
func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
return conn.exportWithMap(v, mapping, path, iface, false) return conn.export(getMethods(v, mapping), path, iface, false)
} }
// ExportSubtree works exactly like Export but registers the given value for // ExportSubtree works exactly like Export but registers the given value for
@ -326,38 +277,89 @@ func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) er
// The keys in the map are the real method names (exported on the struct), and // The keys in the map are the real method names (exported on the struct), and
// the values are the method names to be exported on DBus. // the values are the method names to be exported on DBus.
func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
return conn.exportWithMap(v, mapping, path, iface, true) return conn.export(getMethods(v, mapping), path, iface, true)
}
// ExportMethodTable like Export registers the given methods as an object
// on the message bus. Unlike Export it uses a method table to define
// the object instead of a native go object.
//
// The method table is a map from method name to function closure
// representing the method. This allows an object exported on the bus to not
// necessarily be a native go object. It can be useful for generating exposed
// methods on the fly.
//
// Any non-function objects in the method table are ignored.
func (conn *Conn) ExportMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error {
return conn.exportMethodTable(methods, path, iface, false)
}
// Like ExportSubtree, but with the same caveats as ExportMethodTable.
func (conn *Conn) ExportSubtreeMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error {
return conn.exportMethodTable(methods, path, iface, true)
}
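
A hypothetical sketch of ExportMethodTable: the bus name, object path, and interface below are made up, and each table entry must be a func whose final return value is *dbus.Error, mirroring Export:

```go
package main

import (
	"fmt"
	"os"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Methods are plain closures; no receiver struct is needed.
	methods := map[string]interface{}{
		"Double": func(in int32) (int32, *dbus.Error) {
			return in * 2, nil
		},
	}
	if err := conn.ExportMethodTable(methods, "/com/example/Calc", "com.example.Calc"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if _, err := conn.RequestName("com.example.Calc", dbus.NameFlagDoNotQueue); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	select {} // serve forever
}
```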
func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectPath, iface string, includeSubtree bool) error {
out := make(map[string]reflect.Value)
for name, method := range methods {
rval := reflect.ValueOf(method)
if rval.Kind() != reflect.Func {
continue
}
t := rval.Type()
// only track valid methods must return *Error as last arg
if t.NumOut() == 0 ||
t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) {
continue
}
out[name] = rval
}
return conn.export(out, path, iface, includeSubtree)
}
func (conn *Conn) unexport(h *defaultHandler, path ObjectPath, iface string) error {
if h.PathExists(path) {
obj := h.objects[path]
obj.DeleteInterface(iface)
if len(obj.interfaces) == 0 {
h.DeleteObject(path)
}
}
return nil
} }
// exportWithMap is the worker function for all exports/registrations. // exportWithMap is the worker function for all exports/registrations.
func (conn *Conn) exportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string, includeSubtree bool) error { func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error {
h, ok := conn.handler.(*defaultHandler)
if !ok {
return fmt.Errorf(
`dbus: export only allowed on the default hander handler have %T"`,
conn.handler)
}
if !path.IsValid() { if !path.IsValid() {
return fmt.Errorf(`dbus: Invalid path name: "%s"`, path) return fmt.Errorf(`dbus: Invalid path name: "%s"`, path)
} }
conn.handlersLck.Lock()
defer conn.handlersLck.Unlock()
// Remove a previous export if the interface is nil // Remove a previous export if the interface is nil
if v == nil { if methods == nil {
if _, ok := conn.handlers[path]; ok { return conn.unexport(h, path, iface)
delete(conn.handlers[path], iface)
if len(conn.handlers[path]) == 0 {
delete(conn.handlers, path)
}
}
return nil
} }
// If this is the first handler for this path, make a new map to hold all // If this is the first handler for this path, make a new map to hold all
// handlers for this path. // handlers for this path.
if _, ok := conn.handlers[path]; !ok { if !h.PathExists(path) {
conn.handlers[path] = make(map[string]exportWithMapping) h.AddObject(path, newExportedObject())
}
exportedMethods := make(map[string]Method)
for name, method := range methods {
exportedMethods[name] = exportedMethod{method}
} }
// Finally, save this handler // Finally, save this handler
conn.handlers[path][iface] = exportWithMapping{export: v, mapping: mapping, includeSubtree: includeSubtree} obj := h.objects[path]
obj.AddInterface(iface, newExportedIntf(exportedMethods, includeSubtree))
return nil return nil
} }

View file

@ -22,6 +22,13 @@ const (
// FlagNoAutoStart signals that the message bus should not automatically // FlagNoAutoStart signals that the message bus should not automatically
// start an application when handling this message. // start an application when handling this message.
FlagNoAutoStart FlagNoAutoStart
// FlagAllowInteractiveAuthorization may be set on a method call
// message to inform the receiving side that the caller is prepared
// to wait for interactive authorization, which might take a
// considerable time to complete. For instance, if this flag is set,
// it would be appropriate to query the user for passwords or
// confirmation via Polkit or a similar framework.
FlagAllowInteractiveAuthorization
) )
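
The flag is supplied per method call. A hypothetical sketch; the destination, object path, and method name are invented for illustration:

```go
package main

import "github.com/godbus/dbus"

func main() {
	conn, err := dbus.SystemBus()
	if err != nil {
		panic(err)
	}

	obj := conn.Object("com.example.Privileged", "/com/example/Privileged")
	// FlagAllowInteractiveAuthorization tells the receiver that this caller is
	// willing to wait while the user is prompted (e.g. via Polkit).
	call := obj.Call("com.example.Privileged.DoAdminThing", dbus.FlagAllowInteractiveAuthorization)
	if call.Err != nil {
		panic(call.Err)
	}
}
```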
// Type represents the possible types of a D-Bus message. // Type represents the possible types of a D-Bus message.
@ -248,7 +255,7 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
 // IsValid checks whether msg is a valid message and returns an
 // InvalidMessageError if it is not.
 func (msg *Message) IsValid() error {
-	if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 {
+	if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 {
 		return InvalidMessageError("invalid flags")
 	}
 	if msg.Type == 0 || msg.Type >= typeMax {

vendor/github.com/godbus/dbus/server_interfaces.go

@ -0,0 +1,89 @@
package dbus
// Terminator allows a handler to implement a shutdown mechanism that
// is called when the connection terminates.
type Terminator interface {
Terminate()
}
// Handler is the representation of a D-Bus Application.
//
// The Handler must have a way to lookup objects given
// an ObjectPath. The returned object must implement the
// ServerObject interface.
type Handler interface {
LookupObject(path ObjectPath) (ServerObject, bool)
}
// ServerObject is the representation of an D-Bus Object.
//
// Objects are registered at a path for a given Handler.
// The Objects implement D-Bus interfaces. The semantics
// of Interface lookup is up to the implementation of
// the ServerObject. The ServerObject implementation may
// choose to implement empty string as a valid interface
// representing all methods or not per the D-Bus specification.
type ServerObject interface {
LookupInterface(name string) (Interface, bool)
}
// An Interface is the representation of a D-Bus Interface.
//
// Interfaces are a grouping of methods implemented by the Objects.
// Interfaces are responsible for routing method calls.
type Interface interface {
LookupMethod(name string) (Method, bool)
}
// A Method represents the exposed methods on D-Bus.
type Method interface {
// Call requires that all arguments are decoded before being passed to it.
Call(args ...interface{}) ([]interface{}, error)
NumArguments() int
NumReturns() int
// ArgumentValue returns a representative value for the argument at position
// it should be of the proper type. reflect.Zero would be a good mechanism
// to use for this Value.
ArgumentValue(position int) interface{}
// ReturnValue returns a representative value for the return at position
// it should be of the proper type. reflect.Zero would be a good mechanism
// to use for this Value.
ReturnValue(position int) interface{}
}
// An ArgumentDecoder can decode arguments using a non-standard mechanism.
//
// If a method implements this interface then the non-standard
// decoder will be used.
//
// Method arguments must be decoded from the message.
// The mechanism for doing this will vary based on the
// implementation of the method. A normal approach is provided
// as part of this library, but may be replaced with
// any other decoding scheme.
type ArgumentDecoder interface {
// To decode the arguments of a method the sender and message are
// provided in case the semantics of the implementer provides access
// to these as part of the method invocation.
DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error)
}
// A SignalHandler is responsible for delivering a signal.
//
// Signal delivery may be changed from the default channel
// based approach by Handlers implementing the SignalHandler
// interface.
type SignalHandler interface {
DeliverSignal(iface, name string, signal *Signal)
}
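
A connection created via DialHandler or SessionBusPrivateHandler can swap in its own SignalHandler. A minimal, compile-only sketch of such an implementation:

```go
package main

import (
	"log"

	"github.com/godbus/dbus"
)

// logSignalHandler satisfies dbus.SignalHandler by printing every signal
// instead of fanning it out to registered channels. It would be passed to
// DialHandler or SessionBusPrivateHandler alongside a regular Handler.
type logSignalHandler struct{}

func (logSignalHandler) DeliverSignal(iface, member string, sig *dbus.Signal) {
	log.Printf("signal %s.%s from %s: %v", iface, member, sig.Sender, sig.Body)
}

// Compile-time check that the interface is actually implemented.
var _ dbus.SignalHandler = logSignalHandler{}

func main() {}
```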
// A DBusError is used to convert a generic object to a D-Bus error.
//
// Any custom error mechanism may implement this interface to provide
// a custom encoding of the error on D-Bus. By default if a normal
// error is returned, it will be encoded as the generic
// "org.freedesktop.DBus.Error.Failed" error. By implementing this
// interface as well a custom encoding may be provided.
type DBusError interface {
DBusError() (string, []interface{})
}
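
A minimal sketch of a custom error type using DBusError; the error name com.example.Error.NotFound is made up:

```go
package main

import "github.com/godbus/dbus"

// notFoundError carries a custom D-Bus error name. Because it implements
// dbus.DBusError, the connection encodes it as com.example.Error.NotFound
// instead of the generic org.freedesktop.DBus.Error.Failed used for plain errors.
type notFoundError struct{ what string }

func (e notFoundError) Error() string { return e.what + " not found" }

func (e notFoundError) DBusError() (string, []interface{}) {
	return "com.example.Error.NotFound", []interface{}{e.Error()}
}

// Compile-time checks.
var (
	_ error          = notFoundError{}
	_ dbus.DBusError = notFoundError{}
)

func main() {}
```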


@ -101,6 +101,8 @@ func getSignature(t reflect.Type) string {
panic(InvalidTypeError{t}) panic(InvalidTypeError{t})
} }
return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}" return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
case reflect.Interface:
return "v"
} }
panic(InvalidTypeError{t}) panic(InvalidTypeError{t})
} }
@ -162,7 +164,7 @@ func (e SignatureError) Error() string {
 	return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
 }
 
-// Try to read a single type from this string. If it was successfull, err is nil
+// Try to read a single type from this string. If it was successful, err is nil
 // and rem is the remaining unparsed part. Otherwise, err is a non-nil
 // SignatureError and rem is "". depth is the current recursion depth which may
 // not be greater than 64 and should be given as 0 on the first call.

vendor/github.com/godbus/dbus/transport_tcp.go

@ -0,0 +1,43 @@
//+build !windows
package dbus
import (
"errors"
"net"
)
func init() {
transports["tcp"] = newTcpTransport
}
func tcpFamily(keys string) (string, error) {
switch getKey(keys, "family") {
case "":
return "tcp", nil
case "ipv4":
return "tcp4", nil
case "ipv6":
return "tcp6", nil
default:
return "", errors.New("dbus: invalid tcp family (must be ipv4 or ipv6)")
}
}
func newTcpTransport(keys string) (transport, error) {
host := getKey(keys, "host")
port := getKey(keys, "port")
if host == "" || port == "" {
return nil, errors.New("dbus: unsupported address (must set host and port)")
}
protocol, err := tcpFamily(keys)
if err != nil {
return nil, err
}
socket, err := net.Dial(protocol, net.JoinHostPort(host, port))
if err != nil {
return nil, err
}
return NewConn(socket)
}
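
With this transport registered, Dial accepts tcp: addresses. A hypothetical sketch; the host and port are made up, and a freshly dialed private connection still needs Auth and Hello before use:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// Address syntax handled by newTcpTransport; values here are illustrative.
	conn, err := dbus.Dial("tcp:host=localhost,port=12345,family=ipv4")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	if err := conn.Auth(nil); err != nil {
		fmt.Println("auth failed:", err)
		return
	}
	if err := conn.Hello(); err != nil {
		fmt.Println("hello failed:", err)
		return
	}
	fmt.Println("connected as", conn.Names()[0])
}
```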


@ -1,4 +1,4 @@
-//+build !windows
+//+build !windows,!solaris
 
 package dbus


@ -0,0 +1,14 @@
package dbus
import "io"
func (t *unixTransport) SendNullByte() error {
n, _, err := t.UnixConn.WriteMsgUnix([]byte{0}, nil, nil)
if err != nil {
return err
}
if n != 1 {
return io.ErrShortWrite
}
return nil
}


@ -39,5 +39,5 @@ test: install generate-test-pbs
 generate-test-pbs:
 	make install
 	make -C testdata
-	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
+	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
 	make


@ -84,9 +84,15 @@ func mergeStruct(out, in reflect.Value) {
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
} }
if emIn, ok := in.Addr().Interface().(extendableProto); ok { if emIn, ok := extendable(in.Addr().Interface()); ok {
emOut := out.Addr().Interface().(extendableProto) emOut, _ := extendable(out.Addr().Interface())
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
} }
uf := in.FieldByName("XXX_unrecognized") uf := in.FieldByName("XXX_unrecognized")


@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
// int32, int64, uint32, uint64, bool, and enum // int32, int64, uint32, uint64, bool, and enum
// protocol buffer types. // protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) { func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 { for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) { if n >= len(buf) {
return 0, 0 return 0, 0
@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
return 0, 0 return 0, 0
} }
// DecodeVarint reads a varint-encoded integer from the Buffer. func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
i := p.index i := p.index
l := len(p.buf) l := len(p.buf)
@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
return return
} }
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf
if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}
var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++
b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7
b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14
b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21
b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28
b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35
b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42
b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49
b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56
b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow
done:
p.index = i
return x, nil
}
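
The fast path only changes performance, not the wire format. A small round-trip sketch using the package's public varint helpers:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 does not fit in one varint byte, so it encodes as two bytes: 0xAC 0x02.
	enc := proto.EncodeVarint(300)
	fmt.Printf("% x\n", enc) // ac 02

	// DecodeVarint reads it straight back from a Buffer.
	buf := proto.NewBuffer(enc)
	x, err := buf.DecodeVarint()
	if err != nil {
		panic(err)
	}
	fmt.Println(x) // 300
}
```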
// DecodeFixed64 reads a 64-bit integer from the Buffer. // DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the // This is the format for the
// fixed64, sfixed64, and double protocol buffer types. // fixed64, sfixed64, and double protocol buffer types.
@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Buffer and places the decoded result in pb. If the struct // Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be // underlying pb does not match the data in the buffer, the results can be
// unpredictable. // unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error { func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it. // If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok { if u, ok := pb.(Unmarshaler); ok {
@ -378,6 +474,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
wire := int(u & 0x7) wire := int(u & 0x7)
if wire == WireEndGroup { if wire == WireEndGroup {
if is_group { if is_group {
if required > 0 {
// Not enough information to determine the exact field.
// (See below.)
return &RequiredNotSetError{"{Unknown}"}
}
return nil // input is satisfied return nil // input is satisfied
} }
return fmt.Errorf("proto: %s: wiretype end group for non-group", st) return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
@ -390,11 +491,12 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
if !ok { if !ok {
// Maybe it's an extension? // Maybe it's an extension?
if prop.extendable { if prop.extendable {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil { if err = o.skip(st, tag, wire); err == nil {
ext := e.ExtensionMap()[int32(tag)] // may be missing extmap := e.extensionsWrite()
ext := extmap[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...) ext.enc = append(ext.enc, o.buf[oi:o.index]...)
e.ExtensionMap()[int32(tag)] = ext extmap[int32(tag)] = ext
} }
continue continue
} }
@ -768,10 +870,11 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
} }
} }
keyelem, valelem := keyptr.Elem(), valptr.Elem() keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() || !valelem.IsValid() { if !keyelem.IsValid() {
// We did not decode the key or the value in the map entry. keyelem = reflect.Zero(p.mtype.Key())
// Either way, it's an invalid map entry. }
return fmt.Errorf("proto: bad map data: missing key/val") if !valelem.IsValid() {
valelem = reflect.Zero(p.mtype.Elem())
} }
v.SetMapIndex(keyelem, valelem) v.SetMapIndex(keyelem, valelem)

View file

@ -64,8 +64,16 @@ var (
// a struct with a repeated field containing a nil element. // a struct with a repeated field containing a nil element.
errRepeatedHasNil = errors.New("proto: repeated field has nil element") errRepeatedHasNil = errors.New("proto: repeated field has nil element")
// errOneofHasNil is the error returned if Marshal is called with
// a struct with a oneof field containing a nil element.
errOneofHasNil = errors.New("proto: oneof field has nil value")
// ErrNil is the error returned if Marshal is called with nil. // ErrNil is the error returned if Marshal is called with nil.
ErrNil = errors.New("proto: Marshal called with nil") ErrNil = errors.New("proto: Marshal called with nil")
// ErrTooLarge is the error returned if Marshal is called with a
// message that encodes to >2GB.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
) )
// The fundamental encoders that put bytes on the wire. // The fundamental encoders that put bytes on the wire.
@ -74,6 +82,10 @@ var (
const maxVarintBytes = 10 // maximum length of a varint const maxVarintBytes = 10 // maximum length of a varint
// maxMarshalSize is the largest allowed size of an encoded protobuf,
// since C++ and Java use signed int32s for the size.
const maxMarshalSize = 1<<31 - 1
// EncodeVarint returns the varint encoding of x. // EncodeVarint returns the varint encoding of x.
// This is the format for the // This is the format for the
// int32, int64, uint32, uint64, bool, and enum // int32, int64, uint32, uint64, bool, and enum
@ -222,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) {
} }
p := NewBuffer(nil) p := NewBuffer(nil)
err := p.Marshal(pb) err := p.Marshal(pb)
var state errorState
if err != nil && !state.shouldContinue(err, nil) {
return nil, err
}
if p.buf == nil && err == nil { if p.buf == nil && err == nil {
// Return a non-nil slice on success. // Return a non-nil slice on success.
return []byte{}, nil return []byte{}, nil
@ -254,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error {
// Can the object marshal itself? // Can the object marshal itself?
if m, ok := pb.(Marshaler); ok { if m, ok := pb.(Marshaler); ok {
data, err := m.Marshal() data, err := m.Marshal()
if err != nil {
return err
}
p.buf = append(p.buf, data...) p.buf = append(p.buf, data...)
return nil return err
} }
t, base, err := getbase(pb) t, base, err := getbase(pb)
@ -270,9 +275,12 @@ func (p *Buffer) Marshal(pb Message) error {
} }
if collectStats { if collectStats {
stats.Encode++ (stats).Encode++ // Parens are to work around a goimports bug.
} }
if len(p.buf) > maxMarshalSize {
return ErrTooLarge
}
return err return err
} }
@ -294,7 +302,7 @@ func Size(pb Message) (n int) {
} }
if collectStats { if collectStats {
stats.Size++ (stats).Size++ // Parens are to work around a goimports bug.
} }
return return
@ -999,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
if p.isMarshaler { if p.isMarshaler {
m := structPointer_Interface(structp, p.stype).(Marshaler) m := structPointer_Interface(structp, p.stype).(Marshaler)
data, _ := m.Marshal() data, _ := m.Marshal()
n += len(p.tagcode)
n += sizeRawBytes(data) n += sizeRawBytes(data)
continue continue
} }
@ -1058,10 +1065,32 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) {
// Encode an extension map. // Encode an extension map.
func (o *Buffer) enc_map(p *Properties, base structPointer) error { func (o *Buffer) enc_map(p *Properties, base structPointer) error {
v := *structPointer_ExtMap(base, p.field) exts := structPointer_ExtMap(base, p.field)
if err := encodeExtensionMap(v); err != nil { if err := encodeExtensionsMap(*exts); err != nil {
return err return err
} }
return o.enc_map_body(*exts)
}
func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
exts := structPointer_Extensions(base, p.field)
v, mu := exts.extensionsRead()
if v == nil {
return nil
}
mu.Lock()
defer mu.Unlock()
if err := encodeExtensionsMap(v); err != nil {
return err
}
return o.enc_map_body(v)
}
func (o *Buffer) enc_map_body(v map[int32]Extension) error {
// Fast-path for common cases: zero or one extensions. // Fast-path for common cases: zero or one extensions.
if len(v) <= 1 { if len(v) <= 1 {
for _, e := range v { for _, e := range v {
@ -1084,8 +1113,13 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error {
} }
func size_map(p *Properties, base structPointer) int { func size_map(p *Properties, base structPointer) int {
v := *structPointer_ExtMap(base, p.field) v := structPointer_ExtMap(base, p.field)
return sizeExtensionMap(v) return extensionsMapSize(*v)
}
func size_exts(p *Properties, base structPointer) int {
v := structPointer_Extensions(base, p.field)
return extensionsSize(v)
} }
// Encode a map field. // Encode a map field.
@ -1114,7 +1148,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
return err return err
} }
if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
return err return err
} }
return nil return nil
@ -1124,11 +1158,6 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
for _, key := range v.MapKeys() { for _, key := range v.MapKeys() {
val := v.MapIndex(key) val := v.MapIndex(key)
// The only illegal map entry values are nil message pointers.
if val.Kind() == reflect.Ptr && val.IsNil() {
return errors.New("proto: map has nil element")
}
keycopy.Set(key) keycopy.Set(key)
valcopy.Set(val) valcopy.Set(val)
@ -1216,13 +1245,18 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
return err return err
} }
} }
if len(o.buf) > maxMarshalSize {
return ErrTooLarge
}
} }
} }
// Do oneof fields. // Do oneof fields.
if prop.oneofMarshaler != nil { if prop.oneofMarshaler != nil {
m := structPointer_Interface(base, prop.stype).(Message) m := structPointer_Interface(base, prop.stype).(Message)
if err := prop.oneofMarshaler(m, o); err != nil { if err := prop.oneofMarshaler(m, o); err == ErrNil {
return errOneofHasNil
} else if err != nil {
return err return err
} }
} }
@ -1230,6 +1264,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
// Add unrecognized fields at the end. // Add unrecognized fields at the end.
if prop.unrecField.IsValid() { if prop.unrecField.IsValid() {
v := *structPointer_Bytes(base, prop.unrecField) v := *structPointer_Bytes(base, prop.unrecField)
if len(o.buf)+len(v) > maxMarshalSize {
return ErrTooLarge
}
if len(v) > 0 { if len(v) > 0 {
o.buf = append(o.buf, v...) o.buf = append(o.buf, v...)
} }

View file

@ -54,13 +54,17 @@ Equality is defined in this way:
in a proto3 .proto file, fields are not "set"; specifically, in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}). zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same, - Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal (a "bytes" field, and their corresponding elements are equal. Note a "bytes" field,
although represented by []byte, is not a repeated field) although represented by []byte, is not a repeated field and the
rule for the scalar fields described above applies.
- Two unset fields are equal. - Two unset fields are equal.
- Two unknown field sets are equal if their current - Two unknown field sets are equal if their current
encoded state is equal. encoded state is equal.
- Two extension sets are equal iff they have corresponding - Two extension sets are equal iff they have corresponding
elements that are pairwise equal. elements that are pairwise equal.
- Two map fields are equal iff their lengths are the same,
and they contain the same set of elements. Zero-length map
fields are equal.
- Every other combination of things are not equal. - Every other combination of things are not equal.
The return value is undefined if a and b are not protocol buffers. The return value is undefined if a and b are not protocol buffers.
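
The rules above describe `proto.Equal`. A hedged illustration of the nil-vs-empty and zero-length-map cases, using a hypothetical proto3-generated package `docpb` that is not part of this change:

```go
package equaldemo

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	docpb "example.com/gen/docpb" // hypothetical: message Doc { bytes payload = 1; map<string, int64> counts = 2; }
)

func demo() {
	a := &docpb.Doc{Payload: nil}
	b := &docpb.Doc{Payload: []byte{}}
	fmt.Println(proto.Equal(a, b)) // true: nil and zero-length proto3 bytes are equal

	c := &docpb.Doc{Counts: map[string]int64{}}
	d := &docpb.Doc{}
	fmt.Println(proto.Equal(c, d)) // true: zero-length map fields are equal
}
```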
@ -121,9 +125,16 @@ func equalStruct(v1, v2 reflect.Value) bool {
} }
} }
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_InternalExtensions")
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions") em2 := v2.FieldByName("XXX_extensions")
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false return false
} }
} }
@ -184,6 +195,13 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
} }
return true return true
case reflect.Ptr: case reflect.Ptr:
// Maps may have nil values in them, so check for nil.
if v1.IsNil() && v2.IsNil() {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return equalAny(v1.Elem(), v2.Elem(), prop) return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice: case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 { if v1.Type().Elem().Kind() == reflect.Uint8 {
@ -223,8 +241,14 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
} }
// base is the struct type that the extensions are based on. // base is the struct type that the extensions are based on.
// em1 and em2 are extension maps. // x1 and x2 are InternalExtensions.
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
em1, _ := x1.extensionsRead()
em2, _ := x2.extensionsRead()
return equalExtMap(base, em1, em2)
}
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) { if len(em1) != len(em2) {
return false return false
} }

View file

@ -52,14 +52,99 @@ type ExtensionRange struct {
Start, End int32 // both inclusive Start, End int32 // both inclusive
} }
// extendableProto is an interface implemented by any protocol buffer that may be extended. // extendableProto is an interface implemented by any protocol buffer generated by the current
// proto compiler that may be extended.
type extendableProto interface { type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
extensionsWrite() map[int32]Extension
extensionsRead() (map[int32]Extension, sync.Locker)
}
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
// version of the proto compiler that may be extended.
type extendableProtoV1 interface {
Message Message
ExtensionRangeArray() []ExtensionRange ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension ExtensionMap() map[int32]Extension
} }
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
type extensionAdapter struct {
extendableProtoV1
}
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
return e.ExtensionMap()
}
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
return e.ExtensionMap(), notLocker{}
}
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
type notLocker struct{}
func (n notLocker) Lock() {}
func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
func extendable(p interface{}) (extendableProto, bool) {
if ep, ok := p.(extendableProto); ok {
return ep, ok
}
if ep, ok := p.(extendableProtoV1); ok {
return extensionAdapter{ep}, ok
}
return nil, false
}
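
To make the old/new extension-format bridge easier to follow, here is a self-contained sketch of the same adapter shape with stand-in interfaces. None of these names exist in the vendored package, where the real interfaces are unexported:

```go
package extcompat

import "sync"

// Illustrative stand-ins only: the real extendableProto/extendableProtoV1
// interfaces above are unexported inside package proto.
type v2Style interface {
	extensionsWrite() map[int32]string
	extensionsRead() (map[int32]string, sync.Locker)
}

type v1Style interface {
	ExtensionMap() map[int32]string
}

// nopLocker plays the role of notLocker: v1-style values need no locking.
type nopLocker struct{}

func (nopLocker) Lock()   {}
func (nopLocker) Unlock() {}

// v1Adapter wraps a v1Style value so the rest of the code deals only with v2Style.
type v1Adapter struct{ v1Style }

func (a v1Adapter) extensionsWrite() map[int32]string { return a.ExtensionMap() }
func (a v1Adapter) extensionsRead() (map[int32]string, sync.Locker) {
	return a.ExtensionMap(), nopLocker{}
}

// asV2 mirrors extendable(): prefer the new interface, otherwise adapt the old one.
func asV2(v interface{}) (v2Style, bool) {
	if n, ok := v.(v2Style); ok {
		return n, true
	}
	if o, ok := v.(v1Style); ok {
		return v1Adapter{o}, true
	}
	return nil, false
}
```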
// XXX_InternalExtensions is an internal representation of proto extensions.
//
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
//
// The methods of XXX_InternalExtensions are not concurrency safe in general,
// but calls to logically read-only methods such as has and get may be executed concurrently.
type XXX_InternalExtensions struct {
// The struct must be indirect so that if a user inadvertently copies a
// generated message and its embedded XXX_InternalExtensions, they
// avoid the mayhem of a copied mutex.
//
// The mutex serializes all logically read-only operations to p.extensionMap.
// It is up to the client to ensure that write operations to p.extensionMap are
// mutually exclusive with other accesses.
p *struct {
mu sync.Mutex
extensionMap map[int32]Extension
}
}
// extensionsWrite returns the extension map, creating it on first use.
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
if e.p == nil {
e.p = new(struct {
mu sync.Mutex
extensionMap map[int32]Extension
})
e.p.extensionMap = make(map[int32]Extension)
}
return e.p.extensionMap
}
// extensionsRead returns the extensions map for read-only use. It may be nil.
// The caller must hold the returned mutex's lock when accessing Elements within the map.
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
if e.p == nil {
return nil, nil
}
return e.p.extensionMap, &e.p.mu
}
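
The pointer indirection called out in the comment is what keeps a copied message from duplicating the mutex. A small runnable sketch of the same shape; the names are illustrative only:

```go
package main

import (
	"fmt"
	"sync"
)

// box keeps its mutex and map behind a pointer, mirroring the shape of
// XXX_InternalExtensions: copying a box copies only the pointer, so both
// copies share one mutex and one map instead of duplicating the mutex.
type box struct {
	p *struct {
		mu sync.Mutex
		m  map[int32]string
	}
}

// write lazily allocates the shared state, as extensionsWrite does above.
func (b *box) write() map[int32]string {
	if b.p == nil {
		b.p = &struct {
			mu sync.Mutex
			m  map[int32]string
		}{m: make(map[int32]string)}
	}
	return b.p.m
}

func main() {
	var a box
	a.write()[1] = "set via a"
	c := a                               // copying the outer value copies only the pointer
	fmt.Println(c.p.m[1] == "set via a") // true: state is shared, mutex is not copied
}
```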
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
// ExtensionDesc represents an extension specification. // ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler. // Used in generated code from the protocol compiler.
@ -69,6 +154,7 @@ type ExtensionDesc struct {
Field int32 // field number Field int32 // field number
Name string // fully-qualified name of extension, for text formatting Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style Tag string // protobuf tag style
Filename string // name of the file in which the extension is defined
} }
func (ed *ExtensionDesc) repeated() bool { func (ed *ExtensionDesc) repeated() bool {
@ -92,8 +178,13 @@ type Extension struct {
} }
// SetRawExtension is for testing only. // SetRawExtension is for testing only.
func SetRawExtension(base extendableProto, id int32, b []byte) { func SetRawExtension(base Message, id int32, b []byte) {
base.ExtensionMap()[id] = Extension{enc: b} epb, ok := extendable(base)
if !ok {
return
}
extmap := epb.extensionsWrite()
extmap[id] = Extension{enc: b}
} }
// isExtensionField returns true iff the given field number is in an extension range. // isExtensionField returns true iff the given field number is in an extension range.
@ -108,8 +199,12 @@ func isExtensionField(pb extendableProto, field int32) bool {
// checkExtensionTypes checks that the given extension is valid for pb. // checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
var pbi interface{} = pb
// Check the extended type. // Check the extended type.
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { if ea, ok := pbi.(extensionAdapter); ok {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
} }
// Check the range. // Check the range.
@ -155,8 +250,19 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
return prop return prop
} }
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. // encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensionMap(m map[int32]Extension) error { func encodeExtensions(e *XXX_InternalExtensions) error {
m, mu := e.extensionsRead()
if m == nil {
return nil // fast path
}
mu.Lock()
defer mu.Unlock()
return encodeExtensionsMap(m)
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensionsMap(m map[int32]Extension) error {
for k, e := range m { for k, e := range m {
if e.value == nil || e.desc == nil { if e.value == nil || e.desc == nil {
// Extension is only in its encoded form. // Extension is only in its encoded form.
@ -184,7 +290,17 @@ func encodeExtensionMap(m map[int32]Extension) error {
return nil return nil
} }
func sizeExtensionMap(m map[int32]Extension) (n int) { func extensionsSize(e *XXX_InternalExtensions) (n int) {
m, mu := e.extensionsRead()
if m == nil {
return 0
}
mu.Lock()
defer mu.Unlock()
return extensionsMapSize(m)
}
func extensionsMapSize(m map[int32]Extension) (n int) {
for _, e := range m { for _, e := range m {
if e.value == nil || e.desc == nil { if e.value == nil || e.desc == nil {
// Extension is only in its encoded form. // Extension is only in its encoded form.
@ -209,26 +325,51 @@ func sizeExtensionMap(m map[int32]Extension) (n int) {
} }
// HasExtension returns whether the given extension is present in pb. // HasExtension returns whether the given extension is present in pb.
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { func HasExtension(pb Message, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.? // TODO: Check types, field numbers, etc.?
_, ok := pb.ExtensionMap()[extension.Field] epb, ok := extendable(pb)
if !ok {
return false
}
extmap, mu := epb.extensionsRead()
if extmap == nil {
return false
}
mu.Lock()
_, ok = extmap[extension.Field]
mu.Unlock()
return ok return ok
} }
// ClearExtension removes the given extension from pb. // ClearExtension removes the given extension from pb.
func ClearExtension(pb extendableProto, extension *ExtensionDesc) { func ClearExtension(pb Message, extension *ExtensionDesc) {
epb, ok := extendable(pb)
if !ok {
return
}
// TODO: Check types, field numbers, etc.? // TODO: Check types, field numbers, etc.?
delete(pb.ExtensionMap(), extension.Field) extmap := epb.extensionsWrite()
delete(extmap, extension.Field)
} }
// GetExtension parses and returns the given extension of pb. // GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension. // If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil { epb, ok := extendable(pb)
if !ok {
return nil, errors.New("proto: not an extendable proto")
}
if err := checkExtensionTypes(epb, extension); err != nil {
return nil, err return nil, err
} }
emap := pb.ExtensionMap() emap, mu := epb.extensionsRead()
if emap == nil {
return defaultExtensionValue(extension)
}
mu.Lock()
defer mu.Unlock()
e, ok := emap[extension.Field] e, ok := emap[extension.Field]
if !ok { if !ok {
// defaultExtensionValue returns the default value or // defaultExtensionValue returns the default value or
@ -332,10 +473,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
// GetExtensions returns a slice of the extensions present in pb that are also listed in es. // GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements. // The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, ok := pb.(extendableProto) epb, ok := extendable(pb)
if !ok { if !ok {
err = errors.New("proto: not an extendable proto") return nil, errors.New("proto: not an extendable proto")
return
} }
extensions = make([]interface{}, len(es)) extensions = make([]interface{}, len(es))
for i, e := range es { for i, e := range es {
@ -350,9 +490,44 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
return return
} }
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, ok := extendable(pb)
if !ok {
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
}
registeredExtensions := RegisteredExtensions(pb)
emap, mu := epb.extensionsRead()
if emap == nil {
return nil, nil
}
mu.Lock()
defer mu.Unlock()
extensions := make([]*ExtensionDesc, 0, len(emap))
for extid, e := range emap {
desc := e.desc
if desc == nil {
desc = registeredExtensions[extid]
if desc == nil {
desc = &ExtensionDesc{Field: extid}
}
}
extensions = append(extensions, desc)
}
return extensions, nil
}
// SetExtension sets the specified extension of pb to the specified value. // SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
if err := checkExtensionTypes(pb, extension); err != nil { epb, ok := extendable(pb)
if !ok {
return errors.New("proto: not an extendable proto")
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err return err
} }
typ := reflect.TypeOf(extension.ExtensionType) typ := reflect.TypeOf(extension.ExtensionType)
@ -368,10 +543,23 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
} }
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: value}
return nil return nil
} }
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
epb, ok := extendable(pb)
if !ok {
return
}
m := epb.extensionsWrite()
for k := range m {
delete(m, k)
}
}
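
With these changes the exported extension helpers accept any `proto.Message` and adapt internally. A usage sketch, assuming a hypothetical generated package `mypb` with an extendable message `MyMsg` and a string extension descriptor `E_Note`:

```go
package extdemo

import (
	"log"

	"github.com/golang/protobuf/proto"
	mypb "example.com/gen/mypb" // hypothetical generated package
)

func demo(m *mypb.MyMsg) {
	// All of these now take a plain proto.Message.
	if err := proto.SetExtension(m, mypb.E_Note, proto.String("hello")); err != nil {
		log.Fatal(err)
	}
	if proto.HasExtension(m, mypb.E_Note) {
		v, err := proto.GetExtension(m, mypb.E_Note)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("note = %q", *v.(*string))
	}
	descs, _ := proto.ExtensionDescs(m) // descriptors currently set on m
	log.Printf("%d extension(s) set", len(descs))

	proto.ClearExtension(m, mypb.E_Note)
	proto.ClearAllExtensions(m)
}
```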
// A global registry of extensions. // A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension. // The generated code will register the generated descriptors by calling RegisterExtension.

View file

@ -235,6 +235,7 @@ To create and play with a Test object:
test := &pb.Test{ test := &pb.Test{
Label: proto.String("hello"), Label: proto.String("hello"),
Type: proto.Int32(17), Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{ Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"), RequiredField: proto.String("good bye"),
}, },
@ -307,7 +308,7 @@ func GetStats() Stats { return stats }
// temporary Buffer and are fine for most applications. // temporary Buffer and are fine for most applications.
type Buffer struct { type Buffer struct {
buf []byte // encode/decode byte stream buf []byte // encode/decode byte stream
index int // write point index int // read point
// pools of basic types to amortize allocation. // pools of basic types to amortize allocation.
bools []bool bools []bool
@ -888,6 +889,10 @@ func isProto3Zero(v reflect.Value) bool {
return false return false
} }
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package. // to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true const ProtoPackageIsVersion1 = true

View file

@ -149,9 +149,21 @@ func skipVarint(buf []byte) []byte {
// MarshalMessageSet encodes the extension map represented by m in the message set wire format. // MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { func MarshalMessageSet(exts interface{}) ([]byte, error) {
if err := encodeExtensionMap(m); err != nil { var m map[int32]Extension
return nil, err switch exts := exts.(type) {
case *XXX_InternalExtensions:
if err := encodeExtensions(exts); err != nil {
return nil, err
}
m, _ = exts.extensionsRead()
case map[int32]Extension:
if err := encodeExtensionsMap(exts); err != nil {
return nil, err
}
m = exts
default:
return nil, errors.New("proto: not an extension map")
} }
// Sort extension IDs to provide a deterministic encoding. // Sort extension IDs to provide a deterministic encoding.
@ -178,7 +190,17 @@ func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { func UnmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m = exts.extensionsWrite()
case map[int32]Extension:
m = exts
default:
return errors.New("proto: not an extension map")
}
ms := new(messageSet) ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil { if err := Unmarshal(buf, ms); err != nil {
return err return err
@ -209,7 +231,16 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. // MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m, _ = exts.extensionsRead()
case map[int32]Extension:
m = exts
default:
return nil, errors.New("proto: not an extension map")
}
var b bytes.Buffer var b bytes.Buffer
b.WriteByte('{') b.WriteByte('{')
@ -252,7 +283,7 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
// Common-case fast path. // Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil return nil

View file

@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine // +build appengine js
// This file contains an implementation of proto field accesses using package reflect. // This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@ -139,6 +139,11 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string) return structPointer_ifield(p, f).(*[]string)
} }
// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}
// ExtMap returns the address of an extension map field in the struct. // ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension) return structPointer_ifield(p, f).(*map[int32]Extension)

View file

@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine // +build !appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe. // This file contains the implementation of the proto field accesses using package unsafe.
@ -126,6 +126,10 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
} }
// ExtMap returns the address of an extension map field in the struct. // ExtMap returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
} }
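
The two accessor files are now selected by complementary build tags (`!appengine,!js` for the unsafe path, `appengine js` for the reflect fallback). A generic sketch of that pairing, with hypothetical file and package names:

```go
// fast_unsafe.go: chosen everywhere the unsafe path is allowed, like pointer_unsafe.go above.
// +build !appengine,!js

package accessors

import "unsafe"

// wordSize reports the platform word size using package unsafe.
func wordSize() uintptr { return unsafe.Sizeof(uintptr(0)) }
```

```go
// fallback_reflect.go: the inverse tag picks this slower file on App Engine and js, like pointer_reflect.go above.
// +build appengine js

package accessors

import "reflect"

// wordSize reports the same value without importing unsafe.
func wordSize() uintptr { return reflect.TypeOf(uintptr(0)).Size() }
```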

View file

@ -173,6 +173,7 @@ func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order
type Properties struct { type Properties struct {
Name string // name of the field, for error messages Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set) OrigName string // original name before protocol compiler (always set)
JSONName string // name to use for JSON; determined by protoc
Wire string Wire string
WireType int WireType int
Tag int Tag int
@ -229,8 +230,9 @@ func (p *Properties) String() string {
if p.Packed { if p.Packed {
s += ",packed" s += ",packed"
} }
if p.OrigName != p.Name { s += ",name=" + p.OrigName
s += ",name=" + p.OrigName if p.JSONName != p.OrigName {
s += ",json=" + p.JSONName
} }
if p.proto3 { if p.proto3 {
s += ",proto3" s += ",proto3"
@ -310,6 +312,8 @@ func (p *Properties) Parse(s string) {
p.Packed = true p.Packed = true
case strings.HasPrefix(f, "name="): case strings.HasPrefix(f, "name="):
p.OrigName = f[5:] p.OrigName = f[5:]
case strings.HasPrefix(f, "json="):
p.JSONName = f[5:]
case strings.HasPrefix(f, "enum="): case strings.HasPrefix(f, "enum="):
p.Enum = f[5:] p.Enum = f[5:]
case f == "proto3": case f == "proto3":
@ -469,17 +473,13 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
p.dec = (*Buffer).dec_slice_int64 p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64 p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8: case reflect.Uint8:
p.enc = (*Buffer).enc_slice_byte
p.dec = (*Buffer).dec_slice_byte p.dec = (*Buffer).dec_slice_byte
p.size = size_slice_byte if p.proto3 {
// This is a []byte, which is either a bytes field,
// or the value of a map field. In the latter case,
// we always encode an empty []byte, so we should not
// use the proto3 enc/size funcs.
// f == nil iff this is the key/value of a map field.
if p.proto3 && f != nil {
p.enc = (*Buffer).enc_proto3_slice_byte p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte p.size = size_proto3_slice_byte
} else {
p.enc = (*Buffer).enc_slice_byte
p.size = size_slice_byte
} }
case reflect.Float32, reflect.Float64: case reflect.Float32, reflect.Float64:
switch t2.Bits() { switch t2.Bits() {
@ -678,7 +678,8 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
propertiesMap[t] = prop propertiesMap[t] = prop
// build properties // build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
reflect.PtrTo(t).Implements(extendableProtoV1Type)
prop.unrecField = invalidField prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField()) prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField()) prop.order = make([]int, t.NumField())
@ -689,15 +690,22 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
name := f.Name name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_extensions" { // special case if f.Name == "XXX_InternalExtensions" { // special case
p.enc = (*Buffer).enc_exts
p.dec = nil // not needed
p.size = size_exts
} else if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map p.enc = (*Buffer).enc_map
p.dec = nil // not needed p.dec = nil // not needed
p.size = size_map p.size = size_map
} } else if f.Name == "XXX_unrecognized" { // special case
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f) prop.unrecField = toField(&f)
} }
oneof := f.Tag.Get("protobuf_oneof") != "" // special case oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
// Oneof fields don't use the traditional protobuf tag.
p.OrigName = oneof
}
prop.Prop[i] = p prop.Prop[i] = p
prop.order[i] = i prop.order[i] = i
if debug { if debug {
@ -707,7 +715,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
} }
print("\n") print("\n")
} }
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
} }
} }
@ -836,7 +844,29 @@ func RegisterType(x Message, name string) {
} }
// MessageName returns the fully-qualified proto name for the given message type. // MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } func MessageName(x Message) string {
type xname interface {
XXX_MessageName() string
}
if m, ok := x.(xname); ok {
return m.XXX_MessageName()
}
return revProtoTypes[reflect.TypeOf(x)]
}
// MessageType returns the message type (pointer to struct) for a named message. // MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] } func MessageType(name string) reflect.Type { return protoTypes[name] }
// A registry of all linked proto files.
var (
protoFiles = make(map[string][]byte) // file name => fileDescriptor
)
// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
protoFiles[filename] = fileDescriptor
}
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
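
Generated code registers its file descriptor through `RegisterFile`, and `FileDescriptor` hands the bytes back. A sketch of retrieving and decompressing one; the gzip framing is an assumption about what protoc-gen-go emits, not something stated in this hunk:

```go
package fddemo

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/golang/protobuf/proto"
)

// dumpDescriptor fetches the descriptor registered by generated code and
// reports its decompressed size.
func dumpDescriptor(filename string) error {
	gz := proto.FileDescriptor(filename) // e.g. "google/protobuf/duration.proto"
	if gz == nil {
		return fmt.Errorf("no descriptor registered for %q", filename)
	}
	r, err := gzip.NewReader(bytes.NewReader(gz)) // assumed gzip-compressed FileDescriptorProto
	if err != nil {
		return err
	}
	defer r.Close()
	raw, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d bytes of FileDescriptorProto\n", filename, len(raw))
	return nil
}
```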

View file

@ -154,7 +154,7 @@ func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() { func (w *textWriter) unindent() {
if w.ind == 0 { if w.ind == 0 {
log.Printf("proto: textWriter unindented too far") log.Print("proto: textWriter unindented too far")
return return
} }
w.ind-- w.ind--
@ -175,7 +175,93 @@ type raw interface {
Bytes() []byte Bytes() []byte
} }
func writeStruct(w *textWriter, sv reflect.Value) error { func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
// isAny reports whether sv is a google.protobuf.Any message
func isAny(sv reflect.Value) bool {
type wkt interface {
XXX_WellKnownType() string
}
t, ok := sv.Addr().Interface().(wkt)
return ok && t.XXX_WellKnownType() == "Any"
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
turl := sv.FieldByName("TypeUrl")
val := sv.FieldByName("Value")
if !turl.IsValid() || !val.IsValid() {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
b, ok := val.Interface().([]byte)
if !ok {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
parts := strings.Split(turl.String(), "/")
mt := MessageType(parts[len(parts)-1])
if mt == nil {
return false, nil
}
m := reflect.New(mt.Elem())
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
return false, nil
}
w.Write([]byte("["))
u := turl.String()
if requiresQuotes(u) {
writeString(w, u)
} else {
w.Write([]byte(u))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.ind++
}
if err := tm.writeStruct(w, m.Elem()); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.ind--
w.Write([]byte(">\n"))
}
return true, nil
}
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
if tm.ExpandAny && isAny(sv) {
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
return err
}
}
st := sv.Type() st := sv.Type()
sprops := GetProperties(st) sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ { for i := 0; i < sv.NumField(); i++ {
@ -227,7 +313,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
} }
continue continue
} }
if err := writeAny(w, v, props); err != nil { if err := tm.writeAny(w, v, props); err != nil {
return err return err
} }
if err := w.WriteByte('\n'); err != nil { if err := w.WriteByte('\n'); err != nil {
@ -269,7 +355,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
return err return err
} }
} }
if err := writeAny(w, key, props.mkeyprop); err != nil { if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
return err return err
} }
if err := w.WriteByte('\n'); err != nil { if err := w.WriteByte('\n'); err != nil {
@ -286,7 +372,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
return err return err
} }
} }
if err := writeAny(w, val, props.mvalprop); err != nil { if err := tm.writeAny(w, val, props.mvalprop); err != nil {
return err return err
} }
if err := w.WriteByte('\n'); err != nil { if err := w.WriteByte('\n'); err != nil {
@ -358,7 +444,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
} }
// Enums have a String method, so writeAny will work fine. // Enums have a String method, so writeAny will work fine.
if err := writeAny(w, fv, props); err != nil { if err := tm.writeAny(w, fv, props); err != nil {
return err return err
} }
@ -369,8 +455,8 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
// Extensions (the XXX_extensions field). // Extensions (the XXX_extensions field).
pv := sv.Addr() pv := sv.Addr()
if pv.Type().Implements(extendableProtoType) { if _, ok := extendable(pv.Interface()); ok {
if err := writeExtensions(w, pv); err != nil { if err := tm.writeExtensions(w, pv); err != nil {
return err return err
} }
} }
@ -400,7 +486,7 @@ func writeRaw(w *textWriter, b []byte) error {
} }
// writeAny writes an arbitrary field. // writeAny writes an arbitrary field.
func writeAny(w *textWriter, v reflect.Value, props *Properties) error { func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v) v = reflect.Indirect(v)
// Floats have special cases. // Floats have special cases.
@ -427,7 +513,7 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
switch v.Kind() { switch v.Kind() {
case reflect.Slice: case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct. // Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Interface().([]byte))); err != nil { if err := writeString(w, string(v.Bytes())); err != nil {
return err return err
} }
case reflect.String: case reflect.String:
@ -449,15 +535,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
} }
} }
w.indent() w.indent()
if tm, ok := v.Interface().(encoding.TextMarshaler); ok { if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := tm.MarshalText() text, err := etm.MarshalText()
if err != nil { if err != nil {
return err return err
} }
if _, err = w.Write(text); err != nil { if _, err = w.Write(text); err != nil {
return err return err
} }
} else if err := writeStruct(w, v); err != nil { } else if err := tm.writeStruct(w, v); err != nil {
return err return err
} }
w.unindent() w.unindent()
@ -601,19 +687,24 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv. // writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable. // pv is assumed to be a pointer to a protocol message struct that is extendable.
func writeExtensions(w *textWriter, pv reflect.Value) error { func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()] emap := extensionMaps[pv.Type().Elem()]
ep := pv.Interface().(extendableProto) ep, _ := extendable(pv.Interface())
// Order the extensions by ID. // Order the extensions by ID.
// This isn't strictly necessary, but it will give us // This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier. // canonical output, which will also make testing easier.
m := ep.ExtensionMap() m, mu := ep.extensionsRead()
if m == nil {
return nil
}
mu.Lock()
ids := make([]int32, 0, len(m)) ids := make([]int32, 0, len(m))
for id := range m { for id := range m {
ids = append(ids, id) ids = append(ids, id)
} }
sort.Sort(int32Slice(ids)) sort.Sort(int32Slice(ids))
mu.Unlock()
for _, extNum := range ids { for _, extNum := range ids {
ext := m[extNum] ext := m[extNum]
@ -636,13 +727,13 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
// Repeated extensions will appear as a slice. // Repeated extensions will appear as a slice.
if !desc.repeated() { if !desc.repeated() {
if err := writeExtension(w, desc.Name, pb); err != nil { if err := tm.writeExtension(w, desc.Name, pb); err != nil {
return err return err
} }
} else { } else {
v := reflect.ValueOf(pb) v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ { for i := 0; i < v.Len(); i++ {
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err return err
} }
} }
@ -651,7 +742,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
return nil return nil
} }
func writeExtension(w *textWriter, name string, pb interface{}) error { func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err return err
} }
@ -660,7 +751,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error {
return err return err
} }
} }
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err return err
} }
if err := w.WriteByte('\n'); err != nil { if err := w.WriteByte('\n'); err != nil {
@ -685,7 +776,15 @@ func (w *textWriter) writeIndent() {
w.complete = false w.complete = false
} }
func marshalText(w io.Writer, pb Message, compact bool) error { // TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line).
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
val := reflect.ValueOf(pb) val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() { if pb == nil || val.IsNil() {
w.Write([]byte("<nil>")) w.Write([]byte("<nil>"))
@ -700,11 +799,11 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
aw := &textWriter{ aw := &textWriter{
w: ww, w: ww,
complete: true, complete: true,
compact: compact, compact: tm.Compact,
} }
if tm, ok := pb.(encoding.TextMarshaler); ok { if etm, ok := pb.(encoding.TextMarshaler); ok {
text, err := tm.MarshalText() text, err := etm.MarshalText()
if err != nil { if err != nil {
return err return err
} }
@ -718,7 +817,7 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
} }
// Dereference the received pointer so we don't have outer < and >. // Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val) v := reflect.Indirect(val)
if err := writeStruct(aw, v); err != nil { if err := tm.writeStruct(aw, v); err != nil {
return err return err
} }
if bw != nil { if bw != nil {
@ -727,25 +826,29 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
return nil return nil
} }
// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
tm.Marshal(&buf, pb)
return buf.String()
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// TODO: consider removing some of the Marshal functions below.
// MarshalText writes a given protocol buffer in text format. // MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w. // The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
return marshalText(w, pb, false)
}
// MarshalTextString is the same as MarshalText, but returns the string directly. // MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
var buf bytes.Buffer
marshalText(&buf, pb, false)
return buf.String()
}
// CompactText writes a given protocol buffer in compact text format (one line). // CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
// CompactTextString is the same as CompactText, but returns the string directly. // CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
var buf bytes.Buffer
marshalText(&buf, pb, true)
return buf.String()
}
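
The package-level text helpers are now thin wrappers around the new `TextMarshaler` type. A usage sketch with a hypothetical generated message `mypb.MyMsg`:

```go
package textdemo

import (
	"os"

	"github.com/golang/protobuf/proto"
	mypb "example.com/gen/mypb" // hypothetical generated package
)

func render(m *mypb.MyMsg) {
	// Package-level helpers delegate to default TextMarshaler values.
	_ = proto.MarshalText(os.Stdout, m) // default, multi-line
	_ = proto.CompactTextString(m)      // one line

	// A custom marshaler can additionally expand google.protobuf.Any fields,
	// provided the embedded message types are linked in and registered.
	tm := proto.TextMarshaler{Compact: true, ExpandAny: true}
	_ = tm.Marshal(os.Stdout, m)
	_ = tm.Text(m)
}
```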

View file

@ -44,6 +44,9 @@ import (
"unicode/utf8" "unicode/utf8"
) )
// Error string emitted when deserializing Any and fields are already set
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
type ParseError struct { type ParseError struct {
Message string Message string
Line int // 1-based line number Line int // 1-based line number
@ -119,6 +122,14 @@ func isWhitespace(c byte) bool {
return false return false
} }
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() { func (p *textParser) skipWhitespace() {
i := 0 i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
@ -155,7 +166,7 @@ func (p *textParser) advance() {
p.cur.offset, p.cur.line = p.offset, p.line p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = "" p.cur.unquoted = ""
switch p.s[0] { switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',': case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol // Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'': case '"', '\'':
@ -333,13 +344,13 @@ func (p *textParser) next() *token {
p.advance() p.advance()
if p.done { if p.done {
p.cur.value = "" p.cur.value = ""
} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace, // Look for multiple quoted strings separated by whitespace,
// and concatenate them. // and concatenate them.
cat := p.cur cat := p.cur
for { for {
p.skipWhitespace() p.skipWhitespace()
if p.done || p.s[0] != '"' { if p.done || !isQuote(p.s[0]) {
break break
} }
p.advance() p.advance()
@ -443,7 +454,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
fieldSet := make(map[string]bool) fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of // A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be // '>' or '}', or the end of the input. A name may also be
// "[extension]". // "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
for { for {
tok := p.next() tok := p.next()
if tok.err != nil { if tok.err != nil {
@ -453,33 +467,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
break break
} }
if tok.value == "[" { if tok.value == "[" {
// Looks like an extension. // Looks like an extension or an Any.
// //
// TODO: Check whether we need to handle // TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo"). // namespace rooted names (e.g. ".something.Foo").
tok = p.next() extName, err := p.consumeExtName()
if tok.err != nil { if err != nil {
return tok.err return err
} }
if s := strings.LastIndex(extName, "/"); s >= 0 {
// If it contains a slash, it's an Any type URL.
messageName := extName[s+1:]
mt := MessageType(messageName)
if mt == nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
}
tok = p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
v := reflect.New(mt.Elem())
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
return pe
}
b, err := Marshal(v.Interface().(Message))
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
}
if fieldSet["type_url"] {
return p.errorf(anyRepeatedlyUnpacked, "type_url")
}
if fieldSet["value"] {
return p.errorf(anyRepeatedlyUnpacked, "value")
}
sv.FieldByName("TypeUrl").SetString(extName)
sv.FieldByName("Value").SetBytes(b)
fieldSet["type_url"] = true
fieldSet["value"] = true
continue
}
var desc *ExtensionDesc var desc *ExtensionDesc
// This could be faster, but it's functional. // This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan. // TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == tok.value { if d.Name == extName {
desc = d desc = d
break break
} }
} }
if desc == nil { if desc == nil {
return p.errorf("unrecognized extension %q", tok.value) return p.errorf("unrecognized extension %q", extName)
}
// Check the extension terminator.
tok = p.next()
if tok.err != nil {
return tok.err
}
if tok.value != "]" {
return p.errorf("unrecognized extension terminator %q", tok.value)
} }
props := &Properties{} props := &Properties{}
@ -506,7 +561,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
} }
reqFieldErr = err reqFieldErr = err
} }
ep := sv.Addr().Interface().(extendableProto) ep := sv.Addr().Interface().(Message)
if !rep { if !rep {
SetExtension(ep, desc, ext.Interface()) SetExtension(ep, desc, ext.Interface())
} else { } else {
@ -537,7 +592,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
props = oop.Prop props = oop.Prop
nv := reflect.New(oop.Type.Elem()) nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0) dst = nv.Elem().Field(0)
sv.Field(oop.Field).Set(nv) field := sv.Field(oop.Field)
if !field.IsNil() {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
}
field.Set(nv)
} }
if !dst.IsValid() { if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st) return p.errorf("unknown field name %q in %v", name, st)
@ -558,8 +617,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
// The map entry should be this sequence of tokens: // The map entry should be this sequence of tokens:
// < key : KEY value : VALUE > // < key : KEY value : VALUE >
// Technically the "key" and "value" could come in any order, // However, implementations may omit key or value, and technically
// but in practice they won't. // we should support them in any order. See b/28924776 for a time
// this went wrong.
tok := p.next() tok := p.next()
var terminator string var terminator string
@ -571,32 +631,39 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
default: default:
return p.errorf("expected '{' or '<', found %q", tok.value) return p.errorf("expected '{' or '<', found %q", tok.value)
} }
if err := p.consumeToken("key"); err != nil { for {
return err tok := p.next()
} if tok.err != nil {
if err := p.consumeToken(":"); err != nil { return tok.err
return err }
} if tok.value == terminator {
if err := p.readAny(key, props.mkeyprop); err != nil { break
return err }
} switch tok.value {
if err := p.consumeOptionalSeparator(); err != nil { case "key":
return err if err := p.consumeToken(":"); err != nil {
} return err
if err := p.consumeToken("value"); err != nil { }
return err if err := p.readAny(key, props.mkeyprop); err != nil {
} return err
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { }
return err if err := p.consumeOptionalSeparator(); err != nil {
} return err
if err := p.readAny(val, props.mvalprop); err != nil { }
return err case "value":
} if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
if err := p.consumeOptionalSeparator(); err != nil { return err
return err }
} if err := p.readAny(val, props.mvalprop); err != nil {
if err := p.consumeToken(terminator); err != nil { return err
return err }
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
default:
p.back()
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
} }
dst.SetMapIndex(key, val) dst.SetMapIndex(key, val)
@ -619,7 +686,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
return err return err
} }
reqFieldErr = err reqFieldErr = err
} else if props.Required { }
if props.Required {
reqCount-- reqCount--
} }
@ -635,6 +703,35 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
return reqFieldErr return reqFieldErr
} }
// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If extension name or type url is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma. // consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility. // It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error { func (p *textParser) consumeOptionalSeparator() error {
@ -699,12 +796,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props) return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool: case reflect.Bool:
// Either "true", "false", 1 or 0. // true/1/t/True or false/f/0/False.
switch tok.value { switch tok.value {
case "true", "1": case "true", "1", "t", "True":
fv.SetBool(true) fv.SetBool(true)
return nil return nil
case "false", "0": case "false", "0", "f", "False":
fv.SetBool(false) fv.SetBool(false)
return nil return nil
} }

View file

@ -0,0 +1,7 @@
all:
cover:
go test -cover -v -coverprofile=cover.dat ./...
go tool cover -func cover.dat
.PHONY: cover

View file

@ -38,7 +38,7 @@ var errInvalidVarint = errors.New("invalid varint32 encountered")
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
// Per AbstractParser#parsePartialDelimitedFrom with // Per AbstractParser#parsePartialDelimitedFrom with
// CodedInputStream#readRawVarint32. // CodedInputStream#readRawVarint32.
headerBuf := make([]byte, binary.MaxVarintLen32) var headerBuf [binary.MaxVarintLen32]byte
var bytesRead, varIntBytes int var bytesRead, varIntBytes int
var messageLength uint64 var messageLength uint64
for varIntBytes == 0 { // i.e. no varint has been decoded yet. for varIntBytes == 0 { // i.e. no varint has been decoded yet.

View file

@ -33,8 +33,8 @@ func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
return 0, err return 0, err
} }
buf := make([]byte, binary.MaxVarintLen32) var buf [binary.MaxVarintLen32]byte
encodedLength := binary.PutUvarint(buf, uint64(len(buffer))) encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
sync, err := w.Write(buf[:encodedLength]) sync, err := w.Write(buf[:encodedLength])
if err != nil { if err != nil {

View file

@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/).
The following components are included in this product: The following components are included in this product:
goautoneg
http://bitbucket.org/ww/goautoneg
Copyright 2011, Open Knowledge Foundation Ltd.
See README.txt for license details.
perks - a fork of https://github.com/bmizerany/perks perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein Copyright 2013-2015 Blake Mizerany, Björn Rabenstein

View file

@ -1,53 +1 @@
# Overview See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
This is the [Prometheus](http://www.prometheus.io) telemetric
instrumentation client [Go](http://golang.org) client library. It
enable authors to define process-space metrics for their servers and
expose them through a web service interface for extraction,
aggregation, and a whole slew of other post processing techniques.
# Installing
$ go get github.com/prometheus/client_golang/prometheus
# Example
```go
package main
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
var (
indexed = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "my_company",
Subsystem: "indexer",
Name: "documents_indexed",
Help: "The number of documents indexed.",
})
size = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "my_company",
Subsystem: "storage",
Name: "documents_total_size_bytes",
Help: "The total size of all documents in the storage.",
})
)
func main() {
http.Handle("/metrics", prometheus.Handler())
indexed.Inc()
size.Set(5)
http.ListenAndServe(":8080", nil)
}
func init() {
prometheus.MustRegister(indexed)
prometheus.MustRegister(size)
}
```
# Documentation
[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)

View file

@ -15,15 +15,15 @@ package prometheus
// Collector is the interface implemented by anything that can be used by // Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for // Prometheus to collect metrics. A Collector has to be registered for
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet. // collection. See Registerer.Register.
// //
// The stock metrics provided by this package (like Gauge, Counter, Summary) are // The stock metrics provided by this package (Gauge, Counter, Summary,
// also Collectors (which only ever collect one metric, namely itself). An // Histogram, Untyped) are also Collectors (which only ever collect one metric,
// implementer of Collector may, however, collect multiple metrics in a // namely itself). An implementer of Collector may, however, collect multiple
// coordinated fashion and/or create metrics on the fly. Examples for collectors // metrics in a coordinated fashion and/or create metrics on the fly. Examples
// already implemented in this library are the metric vectors (i.e. collection // for collectors already implemented in this library are the metric vectors
// of multiple instances of the same Metric but with different label values) // (i.e. collection of multiple instances of the same Metric but with different
// like GaugeVec or SummaryVec, and the ExpvarCollector. // label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface { type Collector interface {
// Describe sends the super-set of all possible descriptors of metrics // Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once // collected by this Collector to the provided channel and returns once
@ -37,39 +37,39 @@ type Collector interface {
// executing this method, it must send an invalid descriptor (created // executing this method, it must send an invalid descriptor (created
// with NewInvalidDesc) to signal the error to the registry. // with NewInvalidDesc) to signal the error to the registry.
Describe(chan<- *Desc) Describe(chan<- *Desc)
// Collect is called by Prometheus when collecting metrics. The // Collect is called by the Prometheus registry when collecting
// implementation sends each collected metric via the provided channel // metrics. The implementation sends each collected metric via the
// and returns once the last metric has been sent. The descriptor of // provided channel and returns once the last metric has been sent. The
// each sent metric is one of those returned by Describe. Returned // descriptor of each sent metric is one of those returned by
// metrics that share the same descriptor must differ in their variable // Describe. Returned metrics that share the same descriptor must differ
// label values. This method may be called concurrently and must // in their variable label values. This method may be called
// therefore be implemented in a concurrency safe way. Blocking occurs // concurrently and must therefore be implemented in a concurrency safe
// at the expense of total performance of rendering all registered // way. Blocking occurs at the expense of total performance of rendering
// metrics. Ideally, Collector implementations support concurrent // all registered metrics. Ideally, Collector implementations support
// readers. // concurrent readers.
Collect(chan<- Metric) Collect(chan<- Metric)
} }
// SelfCollector implements Collector for a single Metric so that that the // selfCollector implements Collector for a single Metric so that the Metric
// Metric collects itself. Add it as an anonymous field to a struct that // collects itself. Add it as an anonymous field to a struct that implements
// implements Metric, and call Init with the Metric itself as an argument. // Metric, and call init with the Metric itself as an argument.
type SelfCollector struct { type selfCollector struct {
self Metric self Metric
} }
// Init provides the SelfCollector with a reference to the metric it is supposed // init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a // to collect. It is usually called within the factory function to create a
// metric. See example. // metric. See example.
func (c *SelfCollector) Init(self Metric) { func (c *selfCollector) init(self Metric) {
c.self = self c.self = self
} }
// Describe implements Collector. // Describe implements Collector.
func (c *SelfCollector) Describe(ch chan<- *Desc) { func (c *selfCollector) Describe(ch chan<- *Desc) {
ch <- c.self.Desc() ch <- c.self.Desc()
} }
// Collect implements Collector. // Collect implements Collector.
func (c *SelfCollector) Collect(ch chan<- Metric) { func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self ch <- c.self
} }
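
As the Collector documentation above describes, a custom Collector only has to implement Describe and Collect. A hedged sketch of a collector that mirrors an externally maintained value into a constant metric at scrape time; `queueCollector`, `queueLength`, and the metric name are hypothetical:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors one externally maintained number into Prometheus.
type queueCollector struct {
	desc *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		desc: prometheus.NewDesc(
			"example_queue_length",
			"Current number of items in the example queue.",
			nil, nil,
		),
	}
}

// Describe sends the super-set of all descriptors this collector can emit.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect creates a throw-away Metric from the current value on every scrape.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueLength())
}

// queueLength is a hypothetical stand-in for wherever the number comes from.
func queueLength() float64 { return 42 }

func main() {
	prometheus.MustRegister(newQueueCollector())
}
```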

View file

@ -35,6 +35,9 @@ type Counter interface {
// Prometheus metric. Do not use it for regular handling of a // Prometheus metric. Do not use it for regular handling of a
// Prometheus counter (as it can be used to break the contract of // Prometheus counter (as it can be used to break the contract of
// monotonically increasing values). // monotonically increasing values).
//
// Deprecated: Use NewConstMetric to create a counter for an external
// value. A Counter should never be set.
Set(float64) Set(float64)
// Inc increments the counter by 1. // Inc increments the counter by 1.
Inc() Inc()
@ -55,7 +58,7 @@ func NewCounter(opts CounterOpts) Counter {
opts.ConstLabels, opts.ConstLabels,
) )
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
result.Init(result) // Init self-collection. result.init(result) // Init self-collection.
return result return result
} }
@ -79,7 +82,7 @@ func (c *counter) Add(v float64) {
// CounterVec embeds MetricVec. See there for a full list of methods with // CounterVec embeds MetricVec. See there for a full list of methods with
// detailed documentation. // detailed documentation.
type CounterVec struct { type CounterVec struct {
MetricVec *MetricVec
} }
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@ -93,19 +96,15 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &CounterVec{ return &CounterVec{
MetricVec: MetricVec{ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
children: map[uint64]Metric{}, result := &counter{value: value{
desc: desc, desc: desc,
newMetric: func(lvs ...string) Metric { valType: CounterValue,
result := &counter{value: value{ labelPairs: makeLabelPairs(desc, lvs),
desc: desc, }}
valType: CounterValue, result.init(result) // Init self-collection.
labelPairs: makeLabelPairs(desc, lvs), return result
}} }),
result.Init(result) // Init self-collection.
return result
},
},
} }
} }
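
The refactoring above only changes how CounterVec is constructed internally (via newMetricVec); usage is unchanged. A short usage sketch with an illustrative metric name:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var requests = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "example_requests_total",
		Help: "Requests processed, partitioned by HTTP method.",
	},
	[]string{"method"},
)

func main() {
	prometheus.MustRegister(requests)
	requests.WithLabelValues("GET").Inc()                     // label values in declaration order
	requests.With(prometheus.Labels{"method": "POST"}).Add(3) // map form of the same lookup
}
```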

View file

@ -1,3 +1,16 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus package prometheus
import ( import (

View file

@ -11,18 +11,17 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// Package prometheus provides embeddable metric primitives for servers and // Package prometheus provides metrics primitives to instrument code for
// standardized exposition of telemetry through a web services interface. // monitoring. It also offers a registry for metrics. Sub-packages allow to
// expose the registered metrics via HTTP (package promhttp) or push them to a
// Pushgateway (package push).
// //
// All exported functions and methods are safe to be used concurrently unless // All exported functions and methods are safe to be used concurrently unless
// specified otherwise. // specified otherwise.
// //
// To expose metrics registered with the Prometheus registry, an HTTP server // A Basic Example
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
// //
// http.Handle("/metrics", prometheus.Handler()) // As a starting point, a very basic usage example:
//
// As a starting point a very basic usage example:
// //
// package main // package main
// //
@ -30,6 +29,7 @@
// "net/http" // "net/http"
// //
// "github.com/prometheus/client_golang/prometheus" // "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// ) // )
// //
// var ( // var (
@ -37,73 +37,145 @@
// Name: "cpu_temperature_celsius", // Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.", // Help: "Current temperature of the CPU.",
// }) // })
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{ // hdFailures = prometheus.NewCounterVec(
// Name: "hd_errors_total", // prometheus.CounterOpts{
// Help: "Number of hard-disk errors.", // Name: "hd_errors_total",
// }) // Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// )
// ) // )
// //
// func init() { // func init() {
// // Metrics have to be registered to be exposed:
// prometheus.MustRegister(cpuTemp) // prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures) // prometheus.MustRegister(hdFailures)
// } // }
// //
// func main() { // func main() {
// cpuTemp.Set(65.3) // cpuTemp.Set(65.3)
// hdFailures.Inc() // hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
// //
// http.Handle("/metrics", prometheus.Handler()) // // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":8080", nil) // http.ListenAndServe(":8080", nil)
// } // }
// //
// //
// This is a complete program that exports two metrics, a Gauge and a Counter. // This is a complete program that exports two metrics, a Gauge and a Counter,
// It also exports some stats about the HTTP usage of the /metrics // the latter with a label attached to turn it into a (one-dimensional) vector.
// endpoint. (See the Handler function for more detail.)
// //
// Two more advanced metric types are the Summary and Histogram. // Metrics
// //
// In addition to the fundamental metric types Gauge, Counter, Summary, and // The number of exported identifiers in this package might appear a bit
// Histogram, a very important part of the Prometheus data model is the // overwhelming. However, in addition to the basic plumbing shown in the example
// partitioning of samples along dimensions called labels, which results in // above, you only need to understand the different metric types and their
// vector versions for basic usage.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
// Prometheus server not to assume anything about its type.
//
// In addition to the fundamental metric types Gauge, Counter, Summary,
// Histogram, and Untyped, a very important part of the Prometheus data model is
// the partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// and HistogramVec. // HistogramVec, and UntypedVec.
// //
// Those are all the parts needed for basic usage. Detailed documentation and // While only the fundamental metric types implement the Metric interface, both
// examples are provided below. // the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
// SummaryVec, HistogramVec, and UntypedVec are not.
// //
// Everything else this package offers is essentially for "power users" only. A // To create instances of Metrics and their vector versions, you need a suitable
// few pointers to "power user features": // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
// HistogramOpts, or UntypedOpts.
// //
// All the various ...Opts structs have a ConstLabels field for labels that // Custom Collectors and constant Metrics
// never change their value (which is only useful under special circumstances,
// see documentation of the Opts type).
// //
// The Untyped metric behaves like a Gauge, but signals the Prometheus server // While you could create your own implementations of Metric, most likely you
// not to assume anything about its type. // will only ever implement the Collector interface on your own. At first
// glance, a custom Collector seems handy to bundle Metrics for common
// registration (with the prime example of the different metric vectors above,
// which bundle all the metrics of the same name but with different labels).
// //
// Functions to fine-tune how the metric registry works: EnableCollectChecks, // There is a more involved use case, too: If you already have metrics
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook. // available, created outside of the Prometheus context, you don't need the
// interface of the various Metric types. You essentially want to mirror the
// existing numbers into Prometheus Metrics during collection. An own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances.
// //
// For custom metric collection, there are two entry points: Custom Metric // The Collector example illustrates the use case. You can also look at the
// implementations and custom Collector implementations. A Metric is the // source code of the processCollector (mirroring process metrics), the
// fundamental unit in the Prometheus data model: a sample at a point in time // goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
// together with its meta-data (like its fully-qualified name and any number of // metrics) as examples that are used in this package itself.
// pairs of label name and label value) that knows how to marshal itself into a
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
// gets registered with the Prometheus registry and manages the collection of
// one or more Metrics. Many parts of this package are building blocks for
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
// metrics under the hood, and by Collectors to describe the Metrics to be
// collected, but only to be dealt with by users if they implement their own
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
// in handy. Other useful components for Metric and Collector implementation
// include: LabelPairSorter to sort the DTO version of label pairs,
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
// collection time, MetricVec to bundle custom Metrics into a metric vector
// Collector, SelfCollector to make a custom Metric collect itself.
// //
// A good example for a custom Collector is the ExpVarCollector included in this // If you just need to call a function to get a single float value to collect as
// package, which exports variables exported via the "expvar" package as // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// Prometheus metrics. // shortcuts.
//
// Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might
// cause. As suggested by the name, MustRegister panics if an error occurs. With
// the Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data
// model. Inconsistencies are ideally detected at registration time, not at
// collect time. The former will usually be detected at start-up time of a
// program, while the latter will only happen at scrape time, possibly not even
// on the first scrape if the inconsistency only becomes relevant later. That is
// the main reason why a Collector and a Metric have to describe themselves to
// the registry.
//
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegistry variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in
// the same way on a custom registry as the global functions Register and
// Unregister on the default registry.
//
// There are a number of uses for custom registries: You can use registries
// with special properties, see NewPedanticRegistry. You can avoid global state,
// as it is imposed by the DefaultRegistry. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
// Also note that the DefaultRegistry comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
// HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp
// sub-package. (The top-level functions in the prometheus package are
// deprecated.)
//
// Pushing to the Pushgateway
//
// Functions for pushing to the Pushgateway can be found in the push sub-package.
//
// Other Means of Exposition
//
// More ways of exposing metrics can easily be added. Sending metrics to
// Graphite would be an example that will soon be implemented.
package prometheus package prometheus
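
A sketch tying together the advanced-registry and HTTP-exposition sections above: a custom registry with explicitly chosen collectors, exposed via the promhttp sub-package. NewRegistry, NewGoCollector, and NewProcessCollector are named in the doc comment; promhttp.HandlerFor with a HandlerOpts struct is assumed here, and the port is arbitrary.

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry avoids the global DefaultRegistry state; nothing is
	// pre-registered, so the Go and process collectors are added explicitly.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())
	reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))

	// Expose exactly the metrics gathered by this registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```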

View file

@ -18,21 +18,21 @@ import (
"expvar" "expvar"
) )
// ExpvarCollector collects metrics from the expvar interface. It provides a type expvarCollector struct {
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the ExpvarCollector is inherently
// slow. Thus, the ExpvarCollector is probably great for experiments and
// prototying, but you should seriously consider a more direct implementation of
// Prometheus metrics for monitoring production systems.
//
// Use NewExpvarCollector to create new instances.
type ExpvarCollector struct {
exports map[string]*Desc exports map[string]*Desc
} }
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has // NewExpvarCollector returns a newly allocated expvar Collector that still has
// to be registered with the Prometheus registry. // to be registered with a Prometheus registry.
//
// An expvar Collector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
// for experiments and prototyping, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
// //
// The exports map has the following meaning: // The exports map has the following meaning:
// //
@ -59,21 +59,21 @@ type ExpvarCollector struct {
// sample values. // sample values.
// //
// Anything that does not fit into the scheme above is silently ignored. // Anything that does not fit into the scheme above is silently ignored.
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector { func NewExpvarCollector(exports map[string]*Desc) Collector {
return &ExpvarCollector{ return &expvarCollector{
exports: exports, exports: exports,
} }
} }
// Describe implements Collector. // Describe implements Collector.
func (e *ExpvarCollector) Describe(ch chan<- *Desc) { func (e *expvarCollector) Describe(ch chan<- *Desc) {
for _, desc := range e.exports { for _, desc := range e.exports {
ch <- desc ch <- desc
} }
} }
// Collect implements Collector. // Collect implements Collector.
func (e *ExpvarCollector) Collect(ch chan<- Metric) { func (e *expvarCollector) Collect(ch chan<- Metric) {
for name, desc := range e.exports { for name, desc := range e.exports {
var m Metric var m Metric
expVar := expvar.Get(name) expVar := expvar.Get(name)

View file

@ -58,7 +58,7 @@ func NewGauge(opts GaugeOpts) Gauge {
// (e.g. number of operations queued, partitioned by user and operation // (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec. // type). Create instances with NewGaugeVec.
type GaugeVec struct { type GaugeVec struct {
MetricVec *MetricVec
} }
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@ -72,13 +72,9 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &GaugeVec{ return &GaugeVec{
MetricVec: MetricVec{ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
children: map[uint64]Metric{}, return newValue(desc, GaugeValue, 0, lvs...)
desc: desc, }),
newMetric: func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
},
},
} }
} }

View file

@ -17,7 +17,7 @@ type goCollector struct {
// NewGoCollector returns a collector which exports metrics about the current // NewGoCollector returns a collector which exports metrics about the current
// go process. // go process.
func NewGoCollector() *goCollector { func NewGoCollector() Collector {
return &goCollector{ return &goCollector{
goroutines: NewGauge(GaugeOpts{ goroutines: NewGauge(GaugeOpts{
Namespace: "go", Namespace: "go",
@ -211,7 +211,7 @@ func NewGoCollector() *goCollector {
"Number of seconds since 1970 of last garbage collection.", "Number of seconds since 1970 of last garbage collection.",
nil, nil, nil, nil,
), ),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC*10 ^ 9) }, eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue, valType: GaugeValue,
}, },
}, },
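
The small-looking change above fixes a real bug: in Go, `^` is bitwise XOR and binds more loosely than `*`, so `ms.LastGC*10 ^ 9` evaluated as `(ms.LastGC*10) XOR 9` instead of converting the nanosecond timestamp to seconds. A tiny demonstration with an illustrative timestamp:

```go
package main

import "fmt"

func main() {
	lastGC := uint64(1482154190123456789) // nanoseconds since the Unix epoch (illustrative)

	wrong := float64(lastGC*10 ^ 9) // parses as (lastGC*10) XOR 9 — not a timestamp at all
	right := float64(lastGC) / 1e9  // seconds since the Unix epoch

	fmt.Println(wrong, right)
}
```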

View file

@ -51,11 +51,11 @@ type Histogram interface {
// bucket of a histogram ("le" -> "less or equal"). // bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le" const bucketLabel = "le"
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var ( var (
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a
// network service. Most likely, however, you will be required to define
// buckets customized to your use case.
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
errBucketLabelNotAllowed = fmt.Errorf( errBucketLabelNotAllowed = fmt.Errorf(
@ -210,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
// Finally we know the final length of h.upperBounds and can make counts. // Finally we know the final length of h.upperBounds and can make counts.
h.counts = make([]uint64, len(h.upperBounds)) h.counts = make([]uint64, len(h.upperBounds))
h.Init(h) // Init self-collection. h.init(h) // Init self-collection.
return h return h
} }
@ -222,7 +222,7 @@ type histogram struct {
sumBits uint64 sumBits uint64
count uint64 count uint64
SelfCollector selfCollector
// Note that there is no mutex required. // Note that there is no mutex required.
desc *Desc desc *Desc
@ -287,7 +287,7 @@ func (h *histogram) Write(out *dto.Metric) error {
// (e.g. HTTP request latencies, partitioned by status code and method). Create // (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec. // instances with NewHistogramVec.
type HistogramVec struct { type HistogramVec struct {
MetricVec *MetricVec
} }
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@ -301,13 +301,9 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &HistogramVec{ return &HistogramVec{
MetricVec: MetricVec{ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
children: map[uint64]Metric{}, return newHistogram(desc, opts, lvs...)
desc: desc, }),
newMetric: func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
},
},
} }
} }
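
As the DefBuckets comment above notes, the defaults target network-service response times and most use cases need custom buckets. A short sketch using the LinearBuckets helper (the metric name is illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "example_request_duration_seconds",
	Help: "Request latency in seconds.",
	// Ten buckets, 50ms wide, starting at 50ms; ExponentialBuckets is the
	// other helper when latencies span orders of magnitude.
	Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
})

func main() {
	prometheus.MustRegister(requestDuration)
	requestDuration.Observe(0.123)
}
```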

View file

@ -15,14 +15,114 @@ package prometheus
import ( import (
"bufio" "bufio"
"bytes"
"compress/gzip"
"fmt"
"io" "io"
"net" "net"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"sync"
"time" "time"
"github.com/prometheus/common/expfmt"
) )
// TODO(beorn7): Remove this whole file. It is a partial mirror of
// promhttp/http.go (to avoid circular import chains) where everything HTTP
// related should live. The functions here are just for avoiding
// breakage. Everything is deprecated.
const (
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
var bufPool sync.Pool
func getBuf() *bytes.Buffer {
buf := bufPool.Get()
if buf == nil {
return &bytes.Buffer{}
}
return buf.(*bytes.Buffer)
}
func giveBuf(buf *bytes.Buffer) {
buf.Reset()
bufPool.Put(buf)
}
// Handler returns an HTTP handler for the DefaultGatherer. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead
// (which is non instrumented).
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := DefaultGatherer.Gather()
if err != nil {
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
contentType := expfmt.Negotiate(req.Header)
buf := getBuf()
defer giveBuf(buf)
writer, encoding := decorateWriter(req, buf)
enc := expfmt.NewEncoder(writer, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
if closer, ok := writer.(io.Closer); ok {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
header.Set(contentTypeHeader, string(contentType))
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
w.Write(buf.Bytes())
})
}
// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part := strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
}
return writer, ""
}
var instLabels = []string{"method", "code"} var instLabels = []string{"method", "code"}
type nower interface { type nower interface {
@ -57,12 +157,34 @@ func nowSeries(t ...time.Time) nower {
// has a constant label named "handler" with the provided handlerName as // has a constant label named "handler" with the provided handlerName as
// value. http_requests_total is a metric vector partitioned by HTTP method // value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code"). // (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
//
// - It uses microseconds as unit, which is deprecated and should be replaced by
// seconds.
//
// - The size of the request is calculated in a separate goroutine. Since this
// calculator requires access to the request header, it creates a race with
// any writes to the header performed during request handling.
// httputil.ReverseProxy is a prominent example for a handler
// performing such writes.
//
// Upcoming versions of this package will provide ways of instrumenting HTTP
// handlers that are more flexible and have fewer issues. Please prefer direct
// instrumentation in the meantime.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
} }
// InstrumentHandlerFunc wraps the given function for instrumentation. It // InstrumentHandlerFunc wraps the given function for instrumentation. It
// otherwise works in the same way as InstrumentHandler. // otherwise works in the same way as InstrumentHandler (and shares the same
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts( return InstrumentHandlerFuncWithOpts(
SummaryOpts{ SummaryOpts{
@ -73,13 +195,13 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
) )
} }
// InstrumentHandlerWithOpts works like InstrumentHandler but provides more // InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
// flexibility (at the cost of a more complex call syntax). As // issues) but provides more flexibility (at the cost of a more complex call
// InstrumentHandler, this function registers four metric collectors, but it // syntax). As InstrumentHandler, this function registers four metric
// uses the provided SummaryOpts to create them. However, the fields "Name" and // collectors, but it uses the provided SummaryOpts to create them. However, the
// "Help" in the SummaryOpts are ignored. "Name" is replaced by // fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
// "requests_total", "request_duration_microseconds", "request_size_bytes", and // by "requests_total", "request_duration_microseconds", "request_size_bytes",
// "response_size_bytes", respectively. "Help" is replaced by an appropriate // and "response_size_bytes", respectively. "Help" is replaced by an appropriate
// help string. The names of the variable labels of the http_requests_total // help string. The names of the variable labels of the http_requests_total
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). // CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
// //
@ -98,13 +220,20 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, // cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
// and all its fields are set to the equally named fields in the provided // and all its fields are set to the equally named fields in the provided
// SummaryOpts. // SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
} }
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides // InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
// more flexibility (at the cost of a more complex call syntax). See // the same issues) but provides more flexibility (at the cost of a more complex
// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used. // call syntax). See InstrumentHandlerWithOpts for details how the provided
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
// as InstrumentHandler is.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec( reqCnt := NewCounterVec(
CounterOpts{ CounterOpts{
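
The deprecation notes above recommend direct instrumentation over InstrumentHandler. A hedged sketch of what that can look like: a counter partitioned by method plus a seconds-based Histogram, wrapped around a handler by hand. Metric names are illustrative, and status-code labelling is left out to keep it short.

```go
package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	httpRequests = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_http_requests_total",
			Help: "HTTP requests, partitioned by method.",
		},
		[]string{"method"},
	)
	httpDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "example_http_request_duration_seconds",
		Help:    "HTTP request latency in seconds.",
		Buckets: prometheus.DefBuckets,
	})
)

// instrument wraps a handler with request counting and latency observation.
func instrument(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		httpRequests.WithLabelValues(r.Method).Inc()
		httpDuration.Observe(time.Since(start).Seconds())
	})
}

func main() {
	prometheus.MustRegister(httpRequests, httpDuration)
	http.Handle("/metrics", promhttp.Handler())
	http.Handle("/", instrument(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})))
	http.ListenAndServe(":8080", nil)
}
```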

View file

@ -22,10 +22,8 @@ import (
const separatorByte byte = 255 const separatorByte byte = 255
// A Metric models a single sample value with its meta data being exported to // A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementers of Metric in this package inclued Gauge, Counter, // Prometheus. Implementations of Metric in this package are Gauge, Counter,
// Untyped, and Summary. Users can implement their own Metric types, but that // Histogram, Summary, and Untyped.
// should be rarely needed. See the example for SelfCollector, which is also an
// example for a user-implemented Metric.
type Metric interface { type Metric interface {
// Desc returns the descriptor for the Metric. This method idempotently // Desc returns the descriptor for the Metric. This method idempotently
// returns the same descriptor throughout the lifetime of the // returns the same descriptor throughout the lifetime of the
@ -36,21 +34,23 @@ type Metric interface {
// Write encodes the Metric into a "Metric" Protocol Buffer data // Write encodes the Metric into a "Metric" Protocol Buffer data
// transmission object. // transmission object.
// //
// Implementers of custom Metric types must observe concurrency safety // Metric implementations must observe concurrency safety as reads of
// as reads of this metric may occur at any time, and any blocking // this metric may occur at any time, and any blocking occurs at the
// occurs at the expense of total performance of rendering all // expense of total performance of rendering all registered
// registered metrics. Ideally Metric implementations should support // metrics. Ideally, Metric implementations should support concurrent
// concurrent readers. // readers.
// //
// The Prometheus client library attempts to minimize memory allocations // While populating dto.Metric, it is the responsibility of the
// and will provide a pre-existing reset dto.Metric pointer. Prometheus // implementation to ensure validity of the Metric protobuf (like valid
// may recycle the dto.Metric proto message, so Metric implementations // UTF-8 strings or syntactically valid metric and label names). It is
// should just populate the provided dto.Metric and then should not keep // recommended to sort labels lexicographically. (Implementers may find
// any reference to it. // LabelPairSorter useful for that.) Callers of Write should still make
// // sure of sorting if they depend on it.
// While populating dto.Metric, labels must be sorted lexicographically.
// (Implementers may find LabelPairSorter useful for that.)
Write(*dto.Metric) error Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The
// signature of this method should be changed to "Write() (*dto.Metric,
// error)".
} }
// Opts bundles the options for creating most Metric types. Each metric // Opts bundles the options for creating most Metric types. Each metric

View file

@ -28,7 +28,7 @@ type processCollector struct {
// NewProcessCollector returns a collector which exports the current state of // NewProcessCollector returns a collector which exports the current state of
// process metrics including cpu, memory and file descriptor usage as well as // process metrics including cpu, memory and file descriptor usage as well as
// the process start time for the given process id under the given namespace. // the process start time for the given process id under the given namespace.
func NewProcessCollector(pid int, namespace string) *processCollector { func NewProcessCollector(pid int, namespace string) Collector {
return NewProcessCollectorPIDFn( return NewProcessCollectorPIDFn(
func() (int, error) { return pid, nil }, func() (int, error) { return pid, nil },
namespace, namespace,
@ -43,7 +43,7 @@ func NewProcessCollector(pid int, namespace string) *processCollector {
func NewProcessCollectorPIDFn( func NewProcessCollectorPIDFn(
pidFn func() (int, error), pidFn func() (int, error),
namespace string, namespace string,
) *processCollector { ) Collector {
c := processCollector{ c := processCollector{
pidFn: pidFn, pidFn: pidFn,
collectFn: func(chan<- Metric) {}, collectFn: func(chan<- Metric) {},

View file

@ -1,65 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package prometheus
// Push triggers a metric collection by the default registry and pushes all
// collected metrics to the Pushgateway specified by url. See the Pushgateway
// documentation for detailed implications of the job and instance
// parameter. instance can be left empty. You can use just host:port or ip:port
// as url, in which case 'http://' is added automatically. You can also include
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and instance will
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
// to push to the Pushgateway.)
func Push(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "PUT")
}
// PushAdd works like Push, but only previously pushed metrics with the same
// name (and the same job and instance) will be replaced. (It uses HTTP method
// 'POST' to push to the Pushgateway.)
func PushAdd(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "POST")
}
// PushCollectors works like Push, but it does not collect from the default
// registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "PUT", collectors...)
}
// PushAddCollectors works like PushAdd, but it does not collect from the
// default registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "POST", collectors...)
}
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
r := newRegistry()
for _, collector := range collectors {
if _, err := r.Register(collector); err != nil {
return err
}
}
return r.Push(job, instance, url, method)
}

File diff suppressed because it is too large Load diff

View file

@ -53,8 +53,8 @@ type Summary interface {
Observe(float64) Observe(float64)
} }
// DefObjectives are the default Summary quantile values.
var ( var (
// DefObjectives are the default Summary quantile values.
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
errQuantileLabelNotAllowed = fmt.Errorf( errQuantileLabelNotAllowed = fmt.Errorf(
@ -139,11 +139,11 @@ type SummaryOpts struct {
BufCap uint32 BufCap uint32
} }
// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge // Great fuck-up with the sliding-window decay algorithm... The Merge method of
// method of perk/quantile is actually not working as advertised - and it might // perk/quantile is actually not working as advertised - and it might be
// be unfixable, as the underlying algorithm is apparently not capable of // unfixable, as the underlying algorithm is apparently not capable of merging
// merging summaries in the first place. To avoid using Merge, we are currently // summaries in the first place. To avoid using Merge, we are currently adding
// adding observations to _each_ age bucket, i.e. the effort to add a sample is // observations to _each_ age bucket, i.e. the effort to add a sample is
// essentially multiplied by the number of age buckets. When rotating age // essentially multiplied by the number of age buckets. When rotating age
// buckets, we empty the previous head stream. On scrape time, we simply take // buckets, we empty the previous head stream. On scrape time, we simply take
// the quantiles from the head stream (no merging required). Result: More effort // the quantiles from the head stream (no merging required). Result: More effort
@ -227,12 +227,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
} }
sort.Float64s(s.sortedObjectives) sort.Float64s(s.sortedObjectives)
s.Init(s) // Init self-collection. s.init(s) // Init self-collection.
return s return s
} }
type summary struct { type summary struct {
SelfCollector selfCollector
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
mtx sync.Mutex // Protects every other moving part. mtx sync.Mutex // Protects every other moving part.
@ -390,7 +390,7 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create // (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec. // instances with NewSummaryVec.
type SummaryVec struct { type SummaryVec struct {
MetricVec *MetricVec
} }
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@ -404,13 +404,9 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &SummaryVec{ return &SummaryVec{
MetricVec: MetricVec{ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
children: map[uint64]Metric{}, return newSummary(desc, opts, lvs...)
desc: desc, }),
newMetric: func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
},
},
} }
} }
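
DefObjectives above gives the default quantile/error pairs. A short usage sketch with explicit objectives (the metric name is illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var rpcDuration = prometheus.NewSummary(prometheus.SummaryOpts{
	Name: "example_rpc_duration_seconds",
	Help: "RPC latency distribution.",
	// Track the median, 90th, and 99th percentiles with the given absolute errors.
	Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})

func main() {
	prometheus.MustRegister(rpcDuration)
	rpcDuration.Observe(0.042)
}
```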

View file

@ -56,7 +56,7 @@ func NewUntyped(opts UntypedOpts) Untyped {
// labels. This is used if you want to count the same thing partitioned by // labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec. // various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct { type UntypedVec struct {
MetricVec *MetricVec
} }
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and // NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
@ -70,13 +70,9 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &UntypedVec{ return &UntypedVec{
MetricVec: MetricVec{ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
children: map[uint64]Metric{}, return newValue(desc, UntypedValue, 0, lvs...)
desc: desc, }),
newMetric: func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
},
},
} }
} }

View file

@ -48,7 +48,7 @@ type value struct {
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64 valBits uint64
SelfCollector selfCollector
desc *Desc desc *Desc
valType ValueType valType ValueType
@ -68,7 +68,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin
valBits: math.Float64bits(val), valBits: math.Float64bits(val),
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: makeLabelPairs(desc, labelValues),
} }
result.Init(result) result.init(result)
return result return result
} }
@ -113,7 +113,7 @@ func (v *value) Write(out *dto.Metric) error {
// library to back the implementations of CounterFunc, GaugeFunc, and // library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc. // UntypedFunc.
type valueFunc struct { type valueFunc struct {
SelfCollector selfCollector
desc *Desc desc *Desc
valType ValueType valType ValueType
@ -134,7 +134,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
function: function, function: function,
labelPairs: makeLabelPairs(desc, nil), labelPairs: makeLabelPairs(desc, nil),
} }
result.Init(result) result.init(result)
return result return result
} }

View file

@ -16,6 +16,8 @@ package prometheus
import ( import (
"fmt" "fmt"
"sync" "sync"
"github.com/prometheus/common/model"
) )
// MetricVec is a Collector to bundle metrics of the same name that // MetricVec is a Collector to bundle metrics of the same name that
@ -25,10 +27,31 @@ import (
// provided in this package. // provided in this package.
type MetricVec struct { type MetricVec struct {
mtx sync.RWMutex // Protects the children. mtx sync.RWMutex // Protects the children.
children map[uint64]Metric children map[uint64][]metricWithLabelValues
desc *Desc desc *Desc
newMetric func(labelValues ...string) Metric newMetric func(labelValues ...string) Metric
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
hashAddByte func(h uint64, b byte) uint64
}
// newMetricVec returns an initialized MetricVec. The concrete value is
// returned for embedding into another struct.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
return &MetricVec{
children: map[uint64][]metricWithLabelValues{},
desc: desc,
newMetric: newMetric,
hashAdd: hashAdd,
hashAddByte: hashAddByte,
}
}
// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
values []string
metric Metric
} }
// Describe implements Collector. The length of the returned slice // Describe implements Collector. The length of the returned slice
@ -42,8 +65,10 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
m.mtx.RLock() m.mtx.RLock()
defer m.mtx.RUnlock() defer m.mtx.RUnlock()
for _, metric := range m.children { for _, metrics := range m.children {
ch <- metric for _, metric := range metrics {
ch <- metric.metric
}
} }
} }
@ -77,16 +102,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
return nil, err return nil, err
} }
m.mtx.RLock() return m.getOrCreateMetricWithLabelValues(h, lvs), nil
metric, ok := m.children[h]
m.mtx.RUnlock()
if ok {
return metric, nil
}
m.mtx.Lock()
defer m.mtx.Unlock()
return m.getOrCreateMetric(h, lvs...), nil
} }
// GetMetricWith returns the Metric for the given Labels map (the label names // GetMetricWith returns the Metric for the given Labels map (the label names
@ -107,20 +123,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
return nil, err return nil, err
} }
m.mtx.RLock() return m.getOrCreateMetricWithLabels(h, labels), nil
metric, ok := m.children[h]
m.mtx.RUnlock()
if ok {
return metric, nil
}
lvs := make([]string, len(labels))
for i, label := range m.desc.variableLabels {
lvs[i] = labels[label]
}
m.mtx.Lock()
defer m.mtx.Unlock()
return m.getOrCreateMetric(h, lvs...), nil
} }
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error // WithLabelValues works as GetMetricWithLabelValues, but panics if an error
@ -168,11 +171,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
if err != nil { if err != nil {
return false return false
} }
if _, ok := m.children[h]; !ok { return m.deleteByHashWithLabelValues(h, lvs)
return false
}
delete(m.children, h)
return true
} }
// Delete deletes the metric where the variable labels are the same as those // Delete deletes the metric where the variable labels are the same as those
@ -193,10 +192,50 @@ func (m *MetricVec) Delete(labels Labels) bool {
if err != nil { if err != nil {
return false return false
} }
if _, ok := m.children[h]; !ok {
return m.deleteByHashWithLabels(h, labels)
}
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
metrics, ok := m.children[h]
if !ok {
return false return false
} }
delete(m.children, h)
i := m.findMetricWithLabelValues(metrics, lvs)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true
}
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use labels to select a metric and remove
// only that metric.
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
metrics, ok := m.children[h]
if !ok {
return false
}
i := m.findMetricWithLabels(metrics, labels)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true return true
} }
@ -216,7 +255,8 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
} }
h := hashNew() h := hashNew()
for _, val := range vals { for _, val := range vals {
h = hashAdd(h, val) h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
} }
return h, nil return h, nil
} }
@ -231,19 +271,134 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if !ok { if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label) return 0, fmt.Errorf("label name %q missing in label map", label)
} }
h = hashAdd(h, val) h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
} }
return h, nil return h, nil
} }
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric { // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
metric, ok := m.children[hash] // or creates it and returns the new one.
//
// This function holds the mutex.
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithLabelValues(hash, lvs)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithLabelValues(hash, lvs)
if !ok { if !ok {
// Copy labelValues. Otherwise, they would be allocated even if we don't go // Copy to avoid allocation in case we don't go down this code path.
// down this code path. copiedLVs := make([]string, len(lvs))
copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...) copy(copiedLVs, lvs)
metric = m.newMetric(copiedLabelValues...) metric = m.newMetric(copiedLVs...)
m.children[hash] = metric m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
} }
return metric return metric
} }
// getOrCreateMetricWithLabels retrieves the metric by hash and labels
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithLabels(hash, labels)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithLabels(hash, labels)
if !ok {
lvs := m.extractLabelValues(labels)
metric = m.newMetric(lvs...)
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
}
return metric
}
// getMetricWithLabelValues gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// getMetricWithLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
for i, metric := range metrics {
if m.matchLabelValues(metric.values, lvs) {
return i
}
}
return len(metrics)
}
// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
for i, metric := range metrics {
if m.matchLabels(metric.values, labels) {
return i
}
}
return len(metrics)
}
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
if len(values) != len(lvs) {
return false
}
for i, v := range values {
if v != lvs[i] {
return false
}
}
return true
}
func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
if len(labels) != len(values) {
return false
}
for i, k := range m.desc.variableLabels {
if values[i] != labels[k] {
return false
}
}
return true
}
func (m *MetricVec) extractLabelValues(labels Labels) []string {
labelValues := make([]string, len(labels))
for i, k := range m.desc.variableLabels {
labelValues[i] = labels[k]
}
return labelValues
}
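For orientation, here is a brief, hedged sketch of how the bucketed `children` map above is reached through the public vector API. It assumes the usual `prometheus.NewCounterVec` constructor from this library; the metric and label names are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Two variable labels; each distinct label-value combination is hashed
	// into m.children and lands in a bucket of metricWithLabelValues.
	reqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_requests_total", Help: "Demo counter."},
		[]string{"method", "code"},
	)

	reqs.WithLabelValues("GET", "200").Inc()                            // getOrCreateMetricWithLabelValues
	reqs.With(prometheus.Labels{"method": "POST", "code": "500"}).Inc() // getOrCreateMetricWithLabels

	// Deleting walks the same bucket via deleteByHashWithLabelValues / ...WithLabels.
	fmt.Println(reqs.DeleteLabelValues("GET", "200"))                            // true
	fmt.Println(reqs.Delete(prometheus.Labels{"method": "PUT", "code": "200"})) // false
}
```

The point of the bucket is that two different label-value sets which happen to hash to the same value no longer overwrite each other; the `findMetricWith...` helpers above pick the right entry inside the bucket.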


@@ -46,10 +46,7 @@ func ResponseFormat(h http.Header) Format {
        return FmtUnknown
    }

-    const (
-        textType = "text/plain"
-        jsonType = "application/json"
-    )
+    const textType = "text/plain"

    switch mediatype {
    case ProtoType:
@@ -66,22 +63,6 @@ func ResponseFormat(h http.Header) Format {
            return FmtUnknown
        }
        return FmtText
-
-    case jsonType:
-        var prometheusAPIVersion string
-
-        if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
-            prometheusAPIVersion = params["version"]
-        } else {
-            prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
-        }
-
-        switch prometheusAPIVersion {
-        case "0.0.2", "":
-            return fmtJSON2
-        default:
-            return FmtUnknown
-        }
    }

    return FmtUnknown
@@ -93,8 +74,6 @@ func NewDecoder(r io.Reader, format Format) Decoder {
    switch format {
    case FmtProtoDelim:
        return &protoDecoder{r: r}
-    case fmtJSON2:
-        return newJSON2Decoder(r)
    }
    return &textDecoder{r: r}
}
@@ -132,7 +111,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
    return nil
}

-// textDecoder implements the Decoder interface for the text protcol.
+// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
    r io.Reader
    p TextParser
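With the JSON 0.0.2 decoder gone, content negotiation falls through to protobuf or text. A hedged usage sketch of the remaining decoder path (the scrape URL is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	resp, err := http.Get("http://localhost:9100/metrics") // placeholder target
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Negotiate the format from the response headers; JSON is no longer a case,
	// so anything unknown falls back to the text decoder.
	format := expfmt.ResponseFormat(resp.Header)
	dec := expfmt.NewDecoder(resp.Body, format)

	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(mf.GetName(), len(mf.GetMetric()))
	}
}
```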


@@ -29,9 +29,6 @@ const (
    FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
    FmtProtoText    Format = ProtoFmt + ` encoding=text`
    FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
-
-    // fmtJSON2 is hidden as it is deprecated.
-    fmtJSON2 Format = `application/json; version=0.0.2`
)

const (


@@ -20,8 +20,8 @@ import "bytes"
 // Fuzz text metric parser with with github.com/dvyukov/go-fuzz:
 //
-//     go-fuzz-build github.com/prometheus/client_golang/text
-//     go-fuzz -bin text-fuzz.zip -workdir fuzz
+//     go-fuzz-build github.com/prometheus/common/expfmt
+//     go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
 //
 // Further input samples should go in the folder fuzz/corpus.
 func Fuzz(in []byte) int {


@@ -1,174 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expfmt
import (
"encoding/json"
"fmt"
"io"
"sort"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
)
type json2Decoder struct {
dec *json.Decoder
fams []*dto.MetricFamily
}
func newJSON2Decoder(r io.Reader) Decoder {
return &json2Decoder{
dec: json.NewDecoder(r),
}
}
type histogram002 struct {
Labels model.LabelSet `json:"labels"`
Values map[string]float64 `json:"value"`
}
type counter002 struct {
Labels model.LabelSet `json:"labels"`
Value float64 `json:"value"`
}
func protoLabelSet(base, ext model.LabelSet) ([]*dto.LabelPair, error) {
labels := base.Clone().Merge(ext)
delete(labels, model.MetricNameLabel)
names := make([]string, 0, len(labels))
for ln := range labels {
names = append(names, string(ln))
}
sort.Strings(names)
pairs := make([]*dto.LabelPair, 0, len(labels))
for _, ln := range names {
if !model.LabelNameRE.MatchString(ln) {
return nil, fmt.Errorf("invalid label name %q", ln)
}
lv := labels[model.LabelName(ln)]
pairs = append(pairs, &dto.LabelPair{
Name: proto.String(ln),
Value: proto.String(string(lv)),
})
}
return pairs, nil
}
func (d *json2Decoder) more() error {
var entities []struct {
BaseLabels model.LabelSet `json:"baseLabels"`
Docstring string `json:"docstring"`
Metric struct {
Type string `json:"type"`
Values json.RawMessage `json:"value"`
} `json:"metric"`
}
if err := d.dec.Decode(&entities); err != nil {
return err
}
for _, e := range entities {
f := &dto.MetricFamily{
Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
Help: proto.String(e.Docstring),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{},
}
d.fams = append(d.fams, f)
switch e.Metric.Type {
case "counter", "gauge":
var values []counter002
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
}
for _, ctr := range values {
labels, err := protoLabelSet(e.BaseLabels, ctr.Labels)
if err != nil {
return err
}
f.Metric = append(f.Metric, &dto.Metric{
Label: labels,
Untyped: &dto.Untyped{
Value: proto.Float64(ctr.Value),
},
})
}
case "histogram":
var values []histogram002
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
}
for _, hist := range values {
quants := make([]string, 0, len(values))
for q := range hist.Values {
quants = append(quants, q)
}
sort.Strings(quants)
for _, q := range quants {
value := hist.Values[q]
// The correct label is "quantile" but to not break old expressions
// this remains "percentile"
hist.Labels["percentile"] = model.LabelValue(q)
labels, err := protoLabelSet(e.BaseLabels, hist.Labels)
if err != nil {
return err
}
f.Metric = append(f.Metric, &dto.Metric{
Label: labels,
Untyped: &dto.Untyped{
Value: proto.Float64(value),
},
})
}
}
default:
return fmt.Errorf("unknown metric type %q", e.Metric.Type)
}
}
return nil
}
// Decode implements the Decoder interface.
func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
if len(d.fams) == 0 {
if err := d.more(); err != nil {
return err
}
}
*v = *d.fams[0]
d.fams = d.fams[1:]
return nil
}


@@ -14,7 +14,6 @@
 package expfmt

 import (
-    "bytes"
    "fmt"
    "io"
    "math"
@@ -26,9 +25,12 @@ import (
 // MetricFamilyToText converts a MetricFamily proto message into text format and
 // writes the resulting lines to 'out'. It returns the number of bytes written
-// and any error encountered. This function does not perform checks on the
-// content of the metric and label names, i.e. invalid metric or label names
+// and any error encountered. The output will have the same order as the input,
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
 // will result in invalid text format output.
+//
 // This method fulfills the type 'prometheus.encoder'.
 func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
    var written int
@@ -285,21 +287,17 @@ func labelPairsToText(
    return written, nil
}

+var (
+    escape                = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+    escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
 // escapeString replaces '\' by '\\', new line character by '\n', and - if
 // includeDoubleQuote is true - '"' by '\"'.
 func escapeString(v string, includeDoubleQuote bool) string {
-    result := bytes.NewBuffer(make([]byte, 0, len(v)))
-    for _, c := range v {
-        switch {
-        case c == '\\':
-            result.WriteString(`\\`)
-        case includeDoubleQuote && c == '"':
-            result.WriteString(`\"`)
-        case c == '\n':
-            result.WriteString(`\n`)
-        default:
-            result.WriteRune(c)
-        }
+    if includeDoubleQuote {
+        return escapeWithDoubleQuote.Replace(v)
    }
-    return result.String()
+
+    return escape.Replace(v)
}
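The rewritten `escapeString` trades the per-rune loop for package-level `strings.Replacer` values, which are built once and are safe for concurrent use. A standalone sketch of the same technique (the variable name mirrors the one above but is otherwise illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// Built once; Replace scans the input a single time for all pairs.
var escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)

func main() {
	// Backslash becomes `\\` and a real newline becomes the two characters `\n`.
	fmt.Println(escape.Replace("a\\b\nc")) // a\\b\nc
}
```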


@@ -47,7 +47,7 @@ func (e ParseError) Error() string {
}

 // TextParser is used to parse the simple and flat text-based exchange format. Its
-// nil value is ready to use.
+// zero value is ready to use.
 type TextParser struct {
    metricFamiliesByName map[string]*dto.MetricFamily
    buf                  *bufio.Reader // Where the parsed input is read through.
@@ -108,6 +108,13 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
            delete(p.metricFamiliesByName, k)
        }
    }
+    // If p.err is io.EOF now, we have run into a premature end of the input
+    // stream. Turn this error into something nicer and more
+    // meaningful. (io.EOF is often used as a signal for the legitimate end
+    // of an input stream.)
+    if p.err == io.EOF {
+        p.parseError("unexpected end of input stream")
+    }
    return p.metricFamiliesByName, p.err
}
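A hedged sketch of driving `TextParser` from its zero value, including the new premature-end-of-input handling (the exposition text is invented):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	var parser expfmt.TextParser // zero value is ready to use

	in := "# TYPE demo_total counter\ndemo_total 42\n"
	mfs, err := parser.TextToMetricFamilies(strings.NewReader(in))
	fmt.Println(len(mfs), err) // 1 <nil>

	// A stream cut off mid-sample should now surface as a parse error
	// ("unexpected end of input stream") rather than a bare io.EOF.
	_, err = parser.TextToMetricFamilies(strings.NewReader("demo_total"))
	fmt.Println(err)
}
```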


@@ -0,0 +1,89 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package log
import (
"fmt"
"os"
"golang.org/x/sys/windows/svc/eventlog"
"github.com/Sirupsen/logrus"
)
func init() {
setEventlogFormatter = func(name string, debugAsInfo bool) error {
if name == "" {
return fmt.Errorf("missing name parameter")
}
fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
origLogger.Errorf("can't connect logger to eventlog: %v", err)
return err
}
origLogger.Formatter = fmter
return nil
}
}
type eventlogger struct {
log *eventlog.Log
debugAsInfo bool
wrap logrus.Formatter
}
func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
logHandle, err := eventlog.Open(name)
if err != nil {
return nil, err
}
return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
}
func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
data, err := s.wrap.Format(e)
if err != nil {
fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
return data, err
}
switch e.Level {
case logrus.PanicLevel:
fallthrough
case logrus.FatalLevel:
fallthrough
case logrus.ErrorLevel:
err = s.log.Error(102, e.Message)
case logrus.WarnLevel:
err = s.log.Warning(101, e.Message)
case logrus.InfoLevel:
err = s.log.Info(100, e.Message)
case logrus.DebugLevel:
if s.debugAsInfo {
err = s.log.Info(100, e.Message)
}
default:
err = s.log.Info(100, e.Message)
}
if err != nil {
fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
}
return data, err
}


@@ -16,19 +16,23 @@ package log
 import (
    "flag"
    "fmt"
+    "io"
+    "io/ioutil"
+    "log"
    "net/url"
    "os"
    "runtime"
+    "strconv"
    "strings"

    "github.com/Sirupsen/logrus"
)

-type levelFlag struct{}
+type levelFlag string

 // String implements flag.Value.
 func (f levelFlag) String() string {
-    return origLogger.Level.String()
+    return fmt.Sprintf("%q", string(f))
}

 // Set implements flag.Value.
@@ -44,20 +48,23 @@ func (f levelFlag) Set(level string) error {
 // setSyslogFormatter is nil if the target architecture does not support syslog.
 var setSyslogFormatter func(string, string) error

+// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
+var setEventlogFormatter func(string, bool) error
+
 func setJSONFormatter() {
    origLogger.Formatter = &logrus.JSONFormatter{}
}

-type logFormatFlag struct{ uri string }
+type logFormatFlag url.URL

 // String implements flag.Value.
 func (f logFormatFlag) String() string {
-    return f.uri
+    u := url.URL(f)
+    return fmt.Sprintf("%q", u.String())
}

 // Set implements flag.Value.
 func (f logFormatFlag) Set(format string) error {
-    f.uri = format
    u, err := url.Parse(format)
    if err != nil {
        return err
@@ -78,24 +85,49 @@ func (f logFormatFlag) Set(format string) error {
        appname := u.Query().Get("appname")
        facility := u.Query().Get("local")
        return setSyslogFormatter(appname, facility)
+    case "eventlog":
+        if setEventlogFormatter == nil {
+            return fmt.Errorf("system does not support eventlog")
+        }
+        name := u.Query().Get("name")
+        debugAsInfo := false
+        debugAsInfoRaw := u.Query().Get("debugAsInfo")
+        if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
+            debugAsInfo = parsedDebugAsInfo
+        }
+        return setEventlogFormatter(name, debugAsInfo)
    case "stdout":
        origLogger.Out = os.Stdout
    case "stderr":
        origLogger.Out = os.Stderr
    default:
-        return fmt.Errorf("unsupported logger %s", u.Opaque)
+        return fmt.Errorf("unsupported logger %q", u.Opaque)
    }
    return nil
}

 func init() {
-    // In order for these flags to take effect, the user of the package must call
-    // flag.Parse() before logging anything.
-    flag.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal].")
-    flag.Var(logFormatFlag{}, "log.format", "If set use a syslog logger or JSON logging. Example: logger:syslog?appname=bob&local=7 or logger:stdout?json=true. Defaults to stderr.")
+    AddFlags(flag.CommandLine)
}

+// AddFlags adds the flags used by this package to the given FlagSet. That's
+// useful if working with a custom FlagSet. The init function of this package
+// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
+// flag.Parse() to make the logging flags take effect.
+func AddFlags(fs *flag.FlagSet) {
+    fs.Var(
+        levelFlag(origLogger.Level.String()),
+        "log.level",
+        "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
+    )
+    fs.Var(
+        logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}),
+        "log.format",
+        `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`,
+    )
+}
+
+// Logger is the interface for loggers used in the Prometheus components.
 type Logger interface {
    Debug(...interface{})
    Debugln(...interface{})
@@ -220,10 +252,26 @@ func (l logger) sourced() *logrus.Entry {
 var origLogger = logrus.New()
 var baseLogger = logger{entry: logrus.NewEntry(origLogger)}

+// Base returns the default Logger logging to
 func Base() Logger {
    return baseLogger
}

+// NewLogger returns a new Logger logging to out.
+func NewLogger(w io.Writer) Logger {
+    l := logrus.New()
+    l.Out = w
+    return logger{entry: logrus.NewEntry(l)}
+}
+
+// NewNopLogger returns a logger that discards all log messages.
+func NewNopLogger() Logger {
+    l := logrus.New()
+    l.Out = ioutil.Discard
+    return logger{entry: logrus.NewEntry(l)}
+}
+
+// With adds a field to the logger.
 func With(key string, value interface{}) Logger {
    return baseLogger.With(key, value)
}
@@ -233,7 +281,7 @@ func Debug(args ...interface{}) {
    baseLogger.sourced().Debug(args...)
}

-// Debug logs a message at level Debug on the standard logger.
+// Debugln logs a message at level Debug on the standard logger.
 func Debugln(args ...interface{}) {
    baseLogger.sourced().Debugln(args...)
}
@@ -248,7 +296,7 @@ func Info(args ...interface{}) {
    baseLogger.sourced().Info(args...)
}

-// Info logs a message at level Info on the standard logger.
+// Infoln logs a message at level Info on the standard logger.
 func Infoln(args ...interface{}) {
    baseLogger.sourced().Infoln(args...)
}
@@ -263,7 +311,7 @@ func Warn(args ...interface{}) {
    baseLogger.sourced().Warn(args...)
}

-// Warn logs a message at level Warn on the standard logger.
+// Warnln logs a message at level Warn on the standard logger.
 func Warnln(args ...interface{}) {
    baseLogger.sourced().Warnln(args...)
}
@@ -278,7 +326,7 @@ func Error(args ...interface{}) {
    baseLogger.sourced().Error(args...)
}

-// Error logs a message at level Error on the standard logger.
+// Errorln logs a message at level Error on the standard logger.
 func Errorln(args ...interface{}) {
    baseLogger.sourced().Errorln(args...)
}
@@ -293,7 +341,7 @@ func Fatal(args ...interface{}) {
    baseLogger.sourced().Fatal(args...)
}

-// Fatal logs a message at level Fatal on the standard logger.
+// Fatalln logs a message at level Fatal on the standard logger.
 func Fatalln(args ...interface{}) {
    baseLogger.sourced().Fatalln(args...)
}
@@ -302,3 +350,16 @@ func Fatalln(args ...interface{}) {
 func Fatalf(format string, args ...interface{}) {
    baseLogger.sourced().Fatalf(format, args...)
}
+
+type errorLogWriter struct{}
+
+func (errorLogWriter) Write(b []byte) (int, error) {
+    baseLogger.sourced().Error(string(b))
+    return len(b), nil
+}
+
+// NewErrorLogger returns a log.Logger that is meant to be used
+// in the ErrorLog field of an http.Server to log HTTP server errors.
+func NewErrorLogger() *log.Logger {
+    return log.New(&errorLogWriter{}, "", 0)
+}
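A hedged sketch of how a program would consume the flags and constructors added above. The `-log.level` and `-log.format` flag names come from this file; the HTTP server wiring and field names are illustrative only.

```go
package main

import (
	"flag"
	"net/http"

	"github.com/prometheus/common/log"
)

func main() {
	// The package's init already registers the flags on flag.CommandLine
	// (AddFlags is for custom FlagSets). Parsing makes
	// -log.level and -log.format take effect.
	flag.Parse()

	logger := log.With("component", "example")
	logger.Infoln("starting")

	srv := &http.Server{
		Addr:     ":8080",
		ErrorLog: log.NewErrorLogger(), // HTTP server errors flow through the package logger
	}
	logger.Fatal(srv.ListenAndServe())
}
```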


@@ -80,14 +80,18 @@ const (
    QuantileLabel = "quantile"
)

-// LabelNameRE is a regular expression matching valid label names.
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
 var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")

 // A LabelName is a key for a LabelSet or Metric. It has a value associated
 // therewith.
 type LabelName string

-// IsValid is true iff the label name matches the pattern of LabelNameRE.
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
 func (ln LabelName) IsValid() bool {
    if len(ln) == 0 {
        return false
@@ -106,7 +110,7 @@ func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
    if err := unmarshal(&s); err != nil {
        return err
    }
-    if !LabelNameRE.MatchString(s) {
+    if !LabelName(s).IsValid() {
        return fmt.Errorf("%q is not a valid label name", s)
    }
    *ln = LabelName(s)
@@ -119,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
    if err := json.Unmarshal(b, &s); err != nil {
        return err
    }
-    if !LabelNameRE.MatchString(s) {
+    if !LabelName(s).IsValid() {
        return fmt.Errorf("%q is not a valid label name", s)
    }
    *ln = LabelName(s)
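A small sketch contrasting the regexp with the new hardcoded check; both paths should agree on any input (the sample names are invented):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for _, name := range []string{"job", "__name__", "0bad", "has-dash"} {
		fmt.Println(name,
			model.LabelName(name).IsValid(),     // fast, hardcoded loop
			model.LabelNameRE.MatchString(name)) // same answer via the regexp
	}
}
```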


@@ -160,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
    // LabelName as a string and does not call its UnmarshalJSON method.
    // Thus, we have to replicate the behavior here.
    for ln := range m {
-        if !LabelNameRE.MatchString(string(ln)) {
+        if !ln.IsValid() {
            return fmt.Errorf("%q is not a valid label name", ln)
        }
    }


@@ -21,8 +21,11 @@ import (
)

 var (
    separator = []byte{0}
-    MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+    // MetricNameRE is a regular expression matching valid metric
+    // names. Note that the IsValidMetricName function performs the same
+    // check but faster than a match with this regular expression.
+    MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
)

 // A Metric is similar to a LabelSet, but the key difference is that a Metric is
@@ -85,6 +88,8 @@ func (m Metric) FastFingerprint() Fingerprint {
}

 // IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
 func IsValidMetricName(n LabelValue) bool {
    if len(n) == 0 {
        return false


@@ -12,5 +12,5 @@
 // limitations under the License.

 // Package model contains common data structures that are shared across
-// Prometheus componenets and libraries.
+// Prometheus components and libraries.
 package model


@@ -98,7 +98,7 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
 // specified LabelNames into the signature calculation. The labels passed in
 // will be sorted by this function.
 func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
-    if len(m) == 0 || len(labels) == 0 {
+    if len(labels) == 0 {
        return emptyLabelSignature
    }


@@ -163,51 +163,70 @@ func (t *Time) UnmarshalJSON(b []byte) error {
 // This type should not propagate beyond the scope of input/output processing.
 type Duration time.Duration

+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
 // StringToDuration parses a string into a time.Duration, assuming that a year
-// a day always has 24h.
+// always has 365d, a week always has 7d, and a day always has 24h.
 func ParseDuration(durationStr string) (Duration, error) {
    matches := durationRE.FindStringSubmatch(durationStr)
    if len(matches) != 3 {
        return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
    }
-    durSeconds, _ := strconv.Atoi(matches[1])
-    dur := time.Duration(durSeconds) * time.Second
-    unit := matches[2]
-    switch unit {
+    var (
+        n, _ = strconv.Atoi(matches[1])
+        dur  = time.Duration(n) * time.Millisecond
+    )
+    switch unit := matches[2]; unit {
+    case "y":
+        dur *= 1000 * 60 * 60 * 24 * 365
+    case "w":
+        dur *= 1000 * 60 * 60 * 24 * 7
    case "d":
-        dur *= 60 * 60 * 24
+        dur *= 1000 * 60 * 60 * 24
    case "h":
-        dur *= 60 * 60
+        dur *= 1000 * 60 * 60
    case "m":
-        dur *= 60
+        dur *= 1000 * 60
    case "s":
-        dur *= 1
+        dur *= 1000
+    case "ms":
+        // Value already correct
    default:
        return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
    }
    return Duration(dur), nil
}

-var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
-
 func (d Duration) String() string {
-    seconds := int64(time.Duration(d) / time.Second)
+    var (
+        ms   = int64(time.Duration(d) / time.Millisecond)
+        unit = "ms"
+    )
    factors := map[string]int64{
-        "d": 60 * 60 * 24,
-        "h": 60 * 60,
-        "m": 60,
-        "s": 1,
+        "y":  1000 * 60 * 60 * 24 * 365,
+        "w":  1000 * 60 * 60 * 24 * 7,
+        "d":  1000 * 60 * 60 * 24,
+        "h":  1000 * 60 * 60,
+        "m":  1000 * 60,
+        "s":  1000,
+        "ms": 1,
    }
-    unit := "s"
    switch int64(0) {
-    case seconds % factors["d"]:
+    case ms % factors["y"]:
+        unit = "y"
+    case ms % factors["w"]:
+        unit = "w"
+    case ms % factors["d"]:
        unit = "d"
-    case seconds % factors["h"]:
+    case ms % factors["h"]:
        unit = "h"
-    case seconds % factors["m"]:
+    case ms % factors["m"]:
        unit = "m"
+    case ms % factors["s"]:
+        unit = "s"
    }
-    return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
+    return fmt.Sprintf("%v%v", ms/factors[unit], unit)
}

 // MarshalYAML implements the yaml.Marshaler interface.
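A worked sketch of the millisecond-based parsing and formatting above (values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("2d") // 2 * 24h, since a day is fixed at 24h
	fmt.Println(time.Duration(d), err)  // 48h0m0s <nil>

	// String picks the largest unit that divides the value evenly.
	fmt.Println(model.Duration(90 * time.Second))       // 90s
	fmt.Println(model.Duration(500 * time.Millisecond)) // 500ms
}
```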


@@ -16,11 +16,28 @@ package model
 import (
    "encoding/json"
    "fmt"
+    "math"
    "sort"
    "strconv"
    "strings"
)

+var (
+    // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+    // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+    // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+    // of 0, which is possible to appear in a real SamplePair and thus not
+    // suitable to signal a non-existing SamplePair.
+    ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+    // ZeroSample is the pseudo zero-value of Sample used to signal a
+    // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+    // and metric nil. Note that the natural zero value of Sample has a timestamp
+    // of 0, which is possible to appear in a real Sample and thus not suitable
+    // to signal a non-existing Sample.
+    ZeroSample = Sample{Timestamp: Earliest}
+)
+
 // A SampleValue is a representation of a value for a given sample at a given
 // time.
 type SampleValue float64
@@ -43,8 +60,14 @@ func (v *SampleValue) UnmarshalJSON(b []byte) error {
    return nil
}

+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
 func (v SampleValue) Equal(o SampleValue) bool {
-    return v == o
+    if v == o {
+        return true
+    }
+    return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
}

 func (v SampleValue) String() string {
@@ -77,9 +100,9 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
}

 // Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps.
+// Timestamps. The sematics of Value equality is defined by SampleValue.Equal.
 func (s *SamplePair) Equal(o *SamplePair) bool {
-    return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
+    return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
}

 func (s SamplePair) String() string {
@@ -93,7 +116,8 @@ type Sample struct {
    Timestamp Time        `json:"timestamp"`
}

-// Equal compares first the metrics, then the timestamp, then the value.
+// Equal compares first the metrics, then the timestamp, then the value. The
+// sematics of value equality is defined by SampleValue.Equal.
 func (s *Sample) Equal(o *Sample) bool {
    if s == o {
        return true
@@ -105,7 +129,7 @@ func (s *Sample) Equal(o *Sample) bool {
    if !s.Timestamp.Equal(o.Timestamp) {
        return false
    }
-    if s.Value != o.Value {
+    if s.Value.Equal(o.Value) {
        return false
    }
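A quick sketch of the NaN-aware equality introduced above (values are illustrative):

```go
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	nan := model.SampleValue(math.NaN())

	fmt.Println(nan == nan)     // false: normal float semantics
	fmt.Println(nan.Equal(nan)) // true: two NaNs count as equal

	a := model.SamplePair{Timestamp: 1, Value: nan}
	b := model.SamplePair{Timestamp: 1, Value: nan}
	fmt.Println(a.Equal(&b)) // true, since Equal delegates to SampleValue.Equal
}
```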

vendor/golang.org/x/sys/PATENTS generated vendored Normal file

@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.


@@ -1,29 +1,28 @@
-// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

+// +build linux
+// +build mips64 mips64le
 // +build !gccgo

 #include "textflag.h"

 //
-// System call support for 386, FreeBSD
+// System calls for mips64, Linux
 //
 // Just jump to package syscall's implementation for all these functions.
 // The runtime may know about them.

-TEXT ·Syscall(SB),NOSPLIT,$0-32
+TEXT ·Syscall(SB),NOSPLIT,$0-56
    JMP syscall·Syscall(SB)

-TEXT ·Syscall6(SB),NOSPLIT,$0-44
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
    JMP syscall·Syscall6(SB)

-TEXT ·Syscall9(SB),NOSPLIT,$0-56
-    JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-32
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
    JMP syscall·RawSyscall(SB)

-TEXT ·RawSyscall6(SB),NOSPLIT,$0-44
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
    JMP syscall·RawSyscall6(SB)

vendor/golang.org/x/sys/unix/asm_linux_mipsx.s generated vendored Normal file

@@ -0,0 +1,31 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
// +build mips mipsle
// +build !gccgo
#include "textflag.h"
//
// System calls for mips, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)

vendor/golang.org/x/sys/unix/asm_linux_s390x.s generated vendored Normal file

@@ -0,0 +1,28 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x
// +build linux
// +build !gccgo
#include "textflag.h"
//
// System calls for s390x, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
BR syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
BR syscall·Syscall6(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
BR syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
BR syscall·RawSyscall6(SB)

vendor/golang.org/x/sys/unix/bluetooth_linux.go generated vendored Normal file

@@ -0,0 +1,35 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Bluetooth sockets and messages
package unix
// Bluetooth Protocols
const (
BTPROTO_L2CAP = 0
BTPROTO_HCI = 1
BTPROTO_SCO = 2
BTPROTO_RFCOMM = 3
BTPROTO_BNEP = 4
BTPROTO_CMTP = 5
BTPROTO_HIDP = 6
BTPROTO_AVDTP = 7
)
const (
HCI_CHANNEL_RAW = 0
HCI_CHANNEL_USER = 1
HCI_CHANNEL_MONITOR = 2
HCI_CHANNEL_CONTROL = 3
)
// Socketoption Level
const (
SOL_BLUETOOTH = 0x112
SOL_HCI = 0x0
SOL_L2CAP = 0x6
SOL_RFCOMM = 0x12
SOL_SCO = 0x11
)


@@ -1,4 +1,4 @@
-// +build linux,386 linux,arm
+// +build linux,386 linux,arm linux,mips linux,mipsle

 // Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style

vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go generated vendored Normal file

@@ -0,0 +1,20 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gccgo,linux,sparc64
package unix
import "syscall"
//extern sysconf
func realSysconf(name int) int64
func sysconf(name int) (n int64, err syscall.Errno) {
r := realSysconf(name)
if r < 0 {
return 0, syscall.GetErrno()
}
return r, 0
}


@@ -161,7 +161,7 @@ freebsd_arm)
    mkerrors="$mkerrors"
    mksyscall="./mksyscall.pl -l32 -arm"
    mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
-    # Let the type of C char be singed for making the bare syscall
+    # Let the type of C char be signed for making the bare syscall
    # API consistent across over platforms.
    mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
    ;;
@@ -194,7 +194,7 @@ linux_arm64)
        exit 1
    fi
    mksysnum="./mksysnum_linux.pl $unistd_h"
-    # Let the type of C char be singed for making the bare syscall
+    # Let the type of C char be signed for making the bare syscall
    # API consistent across over platforms.
    mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
    ;;
@@ -212,6 +212,24 @@ linux_ppc64le)
    mksysnum="./mksysnum_linux.pl $unistd_h"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
+linux_s390x)
+    GOOSARCH_in=syscall_linux_s390x.go
+    unistd_h=/usr/include/asm/unistd.h
+    mkerrors="$mkerrors -m64"
+    mksysnum="./mksysnum_linux.pl $unistd_h"
+    # Let the type of C char be signed to make the bare sys
+    # API more consistent between platforms.
+    # This is a deliberate departure from the way the syscall
+    # package generates its version of the types file.
+    mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+    ;;
+linux_sparc64)
+    GOOSARCH_in=syscall_linux_sparc64.go
+    unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h
+    mkerrors="$mkerrors -m64"
+    mksysnum="./mksysnum_linux.pl $unistd_h"
+    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+    ;;
 netbsd_386)
    mkerrors="$mkerrors -m32"
    mksyscall="./mksyscall.pl -l32 -netbsd"
@@ -269,6 +287,6 @@ esac
    if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
    if [ -n "$mktypes" ]; then
        echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go";
-        echo "$mktypes types_$GOOS.go | gofmt >>ztypes_$GOOSARCH.go";
+        echo "$mktypes types_$GOOS.go | go run mkpost.go >>ztypes_$GOOSARCH.go";
    fi
) | $run


@@ -127,6 +127,8 @@ includes_Linux='
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/icmpv6.h>
+#include <linux/serial.h>
+#include <linux/can.h>
 #include <net/route.h>
 #include <asm/termbits.h>
@@ -141,6 +143,12 @@ includes_Linux='
 #ifndef PTRACE_SETREGS
 #define PTRACE_SETREGS 0xd
 #endif
+
+#ifdef SOL_BLUETOOTH
+// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
+// but it is already in bluetooth_linux.go
+#undef SOL_BLUETOOTH
+#endif
 '

 includes_NetBSD='
@@ -332,6 +340,7 @@ ccflags="$@"
        $2 !~ /^(BPF_TIMEVAL)$/ &&
        $2 ~ /^(BPF|DLT)_/ ||
        $2 ~ /^CLOCK_/ ||
+        $2 ~ /^CAN_/ ||
        $2 !~ "WMESGLEN" &&
        $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)}
        $2 ~ /^__WCOREFLAG$/ {next}

vendor/golang.org/x/sys/unix/mkpost.go generated vendored Normal file

@@ -0,0 +1,62 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// mkpost processes the output of cgo -godefs to
// modify the generated types. It is used to clean up
// the sys API in an architecture specific manner.
//
// mkpost is run after cgo -godefs by mkall.sh.
package main
import (
"fmt"
"go/format"
"io/ioutil"
"log"
"os"
"regexp"
)
func main() {
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
s := string(b)
goarch := os.Getenv("GOARCH")
goos := os.Getenv("GOOS")
if goarch == "s390x" && goos == "linux" {
// Export the types of PtraceRegs fields.
re := regexp.MustCompile("ptrace(Psw|Fpregs|Per)")
s = re.ReplaceAllString(s, "Ptrace$1")
// Replace padding fields inserted by cgo with blank identifiers.
re = regexp.MustCompile("Pad_cgo[A-Za-z0-9_]*")
s = re.ReplaceAllString(s, "_")
// Replace other unwanted fields with blank identifiers.
re = regexp.MustCompile("X_[A-Za-z0-9_]*")
s = re.ReplaceAllString(s, "_")
// Replace the control_regs union with a blank identifier for now.
re = regexp.MustCompile("(Control_regs)\\s+\\[0\\]uint64")
s = re.ReplaceAllString(s, "_ [0]uint64")
}
// gofmt
b, err = format.Source([]byte(s))
if err != nil {
log.Fatal(err)
}
// Append this command to the header to show where the new file
// came from.
re := regexp.MustCompile("(cgo -godefs [a-zA-Z0-9_]+\\.go.*)")
b = re.ReplaceAll(b, []byte("$1 | go run mkpost.go"))
fmt.Printf("%s", b)
}


@@ -23,6 +23,8 @@ package unix
 const(
 EOF

+my $offset = 0;
+
 sub fmt {
    my ($name, $num) = @_;
    if($num > 999){
@@ -31,13 +33,18 @@ sub fmt {
        return;
    }
    $name =~ y/a-z/A-Z/;
+    $num = $num + $offset;
    print "    SYS_$name = $num;\n";
}

 my $prev;
 open(GCC, "gcc -E -dD $ARGV[0] |") || die "can't run gcc";
 while(<GCC>){
-    if(/^#define __NR_syscalls\s+/) {
+    if(/^#define __NR_Linux\s+([0-9]+)/){
+        # mips/mips64: extract offset
+        $offset = $1;
+    }
+    elsif(/^#define __NR_syscalls\s+/) {
        # ignore redefinitions of __NR_syscalls
    }
    elsif(/^#define __NR_(\w+)\s+([0-9]+)/){
@@ -51,6 +58,9 @@ while(<GCC>){
    elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){
        fmt($1, $prev+$2)
    }
+    elsif(/^#define __NR_(\w+)\s+\(__NR_Linux \+ ([0-9]+)/){
+        fmt($1, $2);
+    }
}

 print <<EOF;


@@ -62,7 +62,7 @@ func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) {
 func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) {
    h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
-    if h.Len < SizeofCmsghdr || int(h.Len) > len(b) {
+    if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) {
        return nil, nil, EINVAL
    }
    return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil


@@ -68,6 +68,8 @@ func (tv *Timeval) Nano() int64 {
    return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
}

+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
 // use is a no-op, but the compiler cannot see that it is.
 // Calling use(p) ensures that p is kept live until that point.
 //go:noescape


@@ -470,25 +470,11 @@ func Sysctl(name string) (string, error) {
}

 func SysctlArgs(name string, args ...int) (string, error) {
-    mib, err := sysctlmib(name, args...)
+    buf, err := SysctlRaw(name, args...)
    if err != nil {
        return "", err
    }
-
-    // Find size.
-    n := uintptr(0)
-    if err := sysctl(mib, nil, &n, nil, 0); err != nil {
-        return "", err
-    }
-    if n == 0 {
-        return "", nil
-    }
-
-    // Read into buffer of that size.
-    buf := make([]byte, n)
-    if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
-        return "", err
-    }
-
+    n := len(buf)
    // Throw away terminating NUL.
    if n > 0 && buf[n-1] == '\x00' {
Some files were not shown because too many files have changed in this diff.