Merge pull request #711 from prometheus/new-logging

Switch Prometheus to use github.com/prometheus/log.
Fabian Reinartz 2015-05-20 22:01:07 +02:00
commit 7227b02413
75 changed files with 3903 additions and 2159 deletions
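Most of the 75 files follow the same mechanical pattern: the `github.com/golang/glog` import is swapped for `github.com/prometheus/log`, and call sites keep their glog-style shape. A minimal sketch of a post-switch call site (hypothetical file, not part of this diff; `prometheus/log` is a thin glog-like wrapper around the logrus library vendored below):

```go
package main

// Hypothetical call site after the switch. The prometheus/log package
// exposes glog-style helpers (Infof, Errorln, Fatalf, ...) backed by logrus.
import "github.com/prometheus/log"

func main() {
	log.Infof("Starting %s...", "target manager")
	log.Errorln("an error-level line, same call shape as glog")
}
```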

Godeps/Godeps.json generated
View file

@@ -8,12 +8,13 @@
 		"Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675"
 	},
 	{
-		"ImportPath": "github.com/beorn7/perks/quantile",
-		"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
+		"ImportPath": "github.com/Sirupsen/logrus",
+		"Comment": "v0.7.3-8-g52919f1",
+		"Rev": "52919f182f9c314f8a38c5afe96506f73d02b4b2"
 	},
 	{
-		"ImportPath": "github.com/golang/glog",
-		"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
+		"ImportPath": "github.com/beorn7/perks/quantile",
+		"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
 	},
 	{
 		"ImportPath": "github.com/golang/protobuf/proto",
@@ -56,6 +57,10 @@
 		"Comment": "model-0.0.2-12-gfa8ad6f",
 		"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
 	},
+	{
+		"ImportPath": "github.com/prometheus/log",
+		"Rev": "c1344118e003a86aefb326a436118ad1317266dd"
+	},
 	{
 		"ImportPath": "github.com/prometheus/procfs",
 		"Rev": "490cc6eb5fa45bf8a8b7b73c8bc82a8160e8531d"

Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
View file

@@ -0,0 +1 @@
logrus

Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
View file

@@ -0,0 +1,8 @@
language: go
go:
- 1.2
- 1.3
- 1.4
- tip
install:
- go get -t ./...

Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md
View file

@@ -0,0 +1,11 @@
# 0.8
logrus: defaults to stderr instead of stdout
# 0.7.3
formatter/*: allow configuration of timestamp layout
# 0.7.2
formatter/text: Add configuration option for time format (#158)

Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
View file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
View file

@@ -0,0 +1,349 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
or Splunk:
```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.Formatter = new(log.TextFormatter)` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```
#### Example
The simplest way to use Logrus is the package-level exported logger:
```go
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
```
Note that it's completely API-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
```go
package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
}
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(log.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(log.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
// A common pattern is to re-use fields between logging statements by re-using
// the logrus.Entry returned from WithFields()
contextLogger := log.WithFields(log.Fields{
"common": "this is a common field",
"other": "I also should be logged always",
})
contextLogger.Info("I'll be logged with common and other field")
contextLogger.Info("Me too")
}
```
For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:
```go
package main
import (
"os"
"github.com/Sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stderr
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
}
```
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
log.WithFields(log.Fields{
"event": event,
"topic": topic,
"key": key,
}).Fatal("Failed to send event")
```
We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.
In general, with Logrus, using any of the `printf`-family functions should be
seen as a hint that you should add a field. However, you can still use the
`printf`-family functions with Logrus.
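For instance (a hypothetical call site), the same message written printf-style and field-style:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	addr := "10.0.0.1:9090" // hypothetical value for illustration
	// printf-style: the address is baked into an unparseable message string.
	log.Infof("Connected to %s", addr)
	// field-style: the address remains a discrete, queryable field.
	log.WithField("addr", addr).Info("Connected")
}
```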
#### Hooks
You can add hooks for logging levels. For example, to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
multiple places simultaneously, e.g. syslog.
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:
```go
import (
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
log.Error("Unable to connect to local syslog daemon")
} else {
log.AddHook(hook)
}
}
```
| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
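Writing your own hook only takes the two methods of the `Hook` interface. A minimal sketch (the `CountingHook` name and behavior are hypothetical, for illustration only):

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// CountingHook is a hypothetical custom hook that counts every entry
// fired at the levels it registers for.
type CountingHook struct {
	Count int
}

func (h *CountingHook) Fire(entry *log.Entry) error {
	h.Count++
	return nil
}

func (h *CountingHook) Levels() []log.Level {
	return []log.Level{log.InfoLevel, log.WarnLevel, log.ErrorLevel}
}

func main() {
	hook := &CountingHook{}
	log.AddHook(hook)
	log.Info("this entry is counted")
}
```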
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```
You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
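For example, a minimal sketch wiring that to a hypothetical `-verbose` command-line flag:

```go
package main

import (
	"flag"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// Hypothetical flag; gate debug logging on it.
	verbose := flag.Bool("verbose", false, "enable debug logging")
	flag.Parse()
	if *verbose {
		log.SetLevel(log.DebugLevel)
	}
	log.Debug("only emitted when -verbose is set")
}
```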
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:
1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
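So a call like `log.WithField("animal", "walrus").Info("A walrus appears")` yields, with the text formatter, a line along these lines (illustrative timestamp):

```text
time="2015-03-26T01:27:38-04:00" level=info msg="A walrus appears" animal=walrus
```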
#### Environments
Logrus has no notion of environment.
If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment you
could do:
```go
import (
log "github.com/Sirupsen/logrus"
)
func init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
}
}
```
This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.
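An equivalent sketch keyed off an environment variable instead of a global (the `APP_ENV` name is hypothetical):

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func init() {
	// Hypothetical convention: JSON logs only in production.
	if os.Getenv("APP_ENV") == "production" {
		log.SetFormatter(&log.JSONFormatter{})
	}
}

func main() {
	log.Info("formatter chosen from APP_ENV")
}
```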
#### Formatters
The built-in logging formatters are:
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY, set the
`DisableColors` field to `true`.
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
```go
logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
```
Third party logging formatters:
* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):
```go
type MyJSONFormatter struct {
}
log.SetFormatter(new(MyJSONFormatter))
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
```
#### Logger as an `io.Writer`
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go
w := logger.Writer()
defer w.Close()
srv := http.Server{
// create a stdlib log.Logger that writes to
// logrus.Logger.
ErrorLog: log.New(w, "", 0),
}
```
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
[godoc]: https://godoc.org/github.com/Sirupsen/logrus

Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
View file

@@ -0,0 +1,252 @@
package logrus
import (
"bytes"
"fmt"
"io"
"os"
"time"
)
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
return bytes.NewBuffer(serialized), err
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
reader, err := entry.Reader()
if err != nil {
return "", err
}
return reader.String(), err
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := Fields{}
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
func (entry *Entry) log(level Level, msg string) {
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
reader, err := entry.Reader()
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
}
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
_, err = io.Copy(entry.Logger.Out, reader)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
os.Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
View file

@@ -0,0 +1,53 @@
package logrus
import (
"bytes"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestEntryPanicln(t *testing.T) {
errBoom := fmt.Errorf("boom time")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicln("kaboom")
}
func TestEntryPanicf(t *testing.T) {
errBoom := fmt.Errorf("boom again")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom true", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
View file

@@ -0,0 +1,50 @@
package main
import (
"github.com/Sirupsen/logrus"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.JSONFormatter)
log.Formatter = new(logrus.TextFormatter) // default
log.Level = logrus.DebugLevel
}
func main() {
defer func() {
err := recover()
if err != nil {
log.WithFields(logrus.Fields{
"omg": true,
"err": err,
"number": 100,
}).Fatal("The ice breaks!")
}
}()
log.WithFields(logrus.Fields{
"animal": "walrus",
"number": 8,
}).Debug("Started observing beach")
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"temperature": -4,
}).Debug("Temperature changes")
log.WithFields(logrus.Fields{
"animal": "orca",
"size": 9009,
}).Panic("It's over 9000!")
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
View file

@@ -0,0 +1,30 @@
package main
import (
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.TextFormatter) // default
log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development"))
}
func main() {
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
View file

@@ -0,0 +1,188 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
func StandardLogger() *Logger {
return std
}
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.Level = level
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.Level
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
View file

@@ -0,0 +1,48 @@
package logrus
import "time"
const DefaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]`. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there, doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// would just silently drop the user-provided level. Instead, with this code
// it'll be logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
_, ok := data["time"]
if ok {
data["fields.time"] = data["time"]
}
_, ok = data["msg"]
if ok {
data["fields.msg"] = data["msg"]
}
_, ok = data["level"]
if ok {
data["fields.level"] = data["level"]
}
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
View file

@@ -0,0 +1,88 @@
package logrus
import (
"testing"
"time"
)
// smallFields is a small size data set for benchmarking
var smallFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
}
// largeFields is a large size data set for benchmarking
var largeFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
"five": "six",
"seven": "eight",
"nine": "ten",
"eleven": "twelve",
"thirteen": "fourteen",
"fifteen": "sixteen",
"seventeen": "eighteen",
"nineteen": "twenty",
"a": "b",
"c": "d",
"e": "f",
"g": "h",
"i": "j",
"k": "l",
"m": "n",
"o": "p",
"q": "r",
"s": "t",
"u": "v",
"w": "x",
"y": "z",
"this": "will",
"make": "thirty",
"entries": "yeah",
}
func BenchmarkSmallTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
}
func BenchmarkLargeTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
}
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
}
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
}
func BenchmarkSmallJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, smallFields)
}
func BenchmarkLargeJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, largeFields)
}
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
entry := &Entry{
Time: time.Time{},
Level: InfoLevel,
Message: "message",
Data: fields,
}
var d []byte
var err error
for i := 0; i < b.N; i++ {
d, err = formatter.Format(entry)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(d)))
}
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
View file

@@ -0,0 +1,56 @@
package logstash
import (
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
)
// Formatter generates json in logstash format.
// Logstash site: http://logstash.net/
type LogstashFormatter struct {
Type string // if not empty use for logstash type field.
// TimestampFormat sets the format used for timestamps.
TimestampFormat string
}
func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
entry.Data["@version"] = 1
if f.TimestampFormat == "" {
f.TimestampFormat = logrus.DefaultTimestampFormat
}
entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat)
// set message field
v, ok := entry.Data["message"]
if ok {
entry.Data["fields.message"] = v
}
entry.Data["message"] = entry.Message
// set level field
v, ok = entry.Data["level"]
if ok {
entry.Data["fields.level"] = v
}
entry.Data["level"] = entry.Level.String()
// set type field
if f.Type != "" {
v, ok = entry.Data["type"]
if ok {
entry.Data["fields.type"] = v
}
entry.Data["type"] = f.Type
}
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go
View file

@@ -0,0 +1,52 @@
package logstash
import (
"bytes"
"encoding/json"
"github.com/Sirupsen/logrus"
"github.com/stretchr/testify/assert"
"testing"
)
func TestLogstashFormatter(t *testing.T) {
assert := assert.New(t)
lf := LogstashFormatter{Type: "abc"}
fields := logrus.Fields{
"message": "def",
"level": "ijk",
"type": "lmn",
"one": 1,
"pi": 3.14,
"bool": true,
}
entry := logrus.WithFields(fields)
entry.Message = "msg"
entry.Level = logrus.InfoLevel
b, _ := lf.Format(entry)
var data map[string]interface{}
dec := json.NewDecoder(bytes.NewReader(b))
dec.UseNumber()
dec.Decode(&data)
// base fields
assert.Equal(json.Number("1"), data["@version"])
assert.NotEmpty(data["@timestamp"])
assert.Equal("abc", data["type"])
assert.Equal("msg", data["message"])
assert.Equal("info", data["level"])
// substituted fields
assert.Equal("def", data["fields.message"])
assert.Equal("ijk", data["fields.level"])
assert.Equal("lmn", data["fields.type"])
// formats
assert.Equal(json.Number("1"), data["one"])
assert.Equal(json.Number("3.14"), data["pi"])
assert.Equal(true, data["bool"])
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
View file

@@ -0,0 +1,122 @@
package logrus
import (
"testing"
"github.com/stretchr/testify/assert"
)
type TestHook struct {
Fired bool
}
func (hook *TestHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *TestHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookFires(t *testing.T) {
hook := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
assert.Equal(t, hook.Fired, false)
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}
type ModifyHook struct {
}
func (hook *ModifyHook) Fire(entry *Entry) error {
entry.Data["wow"] = "whale"
return nil
}
func (hook *ModifyHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookCanModifyEntry(t *testing.T) {
hook := new(ModifyHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
})
}
func TestCanFireMultipleHooks(t *testing.T) {
hook1 := new(ModifyHook)
hook2 := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook1)
log.Hooks.Add(hook2)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
assert.Equal(t, hook2.Fired, true)
})
}
type ErrorHook struct {
Fired bool
}
func (hook *ErrorHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *ErrorHook) Levels() []Level {
return []Level{
ErrorLevel,
}
}
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, false)
})
}
func TestErrorHookShouldFireOnError(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Error("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
View file

@@ -0,0 +1,34 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers; you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type levelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks levelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks levelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
View file

@@ -0,0 +1,54 @@
package airbrake
import (
"errors"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/tobi/airbrake-go"
)
// AirbrakeHook to send exceptions to an exception-tracking service compatible
// with the Airbrake API.
type airbrakeHook struct {
APIKey string
Endpoint string
Environment string
}
func NewHook(endpoint, apiKey, env string) *airbrakeHook {
return &airbrakeHook{
APIKey: apiKey,
Endpoint: endpoint,
Environment: env,
}
}
func (hook *airbrakeHook) Fire(entry *logrus.Entry) error {
airbrake.ApiKey = hook.APIKey
airbrake.Endpoint = hook.Endpoint
airbrake.Environment = hook.Environment
var notifyErr error
err, ok := entry.Data["error"].(error)
if ok {
notifyErr = err
} else {
notifyErr = errors.New(entry.Message)
}
airErr := airbrake.Notify(notifyErr)
if airErr != nil {
return fmt.Errorf("Failed to send error to Airbrake: %s", airErr)
}
return nil
}
func (hook *airbrakeHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go
View file

@@ -0,0 +1,133 @@
package airbrake
import (
"encoding/xml"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/Sirupsen/logrus"
)
type notice struct {
Error NoticeError `xml:"error"`
}
type NoticeError struct {
Class string `xml:"class"`
Message string `xml:"message"`
}
type customErr struct {
msg string
}
func (e *customErr) Error() string {
return e.msg
}
const (
testAPIKey = "abcxyz"
testEnv = "development"
expectedClass = "*airbrake.customErr"
expectedMsg = "foo"
unintendedMsg = "Airbrake will not see this string"
)
var (
noticeError = make(chan NoticeError, 1)
)
// TestLogEntryMessageReceived checks that invoking Logrus' log.Error
// method causes an XML payload containing the log entry message to be received
// by an HTTP server emulating an Airbrake-compatible endpoint.
func TestLogEntryMessageReceived(t *testing.T) {
log := logrus.New()
ts := startAirbrakeServer(t)
defer ts.Close()
hook := NewHook(ts.URL, testAPIKey, "production")
log.Hooks.Add(hook)
log.Error(expectedMsg)
select {
case received := <-noticeError:
if received.Message != expectedMsg {
t.Errorf("Unexpected message received: %s", received.Message)
}
case <-time.After(time.Second):
t.Error("Timed out; no notice received by Airbrake API")
}
}
// TestLogEntryWithErrorReceived confirms that, when passing an error type using
// logrus.Fields, an HTTP server emulating an Airbrake endpoint receives the
// error message returned by the Error() method on the error interface
// rather than the logrus.Entry.Message string.
func TestLogEntryWithErrorReceived(t *testing.T) {
log := logrus.New()
ts := startAirbrakeServer(t)
defer ts.Close()
hook := NewHook(ts.URL, testAPIKey, "production")
log.Hooks.Add(hook)
log.WithFields(logrus.Fields{
"error": &customErr{expectedMsg},
}).Error(unintendedMsg)
select {
case received := <-noticeError:
if received.Message != expectedMsg {
t.Errorf("Unexpected message received: %s", received.Message)
}
if received.Class != expectedClass {
t.Errorf("Unexpected error class: %s", received.Class)
}
case <-time.After(time.Second):
t.Error("Timed out; no notice received by Airbrake API")
}
}
// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a
// non-error type using logrus.Fields, an HTTP server emulating an Airbrake
// endpoint receives the logrus.Entry.Message string.
//
// Only error types are supported when setting the 'error' field using
// logrus.WithFields().
func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) {
log := logrus.New()
ts := startAirbrakeServer(t)
defer ts.Close()
hook := NewHook(ts.URL, testAPIKey, "production")
log.Hooks.Add(hook)
log.WithFields(logrus.Fields{
"error": expectedMsg,
}).Error(unintendedMsg)
select {
case received := <-noticeError:
if received.Message != unintendedMsg {
t.Errorf("Unexpected message received: %s", received.Message)
}
case <-time.After(time.Second):
t.Error("Timed out; no notice received by Airbrake API")
}
}
func startAirbrakeServer(t *testing.T) *httptest.Server {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var notice notice
if err := xml.NewDecoder(r.Body).Decode(&notice); err != nil {
t.Error(err)
}
r.Body.Close()
noticeError <- notice.Error
}))
return ts
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go
View file

@@ -0,0 +1,68 @@
package logrus_bugsnag
import (
"errors"
"github.com/Sirupsen/logrus"
"github.com/bugsnag/bugsnag-go"
)
type bugsnagHook struct{}
// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before
// bugsnag.Configure. Bugsnag must be configured before the hook.
var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook")
// ErrBugsnagSendFailed indicates that the hook failed to submit an error to
// bugsnag. The error was successfully generated, but `bugsnag.Notify()`
// failed.
type ErrBugsnagSendFailed struct {
err error
}
func (e ErrBugsnagSendFailed) Error() string {
return "failed to send error to Bugsnag: " + e.err.Error()
}
// NewBugsnagHook initializes a logrus hook which sends exceptions to an
// exception-tracking service compatible with the Bugsnag API. Before using
// this hook, you must call bugsnag.Configure(). The returned object should be
// registered with a log via `AddHook()`.
//
// Entries that trigger an Error, Fatal or Panic should now include an "error"
// field to send to Bugsnag.
func NewBugsnagHook() (*bugsnagHook, error) {
if bugsnag.Config.APIKey == "" {
return nil, ErrBugsnagUnconfigured
}
return &bugsnagHook{}, nil
}
// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the
// "error" field (or the Message if the error isn't present) and sends it off.
func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
var notifyErr error
err, ok := entry.Data["error"].(error)
if ok {
notifyErr = err
} else {
notifyErr = errors.New(entry.Message)
}
bugsnagErr := bugsnag.Notify(notifyErr)
if bugsnagErr != nil {
return ErrBugsnagSendFailed{bugsnagErr}
}
return nil
}
// Levels enumerates the log levels on which the error should be forwarded to
// bugsnag: everything at or above the "Error" level.
func (hook *bugsnagHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go
View file

@@ -0,0 +1,64 @@
package logrus_bugsnag
import (
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/Sirupsen/logrus"
"github.com/bugsnag/bugsnag-go"
)
type notice struct {
Events []struct {
Exceptions []struct {
Message string `json:"message"`
} `json:"exceptions"`
} `json:"events"`
}
func TestNoticeReceived(t *testing.T) {
msg := make(chan string, 1)
expectedMsg := "foo"
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var notice notice
data, _ := ioutil.ReadAll(r.Body)
if err := json.Unmarshal(data, &notice); err != nil {
t.Error(err)
}
_ = r.Body.Close()
msg <- notice.Events[0].Exceptions[0].Message
}))
defer ts.Close()
hook := &bugsnagHook{}
bugsnag.Configure(bugsnag.Configuration{
Endpoint: ts.URL,
ReleaseStage: "production",
APIKey: "12345678901234567890123456789012",
Synchronous: true,
})
log := logrus.New()
log.Hooks.Add(hook)
log.WithFields(logrus.Fields{
"error": errors.New(expectedMsg),
}).Error("Bugsnag will not see this string")
select {
case received := <-msg:
if received != expectedMsg {
t.Errorf("Unexpected message received: %s", received)
}
case <-time.After(time.Second):
t.Error("Timed out; no notice received by Bugsnag API")
}
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
View file

@@ -0,0 +1,28 @@
# Papertrail Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).
In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.
## Usage
You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.
For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/papertrail"
)
func main() {
log := logrus.New()
hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)
if err == nil {
log.Hooks.Add(hook)
}
}
```

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
View file

@@ -0,0 +1,55 @@
package logrus_papertrail
import (
"fmt"
"net"
"os"
"time"
"github.com/Sirupsen/logrus"
)
const (
format = "Jan 2 15:04:05"
)
// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
type PapertrailHook struct {
Host string
Port int
AppName string
UDPConn net.Conn
}
// NewPapertrailHook creates a hook to be added to an instance of logger.
func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
return &PapertrailHook{host, port, appName, conn}, err
}
// Fire is called when a log event is fired.
func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
date := time.Now().Format(format)
msg, _ := entry.String()
payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg)
bytesWritten, err := hook.UDPConn.Write([]byte(payload))
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
return err
}
return nil
}
// Levels returns the available logging levels.
func (hook *PapertrailHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
View file

@@ -0,0 +1,26 @@
package logrus_papertrail
import (
"fmt"
"testing"
"github.com/Sirupsen/logrus"
"github.com/stvp/go-udp-testing"
)
func TestWritingToUDP(t *testing.T) {
port := 16661
udp.SetAddr(fmt.Sprintf(":%d", port))
hook, err := NewPapertrailHook("localhost", port, "test")
if err != nil {
t.Errorf("Unable to connect to local UDP server.")
}
log := logrus.New()
log.Hooks.Add(hook)
udp.ShouldReceive(t, "foo", func() {
log.Info("foo")
})
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
View file

@@ -0,0 +1,61 @@
# Sentry Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
[Sentry](https://getsentry.com) provides both self-hosted and hosted
solutions for exception tracking.
Both client and server are
[open source](https://github.com/getsentry/sentry).
## Usage
Every sentry application defined on the server gets a different
[DSN](https://www.getsentry.com/docs/). In the example below replace
`YOUR_DSN` with the one created for your application.
```go
import (
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/sentry"
)
func main() {
log := logrus.New()
hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
})
if err == nil {
log.Hooks.Add(hook)
}
}
```
## Special fields
Some logrus fields have a special meaning in this hook,
these are server_name and logger.
When logs are sent to sentry these fields are treated differently.
- server_name (also known as hostname) is the name of the server which
is logging the event (hostname.example.com)
- logger is the part of the application which is logging the event.
In go this usually means setting it to the name of the package.
## Timeout
`Timeout` is the time the sentry hook will wait for a response
from the sentry server.
If this time elapses with no response from
the server an error will be returned.
If `Timeout` is set to 0 the SentryHook will not wait for a reply
and will assume a correct delivery.
The SentryHook has a default timeout of `100 milliseconds` when created
with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field:
```go
hook, _ := logrus_sentry.NewSentryHook(...)
hook.Timeout = 20*time.Second
```

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
View file

@@ -0,0 +1,100 @@
package logrus_sentry
import (
"fmt"
"time"
"github.com/Sirupsen/logrus"
"github.com/getsentry/raven-go"
)
var (
severityMap = map[logrus.Level]raven.Severity{
logrus.DebugLevel: raven.DEBUG,
logrus.InfoLevel: raven.INFO,
logrus.WarnLevel: raven.WARNING,
logrus.ErrorLevel: raven.ERROR,
logrus.FatalLevel: raven.FATAL,
logrus.PanicLevel: raven.FATAL,
}
)
func getAndDel(d logrus.Fields, key string) (string, bool) {
var (
ok bool
v interface{}
val string
)
if v, ok = d[key]; !ok {
return "", false
}
if val, ok = v.(string); !ok {
return "", false
}
delete(d, key)
return val, true
}
// SentryHook delivers logs to a sentry server.
type SentryHook struct {
// Timeout sets the time to wait for a delivery error from the sentry server.
// If this is set to zero the server will not wait for any response and will
// consider the message correctly sent
Timeout time.Duration
client *raven.Client
levels []logrus.Level
}
// NewSentryHook creates a hook to be added to an instance of logger
// and initializes the raven client.
// This method sets the timeout to 100 milliseconds.
func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
client, err := raven.NewClient(DSN, nil)
if err != nil {
return nil, err
}
return &SentryHook{100 * time.Millisecond, client, levels}, nil
}
// Fire is called when an event should be sent to sentry.
// Special fields that sentry uses to give more information to the server
// are extracted from entry.Data (if they are found).
// These fields are: logger and server_name.
func (hook *SentryHook) Fire(entry *logrus.Entry) error {
packet := &raven.Packet{
Message: entry.Message,
Timestamp: raven.Timestamp(entry.Time),
Level: severityMap[entry.Level],
Platform: "go",
}
d := entry.Data
if logger, ok := getAndDel(d, "logger"); ok {
packet.Logger = logger
}
if serverName, ok := getAndDel(d, "server_name"); ok {
packet.ServerName = serverName
}
packet.Extra = map[string]interface{}(d)
_, errCh := hook.client.Capture(packet, nil)
timeout := hook.Timeout
if timeout != 0 {
timeoutCh := time.After(timeout)
select {
case err := <-errCh:
return err
case <-timeoutCh:
return fmt.Errorf("no response from sentry server in %s", timeout)
}
}
return nil
}
// Levels returns the available logging levels.
func (hook *SentryHook) Levels() []logrus.Level {
return hook.levels
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
View file

@@ -0,0 +1,97 @@
package logrus_sentry
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/Sirupsen/logrus"
"github.com/getsentry/raven-go"
)
const (
message = "error message"
server_name = "testserver.internal"
logger_name = "test.logger"
)
func getTestLogger() *logrus.Logger {
l := logrus.New()
l.Out = ioutil.Discard
return l
}
func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) {
pch := make(chan *raven.Packet, 1)
s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
defer req.Body.Close()
d := json.NewDecoder(req.Body)
p := &raven.Packet{}
err := d.Decode(p)
if err != nil {
t.Fatal(err.Error())
}
pch <- p
}))
defer s.Close()
fragments := strings.SplitN(s.URL, "://", 2)
dsn := fmt.Sprintf(
"%s://public:secret@%s/sentry/project-id",
fragments[0],
fragments[1],
)
tf(dsn, pch)
}
func TestSpecialFields(t *testing.T) {
WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
logger := getTestLogger()
hook, err := NewSentryHook(dsn, []logrus.Level{
logrus.ErrorLevel,
})
if err != nil {
t.Fatal(err.Error())
}
logger.Hooks.Add(hook)
logger.WithFields(logrus.Fields{
"server_name": server_name,
"logger": logger_name,
}).Error(message)
packet := <-pch
if packet.Logger != logger_name {
t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger)
}
if packet.ServerName != server_name {
t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName)
}
})
}
func TestSentryHandler(t *testing.T) {
WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
logger := getTestLogger()
hook, err := NewSentryHook(dsn, []logrus.Level{
logrus.ErrorLevel,
})
if err != nil {
t.Fatal(err.Error())
}
logger.Hooks.Add(hook)
logger.Error(message)
packet := <-pch
if packet.Message != message {
t.Errorf("message should have been %s, was %s", message, packet.Message)
}
})
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
View file

@@ -0,0 +1,20 @@
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>
## Usage
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
```

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
View file

@@ -0,0 +1,59 @@
package logrus_syslog
import (
"fmt"
"github.com/Sirupsen/logrus"
"log/syslog"
"os"
)
// SyslogHook to send logs via syslog.
type SyslogHook struct {
Writer *syslog.Writer
SyslogNetwork string
SyslogRaddr string
}
// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
w, err := syslog.Dial(network, raddr, priority, tag)
return &SyslogHook{w, network, raddr}, err
}
func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
line, err := entry.String()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
return err
}
switch entry.Level {
case logrus.PanicLevel:
return hook.Writer.Crit(line)
case logrus.FatalLevel:
return hook.Writer.Crit(line)
case logrus.ErrorLevel:
return hook.Writer.Err(line)
case logrus.WarnLevel:
return hook.Writer.Warning(line)
case logrus.InfoLevel:
return hook.Writer.Info(line)
case logrus.DebugLevel:
return hook.Writer.Debug(line)
default:
return nil
}
}
func (hook *SyslogHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
View file

@@ -0,0 +1,26 @@
package logrus_syslog
import (
"github.com/Sirupsen/logrus"
"log/syslog"
"testing"
)
func TestLocalhostAddAndPrint(t *testing.T) {
log := logrus.New()
hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
t.Errorf("Unable to connect to local syslog.")
}
log.Hooks.Add(hook)
for _, level := range hook.Levels() {
if len(log.Hooks[level]) != 1 {
t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
}
}
log.Info("Congratulations!")
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
View file

@@ -0,0 +1,40 @@
package logrus
import (
"encoding/json"
"fmt"
)
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
}
}
prefixFieldClashes(data)
if f.TimestampFormat == "" {
f.TimestampFormat = DefaultTimestampFormat
}
data["time"] = entry.Time.Format(f.TimestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}

Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go
View file

@@ -0,0 +1,120 @@
package logrus
import (
"encoding/json"
"errors"
"testing"
)
func TestErrorNotLost(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["error"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["omg"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestFieldClashWithTime(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("time", "right now!"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.time"] != "right now!" {
t.Fatal("fields.time not set to original time field")
}
if entry["time"] != "0001-01-01T00:00:00Z" {
t.Fatal("time field not set to current time, was: ", entry["time"])
}
}
func TestFieldClashWithMsg(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("msg", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.msg"] != "something" {
t.Fatal("fields.msg not set to original msg field")
}
}
func TestFieldClashWithLevel(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.level"] != "something" {
t.Fatal("fields.level not set to original level field")
}
}
func TestJSONEntryEndsWithNewline(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
if b[len(b)-1] != '\n' {
t.Fatal("Expected JSON log entry to end with a newline")
}
}


@ -0,0 +1,203 @@
package logrus
import (
"io"
"os"
"sync"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it as the default, which is `os.Stderr`. You can also set
// this to something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks levelHooks
// All log entries pass through the formatter before logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but to a file it wouldn't. You can easily implement your
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in development and debugging.
Level Level
// Used to sync writing to the log.
mu sync.Mutex
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(levelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(levelHooks),
Level: InfoLevel,
}
}
// Adds a field to the log entry. Note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic; it only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
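
To illustrate how `WithField` and `WithFields` compose, a minimal sketch (the field names are made up for the example):

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()

	// Each With* call returns a fresh *Entry; fields accumulate on the
	// child without mutating the parent, so reqLog can be reused.
	reqLog := log.WithFields(logrus.Fields{
		"method": "GET",
		"path":   "/metrics",
	})
	reqLog.Info("request started")
	reqLog.WithField("status", 200).Info("request finished")
	// reqLog still carries only method and path at this point.
}
```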
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infof(format, args...)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorf(format, args...)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...)
}
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicf(format, args...)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debug(args...)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Info(args...)
}
}
func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Error(args...)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...)
}
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panic(args...)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugln(args...)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infoln(args...)
}
}
func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorln(args...)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...)
}
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicln(args...)
}
}


@ -0,0 +1,94 @@
package logrus
import (
"fmt"
"log"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint8
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch lvl {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
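
A sketch of how `ParseLevel` is commonly wired to a command-line flag; the flag name is an assumption for the example, not part of the library:

```go
package main

import (
	"flag"

	"github.com/Sirupsen/logrus"
)

func main() {
	levelFlag := flag.String("log.level", "info", "debug, info, warn(ing), error, fatal or panic")
	flag.Parse()

	level, err := logrus.ParseLevel(*levelFlag)
	if err != nil {
		logrus.Fatalf("invalid log level %q: %v", *levelFlag, err)
	}

	log := logrus.New()
	log.Level = level
	log.Debug("only visible with -log.level=debug")
	log.Info("up and running")
}
```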
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var _ StdLogger = &log.Logger{}
// StdLogger is what your logrus-enabled library should take; that way it
// accepts both a stdlib logger and a logrus logger. There's no standard
// interface, so this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}
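
A small sketch of the pattern this interface enables; `announce` is a hypothetical library function, not part of logrus:

```go
package main

import (
	"log"
	"os"

	"github.com/Sirupsen/logrus"
)

// announce depends only on the interface, so callers can hand it either a
// *log.Logger from the standard library or a *logrus.Logger.
func announce(l logrus.StdLogger) {
	l.Println("starting up")
}

func main() {
	announce(log.New(os.Stderr, "std: ", log.LstdFlags))
	announce(logrus.New())
}
```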


@ -0,0 +1,301 @@
package logrus
import (
"bytes"
"encoding/json"
"strconv"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
log(logger)
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assertions(fields)
}
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
var buffer bytes.Buffer
logger := New()
logger.Out = &buffer
logger.Formatter = &TextFormatter{
DisableColors: true,
}
log(logger)
fields := make(map[string]string)
for _, kv := range strings.Split(buffer.String(), " ") {
if !strings.Contains(kv, "=") {
continue
}
kvArr := strings.Split(kv, "=")
key := strings.TrimSpace(kvArr[0])
val := kvArr[1]
if kvArr[1][0] == '"' {
var err error
val, err = strconv.Unquote(val)
assert.NoError(t, err)
}
fields[key] = val
}
assertions(fields)
}
func TestPrint(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestInfo(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestWarn(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Warn("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "warning")
})
}
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test test")
})
}
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test 10")
})
}
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test10")
})
}
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "testtest")
})
}
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
localLog := logger.WithFields(Fields{
"key1": "value1",
})
localLog.WithField("key2", "value2").Info("test")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assert.Equal(t, "value2", fields["key2"])
assert.Equal(t, "value1", fields["key1"])
buffer = bytes.Buffer{}
fields = Fields{}
localLog.Info("test")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
_, ok := fields["key2"]
assert.Equal(t, false, ok)
assert.Equal(t, "value1", fields["key1"])
}
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
})
}
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["fields.msg"], "hello")
})
}
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("time", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["fields.time"], "hello")
})
}
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("level", 1).Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["level"], "info")
assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
})
}
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
LogAndAssertText(t, func(log *Logger) {
ll := log.WithField("herp", "derp")
ll.Info("hello")
ll.Info("bye")
}, func(fields map[string]string) {
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
if _, ok := fields[fieldName]; ok {
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
}
}
})
}
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
llog := logger.WithField("context", "eating raw fish")
llog.Info("looks delicious")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded first message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "looks delicious")
assert.Equal(t, fields["context"], "eating raw fish")
buffer.Reset()
llog.Warn("omg it is!")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded second message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "omg it is!")
assert.Equal(t, fields["context"], "eating raw fish")
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
}
func TestConvertLevelToString(t *testing.T) {
assert.Equal(t, "debug", DebugLevel.String())
assert.Equal(t, "info", InfoLevel.String())
assert.Equal(t, "warning", WarnLevel.String())
assert.Equal(t, "error", ErrorLevel.String())
assert.Equal(t, "fatal", FatalLevel.String())
assert.Equal(t, "panic", PanicLevel.String())
}
func TestParseLevel(t *testing.T) {
l, err := ParseLevel("panic")
assert.Nil(t, err)
assert.Equal(t, PanicLevel, l)
l, err = ParseLevel("fatal")
assert.Nil(t, err)
assert.Equal(t, FatalLevel, l)
l, err = ParseLevel("error")
assert.Nil(t, err)
assert.Equal(t, ErrorLevel, l)
l, err = ParseLevel("warn")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("warning")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("info")
assert.Nil(t, err)
assert.Equal(t, InfoLevel, l)
l, err = ParseLevel("debug")
assert.Nil(t, err)
assert.Equal(t, DebugLevel, l)
l, err = ParseLevel("invalid")
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
}
func TestGetSetLevelRace(t *testing.T) {
wg := sync.WaitGroup{}
for i := 0; i < 100; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
if i%2 == 0 {
SetLevel(InfoLevel)
} else {
GetLevel()
}
}(i)
}
wg.Wait()
}


@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios


@ -0,0 +1,20 @@
/*
Go 1.2 doesn't include Termios for FreeBSD. It should be added in 1.3, at
which point this file can be merged with terminal_darwin.
*/
package logrus
import (
"syscall"
)
const ioctlReadTermios = syscall.TIOCGETA
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}


@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios


@ -0,0 +1,21 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd
package logrus
import (
"syscall"
"unsafe"
)
// IsTerminal returns true if stdout's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}


@ -0,0 +1,7 @@
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios


@ -0,0 +1,27 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package logrus
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if stdout's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}


@ -0,0 +1,151 @@
package logrus
import (
"bytes"
"fmt"
"runtime"
"sort"
"strings"
"time"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
gray = 37
)
var (
baseTimestamp time.Time
isTerminal bool
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
// Disable timestamp logging. Useful when output is redirected to a logging
// system that already adds timestamps.
DisableTimestamp bool
// Enable logging the full timestamp when a TTY is attached instead of just
// the time passed since beginning of execution.
FullTimestamp bool
// TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
b := &bytes.Buffer{}
prefixFieldClashes(entry.Data)
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
// Use a local copy of the layout so a shared formatter is not mutated.
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
f.appendKeyValue(b, "msg", entry.Message)
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
}
}
// needsQuoting reports whether text contains characters outside the safe
// set [a-zA-Z0-9.-] and therefore has to be quoted in the output.
func needsQuoting(text string) bool {
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return true
}
}
return false
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
switch value := value.(type) {
case string:
if needsQuoting(value) {
fmt.Fprintf(b, "%v=%q ", key, value)
} else {
fmt.Fprintf(b, "%v=%s ", key, value)
}
case error:
errmsg := value.Error()
if needsQuoting(errmsg) {
fmt.Fprintf(b, "%v=%q ", key, errmsg)
} else {
fmt.Fprintf(b, "%v=%s ", key, errmsg)
}
default:
fmt.Fprintf(b, "%v=%v ", key, value)
}
}
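
For illustration, a sketch that sets the options defined above; the chosen values are arbitrary:

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	log.Formatter = &logrus.TextFormatter{
		// Keep colors even when stdout is piped, e.g. through `less -R`.
		ForceColors: true,
		// Print absolute timestamps instead of seconds since start-up.
		FullTimestamp:   true,
		TimestampFormat: "2006-01-02 15:04:05",
		// Skip the per-entry key sort on very hot logging paths.
		DisableSorting: true,
	}
	log.WithField("animal", "walrus").Info("a walrus appears")
}
```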


@ -0,0 +1,61 @@
package logrus
import (
"bytes"
"errors"
"testing"
"time"
)
func TestQuoting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}
checkQuoting := func(q bool, value interface{}) {
b, _ := tf.Format(WithField("test", value))
idx := bytes.Index(b, ([]byte)("test="))
cont := bytes.Contains(b[idx+5:], []byte{'"'})
if cont != q {
if q {
t.Errorf("quoting expected for: %#v", value)
} else {
t.Errorf("quoting not expected for: %#v", value)
}
}
}
checkQuoting(false, "abcd")
checkQuoting(false, "v1.0")
checkQuoting(false, "1234567890")
checkQuoting(true, "/foobar")
checkQuoting(true, "x y")
checkQuoting(true, "x,y")
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))
}
func TestTimestampFormat(t *testing.T) {
checkTimeStr := func(format string) {
customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
customStr, _ := customFormatter.Format(WithField("test", "test"))
timeStart := bytes.Index(customStr, ([]byte)("time="))
timeEnd := bytes.Index(customStr, ([]byte)("level="))
timeStr := customStr[timeStart+5 : timeEnd-1]
if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
timeStr = timeStr[1 : len(timeStr)-1]
}
if format == "" {
format = time.RFC3339
}
_, e := time.Parse(format, (string)(timeStr))
if e != nil {
t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
}
}
checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
checkTimeStr("Mon Jan _2 15:04:05 2006")
checkTimeStr("")
}
// TODO add tests for sorting etc., this requires a parser for the text
// formatter output.


@ -0,0 +1,31 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
func (logger *Logger) Writer() *io.PipeWriter {
reader, writer := io.Pipe()
go logger.writerScanner(reader)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
logger.Print(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
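
A common use of `Writer()` is to funnel a standard-library `*log.Logger` through logrus, for instance as an `http.Server`'s `ErrorLog`; the address and server wiring below are illustrative assumptions:

```go
package main

import (
	"log"
	"net/http"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Everything written to w is re-emitted as logrus entries at Info level.
	w := logger.Writer()
	defer w.Close()

	srv := &http.Server{
		Addr:     ":8080",
		ErrorLog: log.New(w, "", 0),
	}
	if err := srv.ListenAndServe(); err != nil {
		logger.Error(err)
	}
}
```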


@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,44 +0,0 @@
glog
====
Leveled execution logs for Go.
This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
http://code.google.com/p/google-glog
By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
Through the -vmodule flag, the package also provides fine-grained
control over logging at the file level.
The comment from glog.go introduces the ideas:
Package glog implements logging analogous to the Google-internal
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
Error, Fatal, plus formatting variants such as Infof. It
also provides V-style logging controlled by the -v and
-vmodule=file=2 flags.
Basic examples:
glog.Info("Prepare to repel boarders")
glog.Fatalf("Initialization failed: %s", err)
See the documentation for the V function for an explanation
of these examples:
if glog.V(2) {
glog.Info("Starting transaction...")
}
glog.V(2).Infoln("Processed", nItems, "elements")
The repository contains an open source version of the log package
used inside Google. The master copy of the source lives inside
Google, not here. The code in this repo is for export only and is not itself
under development. Feature requests will be ignored.
Send bug reports to golang-nuts@googlegroups.com.

File diff suppressed because it is too large.


@ -1,124 +0,0 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// File I/O for logs.
package glog
import (
"errors"
"flag"
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"sync"
"time"
)
// MaxSize is the maximum size of a log file in bytes.
var MaxSize uint64 = 1024 * 1024 * 1800
// logDirs lists the candidate directories for new log files.
var logDirs []string
// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
func createLogDirs() {
if *logDir != "" {
logDirs = append(logDirs, *logDir)
}
logDirs = append(logDirs, os.TempDir())
}
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
func init() {
h, err := os.Hostname()
if err == nil {
host = shortHostname(h)
}
current, err := user.Current()
if err == nil {
userName = current.Username
}
// Sanitize userName since it may contain filepath separators on Windows.
userName = strings.Replace(userName, `\`, "_", -1)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
if i := strings.Index(hostname, "."); i >= 0 {
return hostname[:i]
}
return hostname
}
// logName returns a new log file name containing tag, with start time t, and
// the name for the symlink for tag.
func logName(tag string, t time.Time) (name, link string) {
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
program,
host,
userName,
tag,
t.Year(),
t.Month(),
t.Day(),
t.Hour(),
t.Minute(),
t.Second(),
pid)
return name, program + "." + tag
}
var onceLogDirs sync.Once
// create creates a new log file and returns the file and its filename, which
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
onceLogDirs.Do(createLogDirs)
if len(logDirs) == 0 {
return nil, "", errors.New("log: no log dirs")
}
name, link := logName(tag, t)
var lastErr error
for _, dir := range logDirs {
fname := filepath.Join(dir, name)
f, err := os.Create(fname)
if err == nil {
symlink := filepath.Join(dir, link)
os.Remove(symlink) // ignore err
os.Symlink(name, symlink) // ignore err
return f, fname, nil
}
lastErr = err
}
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}


@ -1,415 +0,0 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package glog
import (
"bytes"
"fmt"
stdLog "log"
"path/filepath"
"runtime"
"strconv"
"strings"
"testing"
"time"
)
// Test that shortHostname works as advertised.
func TestShortHostname(t *testing.T) {
for hostname, expect := range map[string]string{
"": "",
"host": "host",
"host.google.com": "host",
} {
if got := shortHostname(hostname); expect != got {
t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got)
}
}
}
// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter.
type flushBuffer struct {
bytes.Buffer
}
func (f *flushBuffer) Flush() error {
return nil
}
func (f *flushBuffer) Sync() error {
return nil
}
// swap sets the log writers and returns the old array.
func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) {
l.mu.Lock()
defer l.mu.Unlock()
old = l.file
for i, w := range writers {
logging.file[i] = w
}
return
}
// newBuffers sets the log writers to all new byte buffers and returns the old array.
func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter {
return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)})
}
// contents returns the specified log value as a string.
func contents(s severity) string {
return logging.file[s].(*flushBuffer).String()
}
// contains reports whether the string is contained in the log.
func contains(s severity, str string, t *testing.T) bool {
return strings.Contains(contents(s), str)
}
// setFlags configures the logging flags how the test expects them.
func setFlags() {
logging.toStderr = false
}
// Test that Info works as advertised.
func TestInfo(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
Info("test")
if !contains(infoLog, "I", t) {
t.Errorf("Info has wrong character: %q", contents(infoLog))
}
if !contains(infoLog, "test", t) {
t.Error("Info failed")
}
}
func TestInfoDepth(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
f := func() { InfoDepth(1, "depth-test1") }
// The next three lines must stay together
_, _, wantLine, _ := runtime.Caller(0)
InfoDepth(0, "depth-test0")
f()
msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n")
if len(msgs) != 2 {
t.Fatalf("Got %d lines, expected 2", len(msgs))
}
for i, m := range msgs {
if !strings.HasPrefix(m, "I") {
t.Errorf("InfoDepth[%d] has wrong character: %q", i, m)
}
w := fmt.Sprintf("depth-test%d", i)
if !strings.Contains(m, w) {
t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m)
}
// pull out the line number (between : and ])
msg := m[strings.LastIndex(m, ":")+1:]
x := strings.Index(msg, "]")
if x < 0 {
t.Errorf("InfoDepth[%d]: missing ']': %q", i, m)
continue
}
line, err := strconv.Atoi(msg[:x])
if err != nil {
t.Errorf("InfoDepth[%d]: bad line number: %q", i, m)
continue
}
wantLine++
if wantLine != line {
t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine)
}
}
}
func init() {
CopyStandardLogTo("INFO")
}
// Test that CopyStandardLogTo panics on bad input.
func TestCopyStandardLogToPanic(t *testing.T) {
defer func() {
if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") {
t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s)
}
}()
CopyStandardLogTo("LOG")
}
// Test that using the standard log package logs to INFO.
func TestStandardLog(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
stdLog.Print("test")
if !contains(infoLog, "I", t) {
t.Errorf("Info has wrong character: %q", contents(infoLog))
}
if !contains(infoLog, "test", t) {
t.Error("Info failed")
}
}
// Test that the header has the correct format.
func TestHeader(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
defer func(previous func() time.Time) { timeNow = previous }(timeNow)
timeNow = func() time.Time {
return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
}
pid = 1234
Info("test")
var line int
format := "I0102 15:04:05.067890 1234 glog_test.go:%d] test\n"
n, err := fmt.Sscanf(contents(infoLog), format, &line)
if n != 1 || err != nil {
t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
}
// Scanf treats multiple spaces as equivalent to a single space,
// so check for correct space-padding also.
want := fmt.Sprintf(format, line)
if contents(infoLog) != want {
t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want)
}
}
// Test that an Error log goes to Warning and Info.
// Even in the Info log, the source character will be E, so the data should
// all be identical.
func TestError(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
Error("test")
if !contains(errorLog, "E", t) {
t.Errorf("Error has wrong character: %q", contents(errorLog))
}
if !contains(errorLog, "test", t) {
t.Error("Error failed")
}
str := contents(errorLog)
if !contains(warningLog, str, t) {
t.Error("Warning failed")
}
if !contains(infoLog, str, t) {
t.Error("Info failed")
}
}
// Test that a Warning log goes to Info.
// Even in the Info log, the source character will be W, so the data should
// all be identical.
func TestWarning(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
Warning("test")
if !contains(warningLog, "W", t) {
t.Errorf("Warning has wrong character: %q", contents(warningLog))
}
if !contains(warningLog, "test", t) {
t.Error("Warning failed")
}
str := contents(warningLog)
if !contains(infoLog, str, t) {
t.Error("Info failed")
}
}
// Test that a V log goes to Info.
func TestV(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
logging.verbosity.Set("2")
defer logging.verbosity.Set("0")
V(2).Info("test")
if !contains(infoLog, "I", t) {
t.Errorf("Info has wrong character: %q", contents(infoLog))
}
if !contains(infoLog, "test", t) {
t.Error("Info failed")
}
}
// Test that a vmodule enables a log in this file.
func TestVmoduleOn(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
logging.vmodule.Set("glog_test=2")
defer logging.vmodule.Set("")
if !V(1) {
t.Error("V not enabled for 1")
}
if !V(2) {
t.Error("V not enabled for 2")
}
if V(3) {
t.Error("V enabled for 3")
}
V(2).Info("test")
if !contains(infoLog, "I", t) {
t.Errorf("Info has wrong character: %q", contents(infoLog))
}
if !contains(infoLog, "test", t) {
t.Error("Info failed")
}
}
// Test that a vmodule of another file does not enable a log in this file.
func TestVmoduleOff(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
logging.vmodule.Set("notthisfile=2")
defer logging.vmodule.Set("")
for i := 1; i <= 3; i++ {
if V(Level(i)) {
t.Errorf("V enabled for %d", i)
}
}
V(2).Info("test")
if contents(infoLog) != "" {
t.Error("V logged incorrectly")
}
}
// vGlobs are patterns that match/don't match this file at V=2.
var vGlobs = map[string]bool{
// Easy to test the numeric match here.
"glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail.
"glog_test=2": true,
"glog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed.
// These all use 2 and check the patterns. All are true.
"*=2": true,
"?l*=2": true,
"????_*=2": true,
"??[mno]?_*t=2": true,
// These all use 2 and check the patterns. All are false.
"*x=2": false,
"m*=2": false,
"??_*=2": false,
"?[abc]?_*t=2": false,
}
// Test that vmodule globbing works as advertised.
func testVmoduleGlob(pat string, match bool, t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
defer logging.vmodule.Set("")
logging.vmodule.Set(pat)
if V(2) != Verbose(match) {
t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match)
}
}
// Test that a vmodule globbing works as advertised.
func TestVmoduleGlob(t *testing.T) {
for glob, match := range vGlobs {
testVmoduleGlob(glob, match, t)
}
}
func TestRollover(t *testing.T) {
setFlags()
var err error
defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
logExitFunc = func(e error) {
err = e
}
defer func(previous uint64) { MaxSize = previous }(MaxSize)
MaxSize = 512
Info("x") // Be sure we have a file.
info, ok := logging.file[infoLog].(*syncBuffer)
if !ok {
t.Fatal("info wasn't created")
}
if err != nil {
t.Fatalf("info has initial error: %v", err)
}
fname0 := info.file.Name()
Info(strings.Repeat("x", int(MaxSize))) // force a rollover
if err != nil {
t.Fatalf("info has error after big write: %v", err)
}
// Make sure the next log file gets a file name with a different
// time stamp.
//
// TODO: determine whether we need to support subsecond log
// rotation. C++ does not appear to handle this case (nor does it
// handle Daylight Savings Time properly).
time.Sleep(1 * time.Second)
Info("x") // create a new file
if err != nil {
t.Fatalf("error after rotation: %v", err)
}
fname1 := info.file.Name()
if fname0 == fname1 {
t.Errorf("info.f.Name did not change: %v", fname0)
}
if info.nbytes >= MaxSize {
t.Errorf("file size was not reset: %d", info.nbytes)
}
}
func TestLogBacktraceAt(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
// The peculiar style of this code simplifies line counting and maintenance of the
// tracing block below.
var infoLine string
setTraceLocation := func(file string, line int, ok bool, delta int) {
if !ok {
t.Fatal("could not get file:line")
}
_, file = filepath.Split(file)
infoLine = fmt.Sprintf("%s:%d", file, line+delta)
err := logging.traceLocation.Set(infoLine)
if err != nil {
t.Fatal("error setting log_backtrace_at: ", err)
}
}
{
// Start of tracing block. These lines know about each other's relative position.
_, file, line, ok := runtime.Caller(0)
setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls.
Info("we want a stack trace here")
}
numAppearances := strings.Count(contents(infoLog), infoLine)
if numAppearances < 2 {
// Need 2 appearances, one in the log header and one in the trace:
// log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
// ...
// github.com/glog/glog_test.go:280 (0x41ba91)
// ...
// We could be more precise but that would require knowing the details
// of the traceback format, which may not be dependable.
t.Fatal("got no trace back; log is ", contents(infoLog))
}
}
func BenchmarkHeader(b *testing.B) {
for i := 0; i < b.N; i++ {
buf, _, _ := logging.header(infoLog, 0)
logging.putBuffer(buf)
}
}


@ -0,0 +1,11 @@
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.
Maintainers of this repository:
* Julius Volz <julius@soundcloud.com>
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Julius Volz <julius@soundcloud.com>


@ -0,0 +1,18 @@
# Contributing
Prometheus uses GitHub to manage reviews of pull requests.
* If you have a trivial fix or improvement, go ahead and create a pull
request, addressing (with `@...`) one or more of the maintainers
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
This will avoid unnecessary work and surely give you and us a good deal
of inspiration.
* Relevant coding style guidelines are the [Go Code Review
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
Practices for Production
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).

Godeps/_workspace/src/github.com/prometheus/log/LICENSE (generated, vendored)

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
View file
@ -0,0 +1,2 @@
Standard logging library for Go-based Prometheus components.
Copyright 2015 The Prometheus Authors
View file
@ -0,0 +1,8 @@
# Prometheus Logging Library
Standard logging library for Go-based Prometheus components.
This library wraps [logrus](https://github.com/Sirupsen/logrus) to add file
and line annotations to log lines, as well as to provide common command-line
flags for the Prometheus components that use it.
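A minimal usage sketch of the resulting API (a hypothetical `main.go`; the messages are illustrative, while the `-log.level` flag and the logging functions are the ones defined in `log.go` below):

```go
package main

import (
	"flag"

	"github.com/prometheus/log"
)

func main() {
	// Parse flags first: the -log.level flag registered by the log
	// package's init function only takes effect after flag.Parse().
	flag.Parse()

	log.Debugln("only visible when started with -log.level=debug")
	log.Infof("started with %d extra arguments", flag.NArg())
}
```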
156
Godeps/_workspace/src/github.com/prometheus/log/log.go generated vendored Normal file
View file
@ -0,0 +1,156 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"flag"
"runtime"
"strings"
"github.com/Sirupsen/logrus"
)
var logger = logrus.New()
type levelFlag struct{}
// String implements flag.Value.
func (f levelFlag) String() string {
return logger.Level.String()
}
// Set implements flag.Value.
func (f levelFlag) Set(level string) error {
l, err := logrus.ParseLevel(level)
if err != nil {
return err
}
logger.Level = l
return nil
}
func init() {
// In order for this flag to take effect, the user of the package must call
// flag.Parse() before logging anything.
flag.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal, panic].")
}
// fileLineEntry returns a logrus.Entry with file and line annotations for the
// original user log statement (two stack frames up from this function).
func fileLineEntry() *logrus.Entry {
_, file, line, ok := runtime.Caller(2)
if !ok {
file = "<???>"
line = 1
} else {
slash := strings.LastIndex(file, "/")
if slash >= 0 {
file = file[slash+1:]
}
}
return logger.WithFields(logrus.Fields{
"file": file,
"line": line,
})
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
fileLineEntry().Debug(args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
fileLineEntry().Debugln(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
fileLineEntry().Debugf(format, args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
fileLineEntry().Info(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
fileLineEntry().Infoln(args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
fileLineEntry().Infof(format, args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
fileLineEntry().Warn(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
fileLineEntry().Warnln(args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
fileLineEntry().Warnf(format, args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
fileLineEntry().Error(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
fileLineEntry().Errorln(args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
fileLineEntry().Errorf(format, args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
fileLineEntry().Fatal(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
fileLineEntry().Fatalln(args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
fileLineEntry().Fatalf(format, args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
fileLineEntry().Panic(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
fileLineEntry().Panicln(args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
fileLineEntry().Panicf(format, args...)
}
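As a sketch of why `runtime.Caller(2)` is the right skip depth in `fileLineEntry`: frame 0 is `fileLineEntry` itself, frame 1 is the exported wrapper (e.g. `Infof`), and frame 2 is the user's call site. A standalone illustration of the skip semantics (not part of the library):

```go
package main

import (
	"fmt"
	"runtime"
)

func inner() {
	// skip 0 = inner, skip 1 = outer, skip 2 = main, mirroring the
	// fileLineEntry -> exported wrapper -> user call-site chain.
	for skip := 0; skip <= 2; skip++ {
		_, file, line, _ := runtime.Caller(skip)
		fmt.Printf("skip=%d -> %s:%d\n", skip, file, line)
	}
}

func outer() { inner() }

func main() { outer() }
```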
View file
@ -0,0 +1,39 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"bytes"
"regexp"
"testing"
"github.com/Sirupsen/logrus"
)
func TestFileLineLogging(t *testing.T) {
var buf bytes.Buffer
logger.Out = &buf
logger.Formatter = &logrus.TextFormatter{
DisableColors: true,
}
// The default logging level should be "info".
Debugln("This debug-level line should not show up in the output.")
Infof("This %s-level line should show up in the output.", "info")
re := `^time=".*" level=info msg="This info-level line should show up in the output." file="log_test.go" line=33 \n$`
if !regexp.MustCompile(re).Match(buf.Bytes()) {
t.Fatalf("%q did not match expected regex %q", buf.String(), re)
}
}
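For reference, a log line matching this test's regex would look roughly as follows (timestamp illustrative; note that the regex's ` \n$` tail requires a trailing space before the newline):

```
time="2015-05-20T22:01:07+02:00" level=info msg="This info-level line should show up in the output." file="log_test.go" line=33 
```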
22
main.go
View file
@ -25,7 +25,7 @@ import (
"syscall"
"time"
"github.com/golang/glog"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
registry "github.com/prometheus/client_golang/prometheus"
@ -103,7 +103,7 @@ func NewPrometheus() *prometheus {
case "adaptive":
syncStrategy = local.Adaptive
default:
glog.Errorf("Invalid flag value for 'storage.local.series-sync-strategy': %s\n", *seriesSyncStrategy)
log.Errorf("Invalid flag value for 'storage.local.series-sync-strategy': %s\n", *seriesSyncStrategy)
os.Exit(2)
}
@ -123,7 +123,7 @@ func NewPrometheus() *prometheus {
var sampleAppender storage.SampleAppender
var remoteStorageQueues []*remote.StorageQueueManager
if *opentsdbURL == "" && *influxdbURL == "" {
glog.Warningf("No remote storage URLs provided; not sending any samples to long-term storage")
log.Warnf("No remote storage URLs provided; not sending any samples to long-term storage")
sampleAppender = memStorage
} else {
fanout := storage.Fanout{memStorage}
@ -217,12 +217,12 @@ func NewPrometheus() *prometheus {
}
func (p *prometheus) reloadConfig() bool {
glog.Infof("Loading configuration file %s", *configFile)
log.Infof("Loading configuration file %s", *configFile)
conf, err := config.LoadFromFile(*configFile)
if err != nil {
glog.Errorf("Couldn't load configuration (-config.file=%s): %v", *configFile, err)
glog.Errorf("Note: The configuration format has changed with version 0.14, please check the documentation.")
log.Errorf("Couldn't load configuration (-config.file=%s): %v", *configFile, err)
log.Errorf("Note: The configuration format has changed with version 0.14, please check the documentation.")
return false
}
@ -239,7 +239,7 @@ func (p *prometheus) reloadConfig() bool {
func (p *prometheus) Serve() {
// Start all components.
if err := p.storage.Start(); err != nil {
glog.Error("Error opening memory series storage: ", err)
log.Error("Error opening memory series storage: ", err)
os.Exit(1)
}
defer p.storage.Stop()
@ -278,14 +278,14 @@ func (p *prometheus) Serve() {
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case <-term:
glog.Warning("Received SIGTERM, exiting gracefully...")
log.Warn("Received SIGTERM, exiting gracefully...")
case <-p.webService.QuitChan:
glog.Warning("Received termination request via web service, exiting gracefully...")
log.Warn("Received termination request via web service, exiting gracefully...")
}
close(hup)
glog.Info("See you next time!")
log.Info("See you next time!")
}
// Describe implements registry.Collector.
@ -368,7 +368,7 @@ func main() {
if err := flag.CommandLine.Parse(os.Args[1:]); err != nil {
if err != flag.ErrHelp {
glog.Errorf("Invalid command line arguments. Help: %s -h", os.Args[0])
log.Errorf("Invalid command line arguments. Help: %s -h", os.Args[0])
}
os.Exit(2)
}
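The same mechanical call mapping recurs throughout the remaining files; summarized as a sketch (old glog call on the left, its replacement on the right):

```go
// glog.Info(...)        -> log.Info(...)
// glog.Infof(...)       -> log.Infof(...)
// glog.Warning(...)     -> log.Warn(...)
// glog.Warningf(...)    -> log.Warnf(...)
// glog.Error(...)       -> log.Error(...)
// glog.Errorf(...)      -> log.Errorf(...)
// glog.V(1).Info(...)   -> log.Debug(...)   // glog verbosity levels collapse
// glog.V(1).Infof(...)  -> log.Debugf(...)  // into the single debug level
// glog.V(1).Infoln(...) -> log.Debugln(...)
```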
View file
@ -23,8 +23,8 @@ import (
"strings"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -158,7 +158,7 @@ func (n *NotificationHandler) sendNotifications(reqs NotificationReqs) error {
if err != nil {
return err
}
glog.V(1).Infoln("Sending notifications to alertmanager:", string(buf))
log.Debugln("Sending notifications to alertmanager:", string(buf))
resp, err := n.httpClient.Post(
n.alertmanagerURL+alertmanagerAPIEventsPath,
contentTypeJSON,
@ -181,7 +181,7 @@ func (n *NotificationHandler) sendNotifications(reqs NotificationReqs) error {
func (n *NotificationHandler) Run() {
for reqs := range n.pendingNotifications {
if n.alertmanagerURL == "" {
glog.Warning("No alert manager configured, not dispatching notification")
log.Warn("No alert manager configured, not dispatching notification")
n.notificationDropped.Inc()
continue
}
@ -190,7 +190,7 @@ func (n *NotificationHandler) Run() {
err := n.sendNotifications(reqs)
if err != nil {
glog.Error("Error sending notification: ", err)
log.Error("Error sending notification: ", err)
n.notificationErrors.Inc()
}
@ -206,10 +206,10 @@ func (n *NotificationHandler) SubmitReqs(reqs NotificationReqs) {
// Stop shuts down the notification handler.
func (n *NotificationHandler) Stop() {
glog.Info("Stopping notification handler...")
log.Info("Stopping notification handler...")
close(n.pendingNotifications)
<-n.stopped
glog.Info("Notification handler stopped.")
log.Info("Notification handler stopped.")
}
// Describe implements prometheus.Collector.
View file
@ -7,7 +7,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"github.com/prometheus/log"
consul "github.com/hashicorp/consul/api"
clientmodel "github.com/prometheus/client_golang/model"
@ -98,7 +98,7 @@ func (cd *ConsulDiscovery) Sources() []string {
srvs, _, err := client.Catalog().Services(nil)
if err != nil {
glog.Errorf("Error refreshing service list: %s", err)
log.Errorf("Error refreshing service list: %s", err)
return nil
}
cd.mu.Lock()
@ -140,7 +140,7 @@ func (cd *ConsulDiscovery) Run(ch chan<- *config.TargetGroup) {
// Stop implements the TargetProvider interface.
func (cd *ConsulDiscovery) Stop() {
glog.V(1).Infof("Stopping Consul service discovery for %s", cd.clientConf.Address)
log.Debugf("Stopping Consul service discovery for %s", cd.clientConf.Address)
// The lock prevents Run from terminating while the watchers attempt
// to send on their channels.
@ -157,7 +157,7 @@ func (cd *ConsulDiscovery) Stop() {
// Terminate Run.
cd.runDone <- struct{}{}
glog.V(1).Infof("Consul service discovery for %s stopped.", cd.clientConf.Address)
log.Debugf("Consul service discovery for %s stopped.", cd.clientConf.Address)
}
// watchServices retrieves updates from Consul's services endpoint and sends
@ -171,7 +171,7 @@ func (cd *ConsulDiscovery) watchServices(update chan<- *consulService) {
WaitIndex: lastIndex,
})
if err != nil {
glog.Errorf("Error refreshing service list: %s", err)
log.Errorf("Error refreshing service list: %s", err)
<-time.After(consulRetryInterval)
continue
}
@ -232,7 +232,7 @@ func (cd *ConsulDiscovery) watchService(srv *consulService, ch chan<- *config.Ta
WaitTime: consulWatchTimeout,
})
if err != nil {
glog.Errorf("Error refreshing service %s: %s", srv.name, err)
log.Errorf("Error refreshing service %s: %s", srv.name, err)
<-time.After(consulRetryInterval)
continue
}
View file
@ -20,11 +20,12 @@ import (
"sync"
"time"
"github.com/golang/glog"
"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/config"
)
@ -97,12 +98,12 @@ func (dd *DNSDiscovery) Run(ch chan<- *config.TargetGroup) {
// Stop implements the TargetProvider interface.
func (dd *DNSDiscovery) Stop() {
glog.V(1).Info("Stopping DNS discovery for %s...", dd.names)
log.Debug("Stopping DNS discovery for %s...", dd.names)
dd.ticker.Stop()
dd.done <- struct{}{}
glog.V(1).Info("DNS discovery for %s stopped.", dd.names)
log.Debug("DNS discovery for %s stopped.", dd.names)
}
// Sources implements the TargetProvider interface.
@ -120,7 +121,7 @@ func (dd *DNSDiscovery) refreshAll(ch chan<- *config.TargetGroup) {
for _, name := range dd.names {
go func(n string) {
if err := dd.refresh(n, ch); err != nil {
glog.Errorf("Error refreshing DNS targets: %s", err)
log.Errorf("Error refreshing DNS targets: %s", err)
}
wg.Done()
}(name)
@ -140,7 +141,7 @@ func (dd *DNSDiscovery) refresh(name string, ch chan<- *config.TargetGroup) erro
for _, record := range response.Answer {
addr, ok := record.(*dns.SRV)
if !ok {
glog.Warningf("%q is not a valid SRV record", record)
log.Warnf("%q is not a valid SRV record", record)
continue
}
// Remove the final dot from rooted DNS names to make them look more usual.
@ -173,7 +174,7 @@ func lookupSRV(name string) (*dns.Msg, error) {
for _, suffix := range conf.Search {
response, err = lookup(name, dns.TypeSRV, client, servAddr, suffix, false)
if err != nil {
glog.Warningf("resolving %s.%s failed: %s", name, suffix, err)
log.Warnf("resolving %s.%s failed: %s", name, suffix, err)
continue
}
if len(response.Answer) > 0 {
View file
@ -21,13 +21,13 @@ import (
"strings"
"time"
"github.com/golang/glog"
"github.com/prometheus/log"
"gopkg.in/fsnotify.v1"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/config"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/config"
)
const FileSDFilepathLabel = clientmodel.MetaLabelPrefix + "filepath"
@ -64,7 +64,7 @@ func (fd *FileDiscovery) Sources() []string {
for _, p := range fd.listFiles() {
tgroups, err := readFile(p)
if err != nil {
glog.Errorf("Error reading file %q: ", p, err)
log.Errorf("Error reading file %q: ", p, err)
}
for _, tg := range tgroups {
srcs = append(srcs, tg.Source)
@ -79,7 +79,7 @@ func (fd *FileDiscovery) listFiles() []string {
for _, p := range fd.paths {
files, err := filepath.Glob(p)
if err != nil {
glog.Errorf("Error expanding glob %q: %s", p, err)
log.Errorf("Error expanding glob %q: %s", p, err)
continue
}
paths = append(paths, files...)
@ -100,7 +100,7 @@ func (fd *FileDiscovery) watchFiles() {
p = "./"
}
if err := fd.watcher.Add(p); err != nil {
glog.Errorf("Error adding file watch for %q: %s", p, err)
log.Errorf("Error adding file watch for %q: %s", p, err)
}
}
}
@ -111,7 +111,7 @@ func (fd *FileDiscovery) Run(ch chan<- *config.TargetGroup) {
watcher, err := fsnotify.NewWatcher()
if err != nil {
glog.Errorf("Error creating file watcher: %s", err)
log.Errorf("Error creating file watcher: %s", err)
return
}
fd.watcher = watcher
@ -152,7 +152,7 @@ func (fd *FileDiscovery) Run(ch chan<- *config.TargetGroup) {
case err := <-fd.watcher.Errors:
if err != nil {
glog.Errorf("Error on file watch: %s", err)
log.Errorf("Error on file watch: %s", err)
}
case <-fd.done:
@ -169,7 +169,7 @@ func (fd *FileDiscovery) refresh(ch chan<- *config.TargetGroup) {
for _, p := range fd.listFiles() {
tgroups, err := readFile(p)
if err != nil {
glog.Errorf("Error reading file %q: %s", p, err)
log.Errorf("Error reading file %q: %s", p, err)
// Prevent deletion down below.
ref[p] = fd.lastRefresh[p]
continue
@ -200,7 +200,7 @@ func fileSource(filename string, i int) string {
// Stop implements the TargetProvider interface.
func (fd *FileDiscovery) Stop() {
glog.V(1).Infof("Stopping file discovery for %s...", fd.paths)
log.Debugf("Stopping file discovery for %s...", fd.paths)
fd.done <- struct{}{}
// Closing the watcher will deadlock unless all events and errors are drained.
@ -219,7 +219,7 @@ func (fd *FileDiscovery) Stop() {
fd.done <- struct{}{}
glog.V(1).Infof("File discovery for %s stopped.", fd.paths)
log.Debugf("File discovery for %s stopped.", fd.paths)
}
// readFile reads a JSON or YAML list of targets groups from the file, depending on its
View file
@ -23,9 +23,9 @@ import (
"sync"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/extraction"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -247,7 +247,7 @@ func (t *Target) RunScraper(sampleAppender storage.SampleAppender) {
lastScrapeInterval := t.scrapeInterval
t.RUnlock()
glog.V(1).Infof("Starting scraper for target %v...", t)
log.Debugf("Starting scraper for target %v...", t)
jitterTimer := time.NewTimer(time.Duration(float64(lastScrapeInterval) * rand.Float64()))
select {
@ -306,12 +306,12 @@ func (t *Target) RunScraper(sampleAppender storage.SampleAppender) {
// StopScraper implements Target.
func (t *Target) StopScraper() {
glog.V(1).Infof("Stopping scraper for target %v...", t)
log.Debugf("Stopping scraper for target %v...", t)
close(t.scraperStopping)
<-t.scraperStopped
glog.V(1).Infof("Scraper for target %v stopped.", t)
log.Debugf("Scraper for target %v stopped.", t)
}
const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,application/json;schema="prometheus/telemetry";version=0.0.2;q=0.2,*/*;q=0.1`
View file
@ -18,7 +18,7 @@ import (
"strings"
"sync"
"github.com/golang/glog"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -72,7 +72,7 @@ func NewTargetManager(sampleAppender storage.SampleAppender) *TargetManager {
// Run starts background processing to handle target updates.
func (tm *TargetManager) Run() {
glog.Info("Starting target manager...")
log.Info("Starting target manager...")
sources := map[string]struct{}{}
@ -107,10 +107,10 @@ func (tm *TargetManager) Run() {
// context of the given job config.
func (tm *TargetManager) handleTargetUpdates(cfg *config.ScrapeConfig, ch <-chan *config.TargetGroup) {
for tg := range ch {
glog.V(1).Infof("Received potential update for target group %q", tg.Source)
log.Debugf("Received potential update for target group %q", tg.Source)
if err := tm.updateTargetGroup(tg, cfg); err != nil {
glog.Errorf("Error updating targets: %s", err)
log.Errorf("Error updating targets: %s", err)
}
}
}
@ -136,8 +136,8 @@ func (tm *TargetManager) Stop() {
// stop background processing of the target manager. If removeTargets is true,
// existing targets will be stopped and removed.
func (tm *TargetManager) stop(removeTargets bool) {
glog.Info("Stopping target manager...")
defer glog.Info("Target manager stopped.")
log.Info("Stopping target manager...")
defer log.Info("Target manager stopped.")
for _, provs := range tm.providers {
for _, p := range provs {
View file
@ -19,8 +19,8 @@ import (
"sync"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -119,7 +119,7 @@ func NewManager(o *ManagerOptions) *Manager {
// Run the rule manager's periodic rule evaluation.
func (m *Manager) Run() {
defer glog.Info("Rule manager stopped.")
defer log.Info("Rule manager stopped.")
m.Lock()
lastInterval := m.interval
@ -159,7 +159,7 @@ func (m *Manager) Run() {
// Stop the rule manager's rule evaluation cycles.
func (m *Manager) Stop() {
glog.Info("Stopping rule manager...")
log.Info("Stopping rule manager...")
m.done <- true
}
@ -197,7 +197,7 @@ func (m *Manager) queueAlertNotifications(rule *AlertingRule, timestamp clientmo
result, err := template.Expand()
if err != nil {
result = err.Error()
glog.Warningf("Error expanding alert template %v with data '%v': %v", rule.Name(), tmplData, err)
log.Warnf("Error expanding alert template %v with data '%v': %v", rule.Name(), tmplData, err)
}
return result
}
@ -238,7 +238,7 @@ func (m *Manager) runIteration() {
if err != nil {
evalFailures.Inc()
glog.Warningf("Error while evaluating rule %q: %s", rule, err)
log.Warnf("Error while evaluating rule %q: %s", rule, err)
return
}
@ -283,7 +283,7 @@ func (m *Manager) ApplyConfig(conf *config.Config) {
if err := m.loadRuleFiles(conf.RuleFiles...); err != nil {
// If loading the new rules failed, restore the old rule set.
m.rules = rulesSnapshot
glog.Errorf("Error loading rules, previous rule set restored: %s", err)
log.Errorf("Error loading rules, previous rule set restored: %s", err)
}
}
View file
@ -21,7 +21,7 @@ import (
"strings"
"sync/atomic"
"github.com/golang/glog"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -36,7 +36,7 @@ import (
// queue as started by newPersistence).
func (p *persistence) recoverFromCrash(fingerprintToSeries map[clientmodel.Fingerprint]*memorySeries) error {
// TODO(beorn): We need proper tests for the crash recovery.
glog.Warning("Starting crash recovery. Prometheus is inoperational until complete.")
log.Warn("Starting crash recovery. Prometheus is inoperational until complete.")
fpsSeen := map[clientmodel.Fingerprint]struct{}{}
count := 0
@ -48,7 +48,7 @@ func (p *persistence) recoverFromCrash(fingerprintToSeries map[clientmodel.Finge
// The mappings to rebuild.
fpm := fpMappings{}
glog.Info("Scanning files.")
log.Info("Scanning files.")
for i := 0; i < 1<<(seriesDirNameLen*4); i++ {
dirname := path.Join(p.basePath, fmt.Sprintf(seriesDirNameFmt, i))
dir, err := os.Open(dirname)
@ -70,14 +70,14 @@ func (p *persistence) recoverFromCrash(fingerprintToSeries map[clientmodel.Finge
}
count++
if count%10000 == 0 {
glog.Infof("%d files scanned.", count)
log.Infof("%d files scanned.", count)
}
}
}
}
glog.Infof("File scan complete. %d series found.", len(fpsSeen))
log.Infof("File scan complete. %d series found.", len(fpsSeen))
glog.Info("Checking for series without series file.")
log.Info("Checking for series without series file.")
for fp, s := range fingerprintToSeries {
if _, seen := fpsSeen[fp]; !seen {
// fp exists in fingerprintToSeries, but has no representation on disk.
@ -92,7 +92,7 @@ func (p *persistence) recoverFromCrash(fingerprintToSeries map[clientmodel.Finge
// to unindex it, just in case it's in the indexes.
p.unindexMetric(fp, s.metric)
}
glog.Warningf("Lost series detected: fingerprint %v, metric %v.", fp, s.metric)
log.Warnf("Lost series detected: fingerprint %v, metric %v.", fp, s.metric)
continue
}
// If we are here, the only chunks we have are the chunks in the checkpoint.
@ -100,12 +100,12 @@ func (p *persistence) recoverFromCrash(fingerprintToSeries map[clientmodel.Finge
if s.persistWatermark > 0 || s.chunkDescsOffset != 0 {
minLostChunks := s.persistWatermark + s.chunkDescsOffset
if minLostChunks <= 0 {
glog.Warningf(
log.Warnf(
"Possible loss of chunks for fingerprint %v, metric %v.",
fp, s.metric,
)
} else {
glog.Warningf(
log.Warnf(
"Lost at least %d chunks for fingerprint %v, metric %v.",
minLostChunks, fp, s.metric,
)
@ -122,7 +122,7 @@ func (p *persistence) recoverFromCrash(fingerprintToSeries map[clientmodel.Finge
fpsSeen[fp] = struct{}{} // Add so that fpsSeen is complete.
}
}
glog.Info("Check for series without series file complete.")
log.Info("Check for series without series file complete.")
if err := p.cleanUpArchiveIndexes(fingerprintToSeries, fpsSeen, fpm); err != nil {
return err
@ -138,7 +138,7 @@ func (p *persistence) recoverFromCrash(fingerprintToSeries map[clientmodel.Finge
}
p.setDirty(false)
glog.Warning("Crash recovery complete.")
log.Warn("Crash recovery complete.")
return nil
}
@ -178,9 +178,9 @@ func (p *persistence) sanitizeSeries(
var err error
defer func() {
if err != nil {
glog.Errorf("Failed to move lost series file %s to orphaned directory, deleting it instead. Error was: %s", filename, err)
log.Errorf("Failed to move lost series file %s to orphaned directory, deleting it instead. Error was: %s", filename, err)
if err = os.Remove(filename); err != nil {
glog.Errorf("Even deleting file %s did not work: %s", filename, err)
log.Errorf("Even deleting file %s did not work: %s", filename, err)
}
}
}()
@ -196,12 +196,12 @@ func (p *persistence) sanitizeSeries(
var fp clientmodel.Fingerprint
if len(fi.Name()) != fpLen-seriesDirNameLen+len(seriesFileSuffix) ||
!strings.HasSuffix(fi.Name(), seriesFileSuffix) {
glog.Warningf("Unexpected series file name %s.", filename)
log.Warnf("Unexpected series file name %s.", filename)
purge()
return fp, false
}
if err := fp.LoadFromString(path.Base(dirname) + fi.Name()[:fpLen-seriesDirNameLen]); err != nil {
glog.Warningf("Error parsing file name %s: %s", filename, err)
log.Warnf("Error parsing file name %s: %s", filename, err)
purge()
return fp, false
}
@ -210,24 +210,24 @@ func (p *persistence) sanitizeSeries(
chunksInFile := int(fi.Size()) / chunkLenWithHeader
modTime := fi.ModTime()
if bytesToTrim != 0 {
glog.Warningf(
log.Warnf(
"Truncating file %s to exactly %d chunks, trimming %d extraneous bytes.",
filename, chunksInFile, bytesToTrim,
)
f, err := os.OpenFile(filename, os.O_WRONLY, 0640)
if err != nil {
glog.Errorf("Could not open file %s: %s", filename, err)
log.Errorf("Could not open file %s: %s", filename, err)
purge()
return fp, false
}
if err := f.Truncate(fi.Size() - bytesToTrim); err != nil {
glog.Errorf("Failed to truncate file %s: %s", filename, err)
log.Errorf("Failed to truncate file %s: %s", filename, err)
purge()
return fp, false
}
}
if chunksInFile == 0 {
glog.Warningf("No chunks left in file %s.", filename)
log.Warnf("No chunks left in file %s.", filename)
purge()
return fp, false
}
@ -254,7 +254,7 @@ func (p *persistence) sanitizeSeries(
// heads.db. Treat this series as a freshly unarchived
// one. No chunks or chunkDescs in memory, no current
// head chunk.
glog.Warningf(
log.Warnf(
"Treating recovered metric %v, fingerprint %v, as freshly unarchived, with %d chunks in series file.",
s.metric, fp, chunksInFile,
)
@ -278,7 +278,7 @@ func (p *persistence) sanitizeSeries(
// Load all the chunk descs (which assumes we have none from the future).
cds, err := p.loadChunkDescs(fp, clientmodel.Now())
if err != nil {
glog.Errorf(
log.Errorf(
"Failed to load chunk descriptors for metric %v, fingerprint %v: %s",
s.metric, fp, err,
)
@ -298,7 +298,7 @@ func (p *persistence) sanitizeSeries(
}
}
if keepIdx == -1 {
glog.Warningf(
log.Warnf(
"Recovered metric %v, fingerprint %v: all %d chunks recovered from series file.",
s.metric, fp, chunksInFile,
)
@ -308,7 +308,7 @@ func (p *persistence) sanitizeSeries(
s.headChunkClosed = true
return fp, true
}
glog.Warningf(
log.Warnf(
"Recovered metric %v, fingerprint %v: recovered %d chunks from series file, recovered %d chunks from checkpoint.",
s.metric, fp, chunksInFile, len(s.chunkDescs)-keepIdx,
)
@ -320,7 +320,7 @@ func (p *persistence) sanitizeSeries(
// This series is supposed to be archived.
metric, err := p.getArchivedMetric(fp)
if err != nil {
glog.Errorf(
log.Errorf(
"Fingerprint %v assumed archived but couldn't be looked up in archived index: %s",
fp, err,
)
@ -328,7 +328,7 @@ func (p *persistence) sanitizeSeries(
return fp, false
}
if metric == nil {
glog.Warningf(
log.Warnf(
"Fingerprint %v assumed archived but couldn't be found in archived index.",
fp,
)
@ -345,14 +345,14 @@ func (p *persistence) cleanUpArchiveIndexes(
fpsSeen map[clientmodel.Fingerprint]struct{},
fpm fpMappings,
) error {
glog.Info("Cleaning up archive indexes.")
log.Info("Cleaning up archive indexes.")
var fp codable.Fingerprint
var m codable.Metric
count := 0
if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
count++
if count%10000 == 0 {
glog.Infof("%d archived metrics checked.", count)
log.Infof("%d archived metrics checked.", count)
}
if err := kv.Key(&fp); err != nil {
return err
@ -364,10 +364,10 @@ func (p *persistence) cleanUpArchiveIndexes(
}
if !fpSeen || inMemory {
if inMemory {
glog.Warningf("Archive clean-up: Fingerprint %v is not archived. Purging from archive indexes.", clientmodel.Fingerprint(fp))
log.Warnf("Archive clean-up: Fingerprint %v is not archived. Purging from archive indexes.", clientmodel.Fingerprint(fp))
}
if !fpSeen {
glog.Warningf("Archive clean-up: Fingerprint %v is unknown. Purging from archive indexes.", clientmodel.Fingerprint(fp))
log.Warnf("Archive clean-up: Fingerprint %v is unknown. Purging from archive indexes.", clientmodel.Fingerprint(fp))
}
// It's fine if the fp is not in the archive indexes.
if _, err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
@ -390,7 +390,7 @@ func (p *persistence) cleanUpArchiveIndexes(
if has {
return nil // All good.
}
glog.Warningf("Archive clean-up: Fingerprint %v is not in time-range index. Unarchiving it for recovery.")
log.Warnf("Archive clean-up: Fingerprint %v is not in time-range index. Unarchiving it for recovery.")
// Again, it's fine if fp is not in the archive index.
if _, err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
return err
@ -412,7 +412,7 @@ func (p *persistence) cleanUpArchiveIndexes(
if err := p.archivedFingerprintToTimeRange.ForEach(func(kv index.KeyValueAccessor) error {
count++
if count%10000 == 0 {
glog.Infof("%d archived time ranges checked.", count)
log.Infof("%d archived time ranges checked.", count)
}
if err := kv.Key(&fp); err != nil {
return err
@ -424,19 +424,19 @@ func (p *persistence) cleanUpArchiveIndexes(
if has {
return nil // All good.
}
glog.Warningf("Archive clean-up: Purging unknown fingerprint %v in time-range index.", fp)
log.Warnf("Archive clean-up: Purging unknown fingerprint %v in time-range index.", fp)
deleted, err := p.archivedFingerprintToTimeRange.Delete(fp)
if err != nil {
return err
}
if !deleted {
glog.Errorf("Fingerprint %v to be deleted from archivedFingerprintToTimeRange not found. This should never happen.", fp)
log.Errorf("Fingerprint %v to be deleted from archivedFingerprintToTimeRange not found. This should never happen.", fp)
}
return nil
}); err != nil {
return err
}
glog.Info("Clean-up of archive indexes complete.")
log.Info("Clean-up of archive indexes complete.")
return nil
}
@ -444,16 +444,16 @@ func (p *persistence) rebuildLabelIndexes(
fpToSeries map[clientmodel.Fingerprint]*memorySeries,
) error {
count := 0
glog.Info("Rebuilding label indexes.")
glog.Info("Indexing metrics in memory.")
log.Info("Rebuilding label indexes.")
log.Info("Indexing metrics in memory.")
for fp, s := range fpToSeries {
p.indexMetric(fp, s.metric)
count++
if count%10000 == 0 {
glog.Infof("%d metrics queued for indexing.", count)
log.Infof("%d metrics queued for indexing.", count)
}
}
glog.Info("Indexing archived metrics.")
log.Info("Indexing archived metrics.")
var fp codable.Fingerprint
var m codable.Metric
if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
@ -466,20 +466,20 @@ func (p *persistence) rebuildLabelIndexes(
p.indexMetric(clientmodel.Fingerprint(fp), clientmodel.Metric(m))
count++
if count%10000 == 0 {
glog.Infof("%d metrics queued for indexing.", count)
log.Infof("%d metrics queued for indexing.", count)
}
return nil
}); err != nil {
return err
}
glog.Info("All requests for rebuilding the label indexes queued. (Actual processing may lag behind.)")
log.Info("All requests for rebuilding the label indexes queued. (Actual processing may lag behind.)")
return nil
}
// maybeAddMapping adds a fingerprint mapping to fpm if the FastFingerprint of m is different from fp.
func maybeAddMapping(fp clientmodel.Fingerprint, m clientmodel.Metric, fpm fpMappings) {
if rawFP := m.FastFingerprint(); rawFP != fp {
glog.Warningf(
log.Warnf(
"Metric %v with fingerprint %v is mapped from raw fingerprint %v.",
m, fp, rawFP,
)
View file
@ -7,7 +7,7 @@ import (
"sync"
"sync/atomic"
"github.com/golang/glog"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
)
@ -133,7 +133,7 @@ func (r *fpMapper) maybeAddMapping(
// Checkpoint mappings after each change.
err := r.p.checkpointFPMappings(r.mappings)
r.mtx.RUnlock()
glog.Infof(
log.Infof(
"Collision detected for fingerprint %v, metric %v, mapping to new fingerprint %v.",
fp, collidingMetric, mappedFP,
)
@ -147,7 +147,7 @@ func (r *fpMapper) maybeAddMapping(
// Checkpoint mappings after each change.
err := r.p.checkpointFPMappings(r.mappings)
r.mtx.Unlock()
glog.Infof(
log.Infof(
"Collision detected for fingerprint %v, metric %v, mapping to new fingerprint %v.",
fp, collidingMetric, mappedFP,
)
View file
@ -28,8 +28,8 @@ import (
"sync/atomic"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -174,7 +174,7 @@ func newPersistence(basePath string, dirty, pedanticChecks bool, shouldSync sync
fLock, dirtyfileExisted, err := flock.New(dirtyPath)
if err != nil {
glog.Errorf("Could not lock %s, Prometheus already running?", dirtyPath)
log.Errorf("Could not lock %s, Prometheus already running?", dirtyPath)
return nil, err
}
if dirtyfileExisted {
@ -315,7 +315,7 @@ func (p *persistence) setDirty(dirty bool) {
p.dirty = dirty
if dirty {
p.becameDirty = true
glog.Error("The storage is now inconsistent. Restart Prometheus ASAP to initiate recovery.")
log.Error("The storage is now inconsistent. Restart Prometheus ASAP to initiate recovery.")
}
}
@ -352,7 +352,7 @@ func (p *persistence) getLabelValuesForLabelName(ln clientmodel.LabelName) (clie
func (p *persistence) persistChunks(fp clientmodel.Fingerprint, chunks []chunk) (index int, err error) {
defer func() {
if err != nil {
glog.Error("Error persisting chunks: ", err)
log.Error("Error persisting chunks: ", err)
p.setDirty(true)
}
}()
@ -530,7 +530,7 @@ func (p *persistence) loadChunkDescs(fp clientmodel.Fingerprint, beforeTime clie
// (4.8.2.2) The chunk itself, marshaled with the marshal() method.
//
func (p *persistence) checkpointSeriesMapAndHeads(fingerprintToSeries *seriesMap, fpLocker *fingerprintLocker) (err error) {
glog.Info("Checkpointing in-memory metrics and chunks...")
log.Info("Checkpointing in-memory metrics and chunks...")
begin := time.Now()
f, err := os.OpenFile(p.headsTempFileName(), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
if err != nil {
@ -550,7 +550,7 @@ func (p *persistence) checkpointSeriesMapAndHeads(fingerprintToSeries *seriesMap
err = os.Rename(p.headsTempFileName(), p.headsFileName())
duration := time.Since(begin)
p.checkpointDuration.Set(float64(duration) / float64(time.Millisecond))
glog.Infof("Done checkpointing in-memory metrics and chunks in %v.", duration)
log.Infof("Done checkpointing in-memory metrics and chunks in %v.", duration)
}()
w := bufio.NewWriterSize(f, fileBufSize)
@ -678,7 +678,7 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
defer func() {
if sm != nil && p.dirty {
glog.Warning("Persistence layer appears dirty.")
log.Warn("Persistence layer appears dirty.")
err = p.recoverFromCrash(fingerprintToSeries)
if err != nil {
sm = nil
@ -694,7 +694,7 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
return sm, 0, nil
}
if err != nil {
glog.Warning("Could not open heads file:", err)
log.Warn("Could not open heads file:", err)
p.dirty = true
return
}
@ -703,13 +703,13 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
buf := make([]byte, len(headsMagicString))
if _, err := io.ReadFull(r, buf); err != nil {
glog.Warning("Could not read from heads file:", err)
log.Warn("Could not read from heads file:", err)
p.dirty = true
return sm, 0, nil
}
magic := string(buf)
if magic != headsMagicString {
glog.Warningf(
log.Warnf(
"unexpected magic string, want %q, got %q",
headsMagicString, magic,
)
@ -718,13 +718,13 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
}
version, err := binary.ReadVarint(r)
if (version != headsFormatVersion && version != headsFormatLegacyVersion) || err != nil {
glog.Warningf("unknown heads format version, want %d", headsFormatVersion)
log.Warnf("unknown heads format version, want %d", headsFormatVersion)
p.dirty = true
return sm, 0, nil
}
numSeries, err := codable.DecodeUint64(r)
if err != nil {
glog.Warning("Could not decode number of series:", err)
log.Warn("Could not decode number of series:", err)
p.dirty = true
return sm, 0, nil
}
@ -732,20 +732,20 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
for ; numSeries > 0; numSeries-- {
seriesFlags, err := r.ReadByte()
if err != nil {
glog.Warning("Could not read series flags:", err)
log.Warn("Could not read series flags:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
headChunkPersisted := seriesFlags&flagHeadChunkPersisted != 0
fp, err := codable.DecodeUint64(r)
if err != nil {
glog.Warning("Could not decode fingerprint:", err)
log.Warn("Could not decode fingerprint:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
var metric codable.Metric
if err := metric.UnmarshalFromReader(r); err != nil {
glog.Warning("Could not decode metric:", err)
log.Warn("Could not decode metric:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
@ -755,13 +755,13 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
// persistWatermark only present in v2.
persistWatermark, err = binary.ReadVarint(r)
if err != nil {
glog.Warning("Could not decode persist watermark:", err)
log.Warn("Could not decode persist watermark:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
modTimeNano, err := binary.ReadVarint(r)
if err != nil {
glog.Warning("Could not decode modification time:", err)
log.Warn("Could not decode modification time:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
@ -771,19 +771,19 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
}
chunkDescsOffset, err := binary.ReadVarint(r)
if err != nil {
glog.Warning("Could not decode chunk descriptor offset:", err)
log.Warn("Could not decode chunk descriptor offset:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
savedFirstTime, err := binary.ReadVarint(r)
if err != nil {
glog.Warning("Could not decode saved first time:", err)
log.Warn("Could not decode saved first time:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
numChunkDescs, err := binary.ReadVarint(r)
if err != nil {
glog.Warning("Could not decode number of chunk descriptors:", err)
log.Warn("Could not decode number of chunk descriptors:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
@ -800,13 +800,13 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
if i < persistWatermark {
firstTime, err := binary.ReadVarint(r)
if err != nil {
glog.Warning("Could not decode first time:", err)
log.Warn("Could not decode first time:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
lastTime, err := binary.ReadVarint(r)
if err != nil {
glog.Warning("Could not decode last time:", err)
log.Warn("Could not decode last time:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
@ -819,13 +819,13 @@ func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist in
// Non-persisted chunk.
encoding, err := r.ReadByte()
if err != nil {
glog.Warning("Could not decode chunk type:", err)
log.Warn("Could not decode chunk type:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
chunk := newChunkForEncoding(chunkEncoding(encoding))
if err := chunk.unmarshal(r); err != nil {
glog.Warning("Could not decode chunk:", err)
log.Warn("Could not decode chunk:", err)
p.dirty = true
return sm, chunksToPersist, nil
}
@ -870,7 +870,7 @@ func (p *persistence) dropAndPersistChunks(
// please handle with care!
defer func() {
if err != nil {
glog.Error("Error dropping and/or persisting chunks: ", err)
log.Error("Error dropping and/or persisting chunks: ", err)
p.setDirty(true)
}
}()
@ -1146,14 +1146,14 @@ func (p *persistence) purgeArchivedMetric(fp clientmodel.Fingerprint) (err error
return err
}
if !deleted {
glog.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToMetrics index. This should never happen.", fp)
log.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToMetrics index. This should never happen.", fp)
}
deleted, err = p.archivedFingerprintToTimeRange.Delete(codable.Fingerprint(fp))
if err != nil {
return err
}
if !deleted {
glog.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToTimeRange index. This should never happen.", fp)
log.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToTimeRange index. This should never happen.", fp)
}
p.unindexMetric(fp, metric)
return nil
@ -1183,14 +1183,14 @@ func (p *persistence) unarchiveMetric(fp clientmodel.Fingerprint) (
return false, firstTime, err
}
if !deleted {
glog.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToMetrics index. This should never happen.", fp)
log.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToMetrics index. This should never happen.", fp)
}
deleted, err = p.archivedFingerprintToTimeRange.Delete(codable.Fingerprint(fp))
if err != nil {
return false, firstTime, err
}
if !deleted {
glog.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToTimeRange index. This should never happen.", fp)
log.Errorf("Tried to delete non-archived fingerprint %s from archivedFingerprintToTimeRange index. This should never happen.", fp)
}
return true, firstTime, nil
}
@ -1205,26 +1205,26 @@ func (p *persistence) close() error {
var lastError, dirtyFileRemoveError error
if err := p.archivedFingerprintToMetrics.Close(); err != nil {
lastError = err
glog.Error("Error closing archivedFingerprintToMetric index DB: ", err)
log.Error("Error closing archivedFingerprintToMetric index DB: ", err)
}
if err := p.archivedFingerprintToTimeRange.Close(); err != nil {
lastError = err
glog.Error("Error closing archivedFingerprintToTimeRange index DB: ", err)
log.Error("Error closing archivedFingerprintToTimeRange index DB: ", err)
}
if err := p.labelPairToFingerprints.Close(); err != nil {
lastError = err
glog.Error("Error closing labelPairToFingerprints index DB: ", err)
log.Error("Error closing labelPairToFingerprints index DB: ", err)
}
if err := p.labelNameToLabelValues.Close(); err != nil {
lastError = err
glog.Error("Error closing labelNameToLabelValues index DB: ", err)
log.Error("Error closing labelNameToLabelValues index DB: ", err)
}
if lastError == nil && !p.isDirty() {
dirtyFileRemoveError = os.Remove(p.dirtyFileName)
}
if err := p.fLock.Release(); err != nil {
lastError = err
glog.Error("Error releasing file lock: ", err)
log.Error("Error releasing file lock: ", err)
}
if dirtyFileRemoveError != nil {
// On Windows, removing the dirty file before unlocking is not
@ -1266,11 +1266,11 @@ func (p *persistence) openChunkFileForWriting(fp clientmodel.Fingerprint) (*os.F
func (p *persistence) closeChunkFile(f *os.File) {
if p.shouldSync() {
if err := f.Sync(); err != nil {
glog.Error("Error syncing file:", err)
log.Error("Error syncing file:", err)
}
}
if err := f.Close(); err != nil {
glog.Error("Error closing chunk file:", err)
log.Error("Error closing chunk file:", err)
}
}
@ -1310,10 +1310,10 @@ func (p *persistence) processIndexingQueue() {
}(time.Now())
if err := p.labelPairToFingerprints.IndexBatch(pairToFPs); err != nil {
glog.Error("Error indexing label pair to fingerprints batch: ", err)
log.Error("Error indexing label pair to fingerprints batch: ", err)
}
if err := p.labelNameToLabelValues.IndexBatch(nameToValues); err != nil {
glog.Error("Error indexing label name to label values batch: ", err)
log.Error("Error indexing label name to label values batch: ", err)
}
batchSize = 0
nameToValues = index.LabelNameLabelValuesMapping{}
@ -1362,7 +1362,7 @@ loop:
var err error
baseFPs, _, err = p.labelPairToFingerprints.LookupSet(lp)
if err != nil {
glog.Errorf("Error looking up label pair %v: %s", lp, err)
log.Errorf("Error looking up label pair %v: %s", lp, err)
continue
}
pairToFPs[lp] = baseFPs
@ -1372,7 +1372,7 @@ loop:
var err error
baseValues, _, err = p.labelNameToLabelValues.LookupSet(ln)
if err != nil {
glog.Errorf("Error looking up label name %v: %s", ln, err)
log.Errorf("Error looking up label name %v: %s", ln, err)
continue
}
nameToValues[ln] = baseValues
@ -1422,7 +1422,7 @@ loop:
// (4.3.2) The unique metric string.
// (4.3.3) The mapped fingerprint as big-endian uint64.
func (p *persistence) checkpointFPMappings(fpm fpMappings) (err error) {
glog.Info("Checkpointing fingerprint mappings...")
log.Info("Checkpointing fingerprint mappings...")
begin := time.Now()
f, err := os.OpenFile(p.mappingsTempFileName(), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
if err != nil {
@ -1441,7 +1441,7 @@ func (p *persistence) checkpointFPMappings(fpm fpMappings) (err error) {
}
err = os.Rename(p.mappingsTempFileName(), p.mappingsFileName())
duration := time.Since(begin)
glog.Infof("Done checkpointing fingerprint mappings in %v.", duration)
log.Infof("Done checkpointing fingerprint mappings in %v.", duration)
}()
w := bufio.NewWriterSize(f, fileBufSize)
View file
@ -19,8 +19,8 @@ import (
"sync/atomic"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -212,12 +212,12 @@ func (s *memorySeriesStorage) Start() error {
}
s.persistence = p
glog.Info("Loading series map and head chunks...")
log.Info("Loading series map and head chunks...")
s.fpToSeries, s.numChunksToPersist, err = p.loadSeriesMapAndHeads()
if err != nil {
return err
}
glog.Infof("%d series loaded.", s.fpToSeries.length())
log.Infof("%d series loaded.", s.fpToSeries.length())
s.numSeries.Set(float64(s.fpToSeries.length()))
mapper, err := newFPMapper(s.fpToSeries, p)
@ -236,13 +236,13 @@ func (s *memorySeriesStorage) Start() error {
// Stop implements Storage.
func (s *memorySeriesStorage) Stop() error {
glog.Info("Stopping local storage...")
log.Info("Stopping local storage...")
glog.Info("Stopping maintenance loop...")
log.Info("Stopping maintenance loop...")
close(s.loopStopping)
<-s.loopStopped
glog.Info("Stopping chunk eviction...")
log.Info("Stopping chunk eviction...")
close(s.evictStopping)
<-s.evictStopped
@ -254,7 +254,7 @@ func (s *memorySeriesStorage) Stop() error {
if err := s.persistence.close(); err != nil {
return err
}
glog.Info("Local storage stopped.")
log.Info("Local storage stopped.")
return nil
}
@ -304,7 +304,7 @@ func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metr
},
)
if err != nil {
glog.Error("Error getting fingerprints for label pair: ", err)
log.Error("Error getting fingerprints for label pair: ", err)
}
if len(fps) == 0 {
return nil
@ -317,7 +317,7 @@ func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metr
default:
values, err := s.persistence.getLabelValuesForLabelName(matcher.Name)
if err != nil {
glog.Errorf("Error getting label values for label name %q: %v", matcher.Name, err)
log.Errorf("Error getting label values for label name %q: %v", matcher.Name, err)
}
matches := matcher.Filter(values)
if len(matches) == 0 {
@ -331,7 +331,7 @@ func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metr
},
)
if err != nil {
glog.Error("Error getting fingerprints for label pair: ", err)
log.Error("Error getting fingerprints for label pair: ", err)
}
for _, fp := range fps {
if _, ok := result[fp]; ok || result == nil {
@ -357,7 +357,7 @@ func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metr
func (s *memorySeriesStorage) GetLabelValuesForLabelName(labelName clientmodel.LabelName) clientmodel.LabelValues {
lvs, err := s.persistence.getLabelValuesForLabelName(labelName)
if err != nil {
glog.Errorf("Error getting label values for label name %q: %v", labelName, err)
log.Errorf("Error getting label values for label name %q: %v", labelName, err)
}
return lvs
}
@ -377,7 +377,7 @@ func (s *memorySeriesStorage) GetMetricForFingerprint(fp clientmodel.Fingerprint
}
metric, err := s.persistence.getArchivedMetric(fp)
if err != nil {
glog.Errorf("Error retrieving archived metric for fingerprint %v: %v", fp, err)
log.Errorf("Error retrieving archived metric for fingerprint %v: %v", fp, err)
}
return clientmodel.COWMetric{
Metric: metric,
@ -387,20 +387,20 @@ func (s *memorySeriesStorage) GetMetricForFingerprint(fp clientmodel.Fingerprint
// Append implements Storage.
func (s *memorySeriesStorage) Append(sample *clientmodel.Sample) {
if s.getNumChunksToPersist() >= s.maxChunksToPersist {
glog.Warningf(
log.Warnf(
"%d chunks waiting for persistence, sample ingestion suspended.",
s.getNumChunksToPersist(),
)
for s.getNumChunksToPersist() >= s.maxChunksToPersist {
time.Sleep(time.Second)
}
glog.Warning("Sample ingestion resumed.")
log.Warn("Sample ingestion resumed.")
}
rawFP := sample.Metric.FastFingerprint()
s.fpLocker.Lock(rawFP)
fp, err := s.mapper.mapFP(rawFP, sample.Metric)
if err != nil {
glog.Errorf("Error while mapping fingerprint %v: %v", rawFP, err)
log.Errorf("Error while mapping fingerprint %v: %v", rawFP, err)
s.persistence.setDirty(true)
}
if fp != rawFP {
@ -423,7 +423,7 @@ func (s *memorySeriesStorage) getOrCreateSeries(fp clientmodel.Fingerprint, m cl
if !ok {
unarchived, firstTime, err := s.persistence.unarchiveMetric(fp)
if err != nil {
glog.Errorf("Error unarchiving fingerprint %v: %v", fp, err)
log.Errorf("Error unarchiving fingerprint %v: %v", fp, err)
}
if unarchived {
s.seriesOps.WithLabelValues(unarchive).Inc()
@ -507,7 +507,7 @@ func (s *memorySeriesStorage) handleEvictList() {
}
}()
ticker.Stop()
glog.Info("Chunk eviction stopped.")
log.Info("Chunk eviction stopped.")
close(s.evictStopped)
return
}
@ -627,7 +627,7 @@ func (s *memorySeriesStorage) cycleThroughMemoryFingerprints() chan clientmodel.
count++
}
if count > 0 {
glog.Infof(
log.Infof(
"Completed maintenance sweep through %d in-memory fingerprints in %v.",
count, time.Since(begin),
)
@ -651,7 +651,7 @@ func (s *memorySeriesStorage) cycleThroughArchivedFingerprints() chan clientmode
clientmodel.TimestampFromTime(time.Now()).Add(-s.dropAfter),
)
if err != nil {
glog.Error("Failed to lookup archived fingerprint ranges: ", err)
log.Error("Failed to lookup archived fingerprint ranges: ", err)
s.waitForNextFP(0, 1)
continue
}
@ -670,7 +670,7 @@ func (s *memorySeriesStorage) cycleThroughArchivedFingerprints() chan clientmode
s.waitForNextFP(len(archivedFPs), 1)
}
if len(archivedFPs) > 0 {
glog.Infof(
log.Infof(
"Completed maintenance sweep through %d archived fingerprints in %v.",
len(archivedFPs), time.Since(begin),
)
@ -687,7 +687,7 @@ func (s *memorySeriesStorage) loop() {
defer func() {
checkpointTimer.Stop()
glog.Info("Maintenance loop stopped.")
log.Info("Maintenance loop stopped.")
close(s.loopStopped)
}()
@ -807,7 +807,7 @@ func (s *memorySeriesStorage) maintainMemorySeries(
if len(series.chunkDescs) == 0 {
cds, err := s.loadChunkDescs(fp, clientmodel.Latest)
if err != nil {
glog.Errorf(
log.Errorf(
"Could not load chunk descriptors prior to archiving metric %v, metric will not be archived: %v",
series.metric, err,
)
@ -818,7 +818,7 @@ func (s *memorySeriesStorage) maintainMemorySeries(
if err := s.persistence.archiveMetric(
fp, series.metric, series.firstTime(), series.head().lastTime(),
); err != nil {
glog.Errorf("Error archiving metric %v: %v", series.metric, err)
log.Errorf("Error archiving metric %v: %v", series.metric, err)
return
}
s.seriesOps.WithLabelValues(archive).Inc()
@ -899,7 +899,7 @@ func (s *memorySeriesStorage) writeMemorySeries(
} else {
series.chunkDescsOffset -= numDroppedFromPersistence
if series.chunkDescsOffset < 0 {
glog.Errorf("Dropped more chunks from persistence than from memory for fingerprint %v, series %v.", fp, series)
log.Errorf("Dropped more chunks from persistence than from memory for fingerprint %v, series %v.", fp, series)
s.persistence.setDirty(true)
series.chunkDescsOffset = -1 // Makes sure it will be looked at during crash recovery.
}
@ -921,7 +921,7 @@ func (s *memorySeriesStorage) maintainArchivedSeries(fp clientmodel.Fingerprint,
has, firstTime, lastTime, err := s.persistence.hasArchivedMetric(fp)
if err != nil {
glog.Error("Error looking up archived time range: ", err)
log.Error("Error looking up archived time range: ", err)
return
}
if !has || !firstTime.Before(beforeTime) {
@ -933,11 +933,11 @@ func (s *memorySeriesStorage) maintainArchivedSeries(fp clientmodel.Fingerprint,
newFirstTime, _, _, allDropped, err := s.persistence.dropAndPersistChunks(fp, beforeTime, nil)
if err != nil {
glog.Error("Error dropping persisted chunks: ", err)
log.Error("Error dropping persisted chunks: ", err)
}
if allDropped {
if err := s.persistence.purgeArchivedMetric(fp); err != nil {
glog.Errorf("Error purging archived metric for fingerprint %v: %v", fp, err)
log.Errorf("Error purging archived metric for fingerprint %v: %v", fp, err)
return
}
s.seriesOps.WithLabelValues(archivePurge).Inc()
@ -976,9 +976,9 @@ func (s *memorySeriesStorage) incNumChunksToPersist(by int) {
func (s *memorySeriesStorage) isDegraded() bool {
nowDegraded := s.getNumChunksToPersist() > s.maxChunksToPersist*percentChunksToPersistForDegradation/100
if s.degraded && !nowDegraded {
glog.Warning("Storage has left graceful degradation mode. Things are back to normal.")
log.Warn("Storage has left graceful degradation mode. Things are back to normal.")
} else if !s.degraded && nowDegraded {
glog.Warningf(
log.Warnf(
"%d chunks waiting for persistence (%d%% of the allowed maximum %d). Storage is now in graceful degradation mode. Series files are not synced anymore if following the adaptive strategy. Checkpoints are not performed more often than every %v. Series maintenance happens as frequently as possible.",
s.getNumChunksToPersist(),
s.getNumChunksToPersist()*100/s.maxChunksToPersist,

View file
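The storage.go hunks above do more than swap logger names: Append suspends sample ingestion while the persistence backlog is at its cap, warning once on suspend and once on resume, and isDegraded logs only on transitions into and out of graceful degradation. A minimal sketch of the ingestion throttle, assuming an atomic counter in place of the real getNumChunksToPersist/maxChunksToPersist fields:

```go
package sketch

import (
	"sync/atomic"
	"time"

	"github.com/prometheus/log"
)

// throttleIngestion blocks until the number of chunks waiting for
// persistence drops below max, logging once when ingestion is
// suspended and once when it resumes. pending is an illustrative
// stand-in for the storage's getNumChunksToPersist counter.
func throttleIngestion(pending *int64, max int64) {
	if atomic.LoadInt64(pending) < max {
		return
	}
	log.Warnf(
		"%d chunks waiting for persistence, sample ingestion suspended.",
		atomic.LoadInt64(pending),
	)
	for atomic.LoadInt64(pending) >= max {
		time.Sleep(time.Second)
	}
	log.Warn("Sample ingestion resumed.")
}
```

The once-per-transition logging matters here: a naive check inside the polling loop would emit the warning every second while suspended.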

@ -20,7 +20,7 @@ import (
"testing/quick"
"time"
"github.com/golang/glog"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -223,7 +223,7 @@ func testChunk(t *testing.T, encoding chunkEncoding) {
}
s.fpLocker.Unlock(m.fp)
}
glog.Info("test done, closing")
log.Info("test done, closing")
}
func TestChunkType0(t *testing.T) {

View file

@ -24,7 +24,7 @@ import (
"net/url"
"time"
"github.com/golang/glog"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -97,7 +97,7 @@ func (c *Client) Store(samples clientmodel.Samples) error {
if math.IsNaN(v) || math.IsInf(v, 0) {
// TODO(julius): figure out if it's possible to insert special float
// values into InfluxDB somehow.
glog.Warningf("cannot send value %f to InfluxDB, skipping sample %#v", v, s)
log.Warnf("cannot send value %f to InfluxDB, skipping sample %#v", v, s)
continue
}
metric := s.Metric[clientmodel.MetricNameLabel]

View file

@ -24,7 +24,7 @@ import (
"regexp"
"time"
"github.com/golang/glog"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -81,7 +81,7 @@ func (c *Client) Store(samples clientmodel.Samples) error {
for _, s := range samples {
v := float64(s.Value)
if math.IsNaN(v) || math.IsInf(v, 0) {
glog.Warningf("cannot send value %f to OpenTSDB, skipping sample %#v", v, s)
log.Warnf("cannot send value %f to OpenTSDB, skipping sample %#v", v, s)
continue
}
metric := TagValue(s.Metric[clientmodel.MetricNameLabel])

View file
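Both remote-storage clients above guard against non-finite sample values before building a write request; OpenTSDB rejects them outright, and the TODO in the InfluxDB client suggests support there was still an open question. The guard is identical in both files; a self-contained sketch, with a plain float64 slice standing in for clientmodel.Samples:

```go
package sketch

import (
	"math"

	"github.com/prometheus/log"
)

// filterFinite drops NaN and ±Inf values, logging each skipped
// sample the way the remote-storage clients above do.
func filterFinite(values []float64) []float64 {
	finite := values[:0] // reuse the backing array
	for _, v := range values {
		if math.IsNaN(v) || math.IsInf(v, 0) {
			log.Warnf("cannot send value %f to remote storage, skipping sample", v)
			continue
		}
		finite = append(finite, v)
	}
	return finite
}
```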

@ -16,10 +16,10 @@ package remote
import (
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/client_golang/prometheus"
)
const (
@ -132,20 +132,20 @@ func (t *StorageQueueManager) Append(s *clientmodel.Sample) {
case t.queue <- s:
default:
t.samplesCount.WithLabelValues(dropped).Inc()
glog.Warning("Remote storage queue full, discarding sample.")
log.Warn("Remote storage queue full, discarding sample.")
}
}
// Stop stops sending samples to the remote storage and waits for pending
// sends to complete.
func (t *StorageQueueManager) Stop() {
glog.Infof("Stopping remote storage...")
log.Infof("Stopping remote storage...")
close(t.queue)
<-t.drained
for i := 0; i < maxConcurrentSends; i++ {
t.sendSemaphore <- true
}
glog.Info("Remote storage stopped.")
log.Info("Remote storage stopped.")
}
// Describe implements prometheus.Collector.
@ -180,7 +180,7 @@ func (t *StorageQueueManager) sendSamples(s clientmodel.Samples) {
labelValue := success
if err != nil {
glog.Warningf("error sending %d samples to remote storage: %s", len(s), err)
log.Warnf("error sending %d samples to remote storage: %s", len(s), err)
labelValue = failure
t.sendErrors.Inc()
}
@ -201,9 +201,9 @@ func (t *StorageQueueManager) Run() {
select {
case s, ok := <-t.queue:
if !ok {
glog.Infof("Flushing %d samples to remote storage...", len(t.pendingSamples))
log.Infof("Flushing %d samples to remote storage...", len(t.pendingSamples))
t.flush()
glog.Infof("Done flushing.")
log.Infof("Done flushing.")
return
}

View file
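The queue manager above combines two channel idioms: Append enqueues without ever blocking the caller (a select with a default branch drops the sample when the buffer is full), and Stop drains in-flight work by acquiring every slot of a buffered semaphore channel, which can only complete once all concurrent senders have released theirs. A compact sketch of both, with hypothetical names and a float64 payload in place of *clientmodel.Sample:

```go
package sketch

import "github.com/prometheus/log"

const maxConcurrentSends = 10

type queueManager struct {
	queue         chan float64
	sendSemaphore chan bool // senders hold a slot while sending
}

func newQueueManager(queueCapacity int) *queueManager {
	return &queueManager{
		queue:         make(chan float64, queueCapacity),
		sendSemaphore: make(chan bool, maxConcurrentSends),
	}
}

// enqueue never blocks: when the queue is full the sample is
// dropped with a warning (the real manager also increments a
// dropped-samples counter).
func (m *queueManager) enqueue(s float64) {
	select {
	case m.queue <- s:
	default:
		log.Warn("Remote storage queue full, discarding sample.")
	}
}

// stop closes the queue, then fills every semaphore slot; this
// blocks until all concurrent senders have finished and released
// their slots.
func (m *queueManager) stop() {
	close(m.queue)
	for i := 0; i < maxConcurrentSends; i++ {
		m.sendSemaphore <- true
	}
	log.Info("Remote storage stopped.")
}
```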

@ -22,7 +22,7 @@ import (
"strconv"
"time"
"github.com/golang/glog"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -86,7 +86,7 @@ func (serv MetricsService) Query(w http.ResponseWriter, r *http.Request) {
httpJSONError(w, res.Err, http.StatusOK)
return
}
glog.V(1).Infof("Instant query: %s\nQuery stats:\n%s\n", expr, query.Stats())
log.Debugf("Instant query: %s\nQuery stats:\n%s\n", expr, query.Stats())
httputils.RespondJSON(w, res.Value)
}
@ -147,7 +147,7 @@ func (serv MetricsService) QueryRange(w http.ResponseWriter, r *http.Request) {
return
}
glog.V(1).Infof("Range query: %s\nQuery stats:\n%s\n", expr, query.Stats())
log.Debugf("Range query: %s\nQuery stats:\n%s\n", expr, query.Stats())
httputils.RespondJSON(w, matrix)
}
@ -160,7 +160,7 @@ func (serv MetricsService) Metrics(w http.ResponseWriter, r *http.Request) {
sort.Sort(metricNames)
resultBytes, err := json.Marshal(metricNames)
if err != nil {
glog.Error("Error marshalling metric names: ", err)
log.Error("Error marshalling metric names: ", err)
httpJSONError(w, fmt.Errorf("Error marshalling metric names: %s", err), http.StatusInternalServerError)
return
}

View file
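The API handlers above contain the one substitution in this migration that is not purely mechanical: glog's numeric verbosity guard becomes a call to a named debug level, so emission is controlled by the logger's configured level rather than a -v flag. The full call mapping, as observed across the hunks in this commit:

```go
package sketch

import "github.com/prometheus/log"

// Call mapping observed across the hunks in this commit:
//
//	glog.Info / Infof       -> log.Info / Infof
//	glog.Warning / Warningf -> log.Warn / Warnf
//	glog.Error / Errorf     -> log.Error / Errorf
//	glog.Errorln            -> log.Errorln
//	glog.V(1).Infof         -> log.Debugf

// logQueryStats shows the verbosity-to-level change in isolation.
// Before: glog.V(1).Infof(...) — emitted only under -v=1 or higher.
// After: emitted whenever the configured level is debug or finer.
func logQueryStats(expr, stats string) {
	log.Debugf("Instant query: %s\nQuery stats:\n%s\n", expr, stats)
}
```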

@ -8,7 +8,7 @@ import (
"net/http"
"strings"
"github.com/golang/glog"
"github.com/prometheus/log"
)
// Sub-directories for templates and static content.
@ -54,7 +54,7 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
file, err := GetFile(StaticFiles, name)
if err != nil {
if err != io.EOF {
glog.Warning("Could not get file: ", err)
log.Warn("Could not get file: ", err)
}
w.WriteHeader(http.StatusNotFound)
return

View file

@ -26,8 +26,8 @@ import (
pprof_runtime "runtime/pprof"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/log"
clientmodel "github.com/prometheus/client_golang/model"
@ -108,13 +108,13 @@ func (ws WebService) ServeForever(pathPrefix string) {
}))
}
glog.Infof("Listening on %s", *listenAddress)
log.Infof("Listening on %s", *listenAddress)
// If we cannot bind to a port, retry after 30 seconds.
for {
err := http.ListenAndServe(*listenAddress, nil)
if err != nil {
glog.Errorf("Could not listen on %s: %s", *listenAddress, err)
log.Errorf("Could not listen on %s: %s", *listenAddress, err)
}
time.Sleep(30 * time.Second)
}
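The hunk above makes the web listener survive bind failures: instead of exiting, it logs the error and retries every 30 seconds, which covers the case of an old process still holding the port during a restart. As a standalone sketch (the handler argument is illustrative; the original serves the default mux):

```go
package sketch

import (
	"net/http"
	"time"

	"github.com/prometheus/log"
)

// serveForever retries ListenAndServe indefinitely so that a
// transient bind failure does not permanently kill the server.
func serveForever(addr string, handler http.Handler) {
	log.Infof("Listening on %s", addr)
	for {
		if err := http.ListenAndServe(addr, handler); err != nil {
			log.Errorf("Could not listen on %s: %s", addr, err)
		}
		time.Sleep(30 * time.Second)
	}
}
```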
@ -136,14 +136,14 @@ func getTemplateFile(name string) (string, error) {
if *useLocalAssets {
file, err := ioutil.ReadFile(fmt.Sprintf("web/templates/%s.html", name))
if err != nil {
glog.Errorf("Could not read %s template: %s", name, err)
log.Errorf("Could not read %s template: %s", name, err)
return "", err
}
return string(file), nil
}
file, err := blob.GetFile(blob.TemplateFiles, name+".html")
if err != nil {
glog.Errorf("Could not read %s template: %s", name, err)
log.Errorf("Could not read %s template: %s", name, err)
return "", err
}
return string(file), nil
@ -178,7 +178,7 @@ func getTemplate(name string, pathPrefix string) (*template.Template, error) {
"globalURL": func(url string) string {
hostname, err := os.Hostname()
if err != nil {
glog.Warningf("Couldn't get hostname: %s, returning target.URL()", err)
log.Warnf("Couldn't get hostname: %s, returning target.URL()", err)
return url
}
for _, localhostRepresentation := range localhostRepresentations {
@ -190,22 +190,22 @@ func getTemplate(name string, pathPrefix string) (*template.Template, error) {
file, err := getTemplateFile("_base")
if err != nil {
glog.Errorln("Could not read base template:", err)
log.Errorln("Could not read base template:", err)
return nil, err
}
t, err = t.Parse(file)
if err != nil {
glog.Errorln("Could not parse base template:", err)
log.Errorln("Could not parse base template:", err)
}
file, err = getTemplateFile(name)
if err != nil {
glog.Error("Could not read template %s: %s", name, err)
log.Error("Could not read template %s: %s", name, err)
return nil, err
}
t, err = t.Parse(file)
if err != nil {
glog.Errorf("Could not parse template %s: %s", name, err)
log.Errorf("Could not parse template %s: %s", name, err)
}
return t, err
}
@ -213,12 +213,12 @@ func getTemplate(name string, pathPrefix string) (*template.Template, error) {
func executeTemplate(w http.ResponseWriter, name string, data interface{}, pathPrefix string) {
tpl, err := getTemplate(name, pathPrefix)
if err != nil {
glog.Error("Error preparing layout template: ", err)
log.Error("Error preparing layout template: ", err)
return
}
err = tpl.Execute(w, data)
if err != nil {
glog.Error("Error executing template: ", err)
log.Error("Error executing template: ", err)
}
}
@ -226,7 +226,7 @@ func dumpHeap(w http.ResponseWriter, r *http.Request) {
target := fmt.Sprintf("/tmp/%d.heap", time.Now().Unix())
f, err := os.Create(target)
if err != nil {
glog.Error("Could not dump heap: ", err)
log.Error("Could not dump heap: ", err)
}
fmt.Fprintf(w, "Writing to %s...", target)
defer f.Close()
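The final hunk stops at the deferred Close, before the heap profile is actually written. For reference, a complete handler of this shape would finish with the standard runtime/pprof writer; a minimal sketch, where the Lookup("heap").WriteTo call is stdlib API rather than code shown in this commit, and the early return on a failed Create is added for safety:

```go
package sketch

import (
	"fmt"
	"net/http"
	"os"
	pprof_runtime "runtime/pprof"
	"time"

	"github.com/prometheus/log"
)

// dumpHeap writes a heap profile to /tmp and reports the target
// path to the HTTP caller.
func dumpHeap(w http.ResponseWriter, r *http.Request) {
	target := fmt.Sprintf("/tmp/%d.heap", time.Now().Unix())
	f, err := os.Create(target)
	if err != nil {
		log.Error("Could not dump heap: ", err)
		return
	}
	defer f.Close()
	fmt.Fprintf(w, "Writing to %s...", target)
	if err := pprof_runtime.Lookup("heap").WriteTo(f, 0); err != nil {
		log.Error("Could not write heap profile: ", err)
	}
	fmt.Fprint(w, " done.")
}
```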