Merge branch 'master' into dev-2.0

Fabian Reinartz 2017-05-24 15:36:17 +02:00
commit 4c31061251
28 changed files with 986 additions and 299 deletions


@@ -1,12 +1,18 @@
-## 1.6.2 / 2017-05-11
+## v1.6.3 / 2017-05-18
* [BUGFIX] Fix disappearing Alertmanager targets in Alertmanager discovery.
* [BUGFIX] Fix panic with remote_write on ARMv7.
* [BUGFIX] Fix stacked graphs to adapt min/max values.
## v1.6.2 / 2017-05-11
* [BUGFIX] Fix potential memory leak in Kubernetes service discovery
-## 1.6.1 / 2017-04-19
+## v1.6.1 / 2017-04-19
* [BUGFIX] Don't panic if storage has no FPs even after initial wait
-## 1.6.0 / 2017-04-14
+## v1.6.0 / 2017-04-14
* [CHANGE] Replaced the remote write implementations for various backends by a
  generic write interface with example adapter implementation for various
@@ -71,11 +77,11 @@
* [BUGFIX] Fix deadlock in Zookeeper SD.
* [BUGFIX] Fix fuzzy search problems in the web-UI auto-completion.
-## 1.5.3 / 2017-05-11
+## v1.5.3 / 2017-05-11
* [BUGFIX] Fix potential memory leak in Kubernetes service discovery
-## 1.5.2 / 2017-02-10
+## v1.5.2 / 2017-02-10
* [BUGFIX] Fix series corruption in a special case of series maintenance where
  the minimum series-file-shrink-ratio kicks in.
@@ -83,14 +89,14 @@
  scheduled to be quarantined.
* [ENHANCEMENT] Binaries built with Go1.7.5.
-## 1.5.1 / 2017-02-07
+## v1.5.1 / 2017-02-07
* [BUGFIX] Don't lose fully persisted memory series during checkpointing.
* [BUGFIX] Fix intermittently failing relabeling.
* [BUGFIX] Make `-storage.local.series-file-shrink-ratio` work.
* [BUGFIX] Remove race condition from TestLoop.
-## 1.5.0 / 2017-01-23
+## v1.5.0 / 2017-01-23
* [CHANGE] Use lexicographic order to sort alerts by name.
* [FEATURE] Add Joyent Triton discovery.
@@ -107,11 +113,11 @@
* [BUGFIX] Ignore dotfiles in data directory.
* [BUGFIX] Abort on intermediate federation errors.
-## 1.4.1 / 2016-11-28
+## v1.4.1 / 2016-11-28
* [BUGFIX] Fix Consul service discovery
-## 1.4.0 / 2016-11-25
+## v1.4.0 / 2016-11-25
* [FEATURE] Allow configuring Alertmanagers via service discovery
* [FEATURE] Display used Alertmanagers on runtime page in the UI
@@ -124,27 +130,16 @@
* [BUGFIX] Use proper float64 modulo in PromQL `%` binary operations
* [BUGFIX] Fix crash bug in Kubernetes service discovery
-## 1.3.1 / 2016-11-04
+## v1.3.1 / 2016-11-04
-This bug-fix release pulls in the fixes from the 1.2.3 release.
+This bug-fix release pulls in the fixes from the v1.2.3 release.
* [BUGFIX] Correctly handle empty Regex entry in relabel config.
* [BUGFIX] MOD (`%`) operator doesn't panic with small floating point numbers.
* [BUGFIX] Updated miekg/dns vendoring to pick up upstream bug fixes.
* [ENHANCEMENT] Improved DNS error reporting.
-## 1.2.3 / 2016-11-04
+## v1.3.0 / 2016-11-01
-Note that this release is chronologically after 1.3.0.
-* [BUGFIX] Correctly handle end time before start time in range queries.
-* [BUGFIX] Error on negative `-storage.staleness-delta`
-* [BUGFIX] Correctly handle empty Regex entry in relabel config.
-* [BUGFIX] MOD (`%`) operator doesn't panic with small floating point numbers.
-* [BUGFIX] Updated miekg/dns vendoring to pick up upstream bug fixes.
-* [ENHANCEMENT] Improved DNS error reporting.
-## 1.3.0 / 2016-11-01
This is a breaking change to the Kubernetes service discovery.
@@ -158,7 +153,16 @@ This is a breaking change to the Kubernetes service discovery.
* [BUGFIX] Validate query end time is not before start time.
* [BUGFIX] Error on negative `-storage.staleness-delta`
-## 1.2.2 / 2016-10-30
+## v1.2.3 / 2016-11-04
* [BUGFIX] Correctly handle end time before start time in range queries.
* [BUGFIX] Error on negative `-storage.staleness-delta`
* [BUGFIX] Correctly handle empty Regex entry in relabel config.
* [BUGFIX] MOD (`%`) operator doesn't panic with small floating point numbers.
* [BUGFIX] Updated miekg/dns vendoring to pick up upstream bug fixes.
* [ENHANCEMENT] Improved DNS error reporting.
## v1.2.2 / 2016-10-30
* [BUGFIX] Correctly handle on() in alerts.
* [BUGFIX] UI: Deal properly with aborted requests.
@@ -167,13 +171,13 @@ This is a breaking change to the Kubernetes service discovery.
* [BUGFIX] Remote storage: Re-add accidentally removed timeout flag.
* [BUGFIX] Updated a number of vendored packages to pick up upstream bug fixes.
-## 1.2.1 / 2016-10-10
+## v1.2.1 / 2016-10-10
* [BUGFIX] Count chunk evictions properly so that the server doesn't
  assume it runs out of memory and subsequently throttles ingestion.
* [BUGFIX] Use Go1.7.1 for prebuilt binaries to fix issues on MacOS Sierra.
-## 1.2.0 / 2016-10-07
+## v1.2.0 / 2016-10-07
* [FEATURE] Cleaner encoding of query parameters in `/graph` URLs.
* [FEATURE] PromQL: Add `minute()` function.
@@ -196,22 +200,22 @@ This is a breaking change to the Kubernetes service discovery.
* [FEATURE] **Experimental** remote write path: Add HTTP basic auth and TLS.
* [FEATURE] **Experimental** remote write path: Support for relabelling.
-## 1.1.3 / 2016-09-16
+## v1.1.3 / 2016-09-16
* [ENHANCEMENT] Use golang-builder base image for tests in CircleCI.
* [ENHANCEMENT] Added unit tests for federation.
* [BUGFIX] Correctly de-dup metric families in federation output.
-## 1.1.2 / 2016-09-08
+## v1.1.2 / 2016-09-08
* [BUGFIX] Allow label names that coincide with keywords.
-## 1.1.1 / 2016-09-07
+## v1.1.1 / 2016-09-07
* [BUGFIX] Fix IPv6 escaping in service discovery integrations
* [BUGFIX] Fix default scrape port assignment for IPv6
-## 1.1.0 / 2016-09-03
+## v1.1.0 / 2016-09-03
* [FEATURE] Add `quantile` and `quantile_over_time`.
* [FEATURE] Add `stddev_over_time` and `stdvar_over_time`.
@@ -241,17 +245,17 @@ This is a breaking change to the Kubernetes service discovery.
* [BUGFIX] Fix rule HTML escaping issues.
* [BUGFIX] Remove internal labels from alerts sent to AM.
-## 1.0.2 / 2016-08-24
+## v1.0.2 / 2016-08-24
* [BUGFIX] Clean up old targets after config reload.
-## 1.0.1 / 2016-07-21
+## v1.0.1 / 2016-07-21
* [BUGFIX] Exit with error on non-flag command-line arguments.
* [BUGFIX] Update example console templates to new HTTP API.
* [BUGFIX] Re-add logging flags.
-## 1.0.0 / 2016-07-18
+## v1.0.0 / 2016-07-18
* [CHANGE] Remove deprecated query language keywords
* [CHANGE] Change Kubernetes SD to require specifying Kubernetes role
@@ -270,7 +274,7 @@ This is a breaking change to the Kubernetes service discovery.
* [BUGFIX] Fix edge case handling in crash recovery
* [BUGFIX] Hide testing package flags from help output
-## 0.20.0 / 2016-06-15
+## v0.20.0 / 2016-06-15
This release contains multiple breaking changes to the configuration schema.
@@ -288,23 +292,23 @@ This release contains multiple breaking changes to the configuration schema.
* [CHANGE] Rename `names` to `files` in file SD configuration
* [CHANGE] Remove kubelet port config option in Kubernetes SD configuration
-## 0.19.3 / 2016-06-14
+## v0.19.3 / 2016-06-14
* [BUGFIX] Handle Marathon apps with zero ports
* [BUGFIX] Fix startup panic in retrieval layer
-## 0.19.2 / 2016-05-29
+## v0.19.2 / 2016-05-29
* [BUGFIX] Correctly handle `GROUP_LEFT` and `GROUP_RIGHT` without labels in
  string representation of expressions and in rules.
* [BUGFIX] Use `-web.external-url` for new status endpoints.
-## 0.19.1 / 2016-05-25
+## v0.19.1 / 2016-05-25
* [BUGFIX] Handle service discovery panic affecting Kubernetes SD
* [BUGFIX] Fix web UI display issue in some browsers
-## 0.19.0 / 2016-05-24
+## v0.19.0 / 2016-05-24
This version contains a breaking change to the query language. Please read
the documentation on the grouping behavior of vector matching:
@@ -319,7 +323,7 @@ https://prometheus.io/docs/querying/operators/#vector-matching
* [ENHANCEMENT] Partition status page into individual pages
* [BUGFIX] Fix issue of hanging target scrapes
-## 0.18.0 / 2016-04-18
+## v0.18.0 / 2016-04-18
* [BUGFIX] Fix operator precedence in PromQL
* [BUGFIX] Never drop still open head chunk
@@ -339,16 +343,16 @@ https://prometheus.io/docs/querying/operators/#vector-matching
* [ENHANCEMENT] Instrument retrieval layer
* [ENHANCEMENT] Add Go version to `prometheus_build_info` metric
-## 0.17.0 / 2016-03-02
+## v0.17.0 / 2016-03-02
-This version no longer works with Alertmanager 0.0.4 and earlier!
+This version no longer works with Alertmanager v0.0.4 and earlier!
The alerting rule syntax has changed as well but the old syntax is supported
-up until version 0.18.
+up until version v0.18.
All regular expressions in PromQL are anchored now, matching the behavior of
regular expressions in config files.
-* [CHANGE] Integrate with Alertmanager 0.1.0 and higher
+* [CHANGE] Integrate with Alertmanager v0.1.0 and higher
* [CHANGE] Degraded storage mode renamed to rushed mode
* [CHANGE] New alerting rule syntax
* [CHANGE] Add label validation on ingestion
@@ -369,7 +373,7 @@ regular expressions in config files.
* [BUGFIX] Properly handle creation of target with bad TLS config
* [BUGFIX] Fix of checkpoint timing issue
-## 0.16.2 / 2016-01-18
+## v0.16.2 / 2016-01-18
* [FEATURE] Multiple authentication options for EC2 discovery added
* [FEATURE] Several meta labels for EC2 discovery added
@@ -400,14 +404,14 @@ regular expressions in config files.
Some changes to the Kubernetes service discovery were integrated since
it was released as a beta feature.
-## 0.16.1 / 2015-10-16
+## v0.16.1 / 2015-10-16
* [FEATURE] Add `irate()` function.
* [ENHANCEMENT] Improved auto-completion in expression browser.
* [CHANGE] Kubernetes SD moves node label to instance label.
* [BUGFIX] Escape regexes in console templates.
-## 0.16.0 / 2015-10-09
+## v0.16.0 / 2015-10-09
BREAKING CHANGES:
@@ -570,13 +574,14 @@ All changes:
  the same series upon append.
* [CLEANUP] Resolve relative paths during configuration loading.
-## 0.15.1 / 2015-07-27
+## v0.15.1 / 2015-07-27
* [BUGFIX] Fix vector matching behavior when there is a mix of equality and
  non-equality matchers in a vector selector and one matcher matches no series.
* [ENHANCEMENT] Allow overriding `GOARCH` and `GOOS` in Makefile.INCLUDE.
* [ENHANCEMENT] Update vendored dependencies.
-## 0.15.0 / 2015-07-21
+## v0.15.0 / 2015-07-21
BREAKING CHANGES:
@@ -687,7 +692,8 @@ All changes:
* [CLEANUP] Use `templates.TemplateExpander` for all page templates.
* [CLEANUP] Use new v1 HTTP API for querying and graphing.
-## 0.14.0 / 2015-06-01
+## v0.14.0 / 2015-06-01
* [CHANGE] Configuration format changed and switched to YAML.
  (See the provided [migration tool](https://github.com/prometheus/migrate/releases).)
* [ENHANCEMENT] Redesign of state-preserving target discovery.
@@ -712,10 +718,12 @@ All changes:
* [FEATURE] Add increase() query function to calculate a counter's increase.
* [ENHANCEMENT] Limit retrievable samples to the storage's retention window.
-## 0.13.4 / 2015-05-23
+## v0.13.4 / 2015-05-23
* [BUGFIX] Fix a race while checkpointing fingerprint mappings.
-## 0.13.3 / 2015-05-11
+## v0.13.3 / 2015-05-11
* [BUGFIX] Handle fingerprint collisions properly.
* [CHANGE] Comments in rules file must start with `#`. (The undocumented `//`
  and `/*...*/` comment styles are no longer supported.)
@@ -725,7 +733,8 @@ All changes:
* [ENHANCEMENT] Limit maximum number of concurrent queries.
* [ENHANCEMENT] Terminate running queries during shutdown.
-## 0.13.2 / 2015-05-05
+## v0.13.2 / 2015-05-05
* [MAINTENANCE] Updated vendored dependencies to their newest versions.
* [MAINTENANCE] Include rule_checker and console templates in release tarball.
* [BUGFIX] Sort NaN as the lowest value.
@@ -735,11 +744,13 @@ All changes:
  reading from disk.
* [BUGFIX] Show correct error on wrong DNS response.
-## 0.13.1 / 2015-04-09
+## v0.13.1 / 2015-04-09
* [BUGFIX] Treat memory series with zero chunks correctly in series maintenance.
* [ENHANCEMENT] Improve readability of usage text even more.
-## 0.13.0 / 2015-04-08
+## v0.13.0 / 2015-04-08
* [ENHANCEMENT] Double-delta encoding for chunks, saving typically 40% of
  space, both in RAM and on disk.
* [ENHANCEMENT] Redesign of chunk persistence queuing, increasing performance
@@ -766,7 +777,8 @@ All changes:
* [CLEANUP] Misc. other code cleanups.
* [MAINTENANCE] Updated vendored dependencies to their newest versions.
-## 0.12.0 / 2015-03-04
+## v0.12.0 / 2015-03-04
* [CHANGE] Use client_golang v0.3.1. THIS CHANGES FINGERPRINTING AND INVALIDATES
  ALL PERSISTED FINGERPRINTS. You have to wipe your storage to use this or
  later versions. There is a version guard in place that will prevent you to
@@ -781,8 +793,9 @@ All changes:
  (rather than /tmp/metrics).
* [CHANGE] Makefile uses Go 1.4.2.
-## 0.11.1 / 2015-02-27
+## v0.11.1 / 2015-02-27
-* [BUGFIX] Make series maintenance complete again. (Ever since 0.9.0rc4,
+* [BUGFIX] Make series maintenance complete again. (Ever since v0.9.0rc4,
  or commit 0851945, series would not be archived, chunk descriptors would
  not be evicted, and stale head chunks would never be closed. This happened
  due to accidental deletion of a line calling a (well tested :) function.
@@ -793,7 +806,8 @@ All changes:
* [CLEANUP] Code cleanups.
* [ENHANCEMENT] Limit the number of 'dirty' series counted during checkpointing.
-## 0.11.0 / 2015-02-23
+## v0.11.0 / 2015-02-23
* [FEATURE] Introduce new metric type Histogram with server-side aggregation.
* [FEATURE] Add offset operator.
* [FEATURE] Add floor, ceil and round functions.
@@ -817,7 +831,8 @@ All changes:
* [BUGFIX] Fix Rickshaw/D3 version mismatch.
* [CLEANUP] Various code cleanups.
-## 0.10.0 / 2015-01-26
+## v0.10.0 / 2015-01-26
* [CHANGE] More efficient JSON result format in query API. This requires
  up-to-date versions of PromDash and prometheus_cli, too.
* [ENHANCEMENT] Excluded non-minified Bootstrap assets and the Bootstrap maps
@@ -829,7 +844,8 @@ All changes:
* [BUGFIX] Several fixes to graphs in consoles.
* [CLEANUP] Removed a file size check that did not check anything.
-## 0.9.0 / 2015-01-23
+## v0.9.0 / 2015-01-23
* [CHANGE] Reworked command line flags, now more consistent and taking into
  account needs of the new storage backend (see below).
* [CHANGE] Metric names are dropped after certain transformations.
@@ -863,18 +879,20 @@ All changes:
* [ENHANCEMENT] Switched from Go 1.3 to Go 1.4.
* [ENHANCEMENT] Vendored external dependencies with godeps.
* [ENHANCEMENT] Numerous Web UI improvements, moved to Bootstrap3 and
-  Rickshaw 1.5.1.
+  Rickshaw v1.5.1.
* [ENHANCEMENT] Improved Docker integration.
* [ENHANCEMENT] Simplified the Makefile contraption.
* [CLEANUP] Put meta-data files into proper shape (LICENSE, README.md etc.)
* [CLEANUP] Removed all legitimate 'go vet' and 'golint' warnings.
* [CLEANUP] Removed dead code.
-## 0.8.0 / 2014-09-04
+## v0.8.0 / 2014-09-04
* [ENHANCEMENT] Stagger scrapes to spread out load.
* [BUGFIX] Correctly quote HTTP Accept header.
-## 0.7.0 / 2014-08-06
+## v0.7.0 / 2014-08-06
* [FEATURE] Added new functions: abs(), topk(), bottomk(), drop_common_labels().
* [FEATURE] Let console templates get graph links from expressions.
* [FEATURE] Allow console templates to dynamically include other templates.
@@ -888,7 +906,8 @@ All changes:
* [ENHANCEMENT] Removed incremental backoffs for unhealthy targets.
* [ENHANCEMENT] Dockerfile also builds Prometheus support tools now.
-## 0.6.0 / 2014-06-30
+## v0.6.0 / 2014-06-30
* [FEATURE] Added console and alert templates support, along with various template functions.
* [PERFORMANCE] Much faster and more memory-efficient flushing to disk.
* [ENHANCEMENT] Query results are now only logged when debugging.
@@ -898,7 +917,7 @@ All changes:
* [BUGFIX] Added installation step for missing dependency to Dockerfile.
* [BUGFIX] Removed broken and unused "User Dashboard" link.
-## 0.5.0 / 2014-05-28
+## v0.5.0 / 2014-05-28
* [BUGFIX] Fixed next retrieval time display on status page.
* [BUGFIX] Updated some variable references in tools subdir.
@@ -908,7 +927,7 @@ All changes:
* [ENHANCEMENT] Added internal check to verify temporal order of streams.
* [ENHANCEMENT] Some internal refactorings.
-## 0.4.0 / 2014-04-17
+## v0.4.0 / 2014-04-17
* [FEATURE] Vectors and scalars may now be reversed in binary operations (`<scalar> <binop> <vector>`).
* [FEATURE] It's possible to shutdown Prometheus via a `/-/quit` web endpoint now.


@@ -58,7 +58,7 @@ Prometheus will now be reachable at http://localhost:9090/.
### Building from source
To build Prometheus from the source code yourself you need to have a working
-Go environment with [version 1.5 or greater installed](http://golang.org/doc/install).
+Go environment with [version 1.8 or greater installed](http://golang.org/doc/install).
You can directly use the `go` tool to download and install the `prometheus`
and `promtool` binaries into your `GOPATH`. We use Go 1.5's experimental


@@ -25,6 +25,7 @@ func Uname() string {
	if err != nil {
		log.Fatal("Error!")
	}
	str := "(" + charsToString(buf.Sysname[:])
	str += " " + charsToString(buf.Release[:])
	str += " " + charsToString(buf.Version[:])


@@ -17,9 +17,12 @@
package main

func charsToString(ca []int8) string {
-	s := make([]byte, len(ca))
-	for i, c := range ca {
-		s[i] = byte(c)
+	s := make([]byte, 0, len(ca))
+	for _, c := range ca {
+		if byte(c) == 0 {
+			break
+		}
+		s = append(s, byte(c))
	}
-	return string(s[0:len(ca)])
+	return string(s)
}


@@ -17,9 +17,12 @@
package main

func charsToString(ca []uint8) string {
-	s := make([]byte, len(ca))
-	for i, c := range ca {
-		s[i] = byte(c)
+	s := make([]byte, 0, len(ca))
+	for _, c := range ca {
+		if byte(c) == 0 {
+			break
+		}
+		s = append(s, byte(c))
	}
-	return string(s[0:len(ca)])
+	return string(s)
}
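Both fixed helpers above stop copying at the first NUL byte instead of returning the whole fixed-size array. A minimal standalone sketch of the same idea; the sample data is purely illustrative:

```go
package main

import "fmt"

// charsToString converts a NUL-terminated C-style char array, as found in
// syscall.Utsname fields, into a Go string, dropping the trailing zero bytes.
func charsToString(ca []int8) string {
	s := make([]byte, 0, len(ca))
	for _, c := range ca {
		if byte(c) == 0 {
			break
		}
		s = append(s, byte(c))
	}
	return string(s)
}

func main() {
	// A fixed-size field padded with NUL bytes, as the kernel returns it.
	utsnameField := []int8{'L', 'i', 'n', 'u', 'x', 0, 0, 0}
	fmt.Printf("%q\n", charsToString(utsnameField)) // "Linux"
}
```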


@@ -20,6 +20,7 @@ import (
	"net/http"
	"net/http/httptest"
	"net/url"
	"reflect"
	"testing"
	"time"
@@ -396,3 +397,30 @@ func (a alertmanagerMock) url() *url.URL {
	}
	return u
}
func TestLabelSetNotReused(t *testing.T) {
	tg := makeInputTargetGroup()
	_, err := alertmanagerFromGroup(tg, &config.AlertmanagerConfig{})
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(tg, makeInputTargetGroup()) {
		t.Fatal("Target modified during alertmanager extraction")
	}
}

func makeInputTargetGroup() *config.TargetGroup {
	return &config.TargetGroup{
		Targets: []model.LabelSet{
			model.LabelSet{
				model.AddressLabel:            model.LabelValue("1.1.1.1:9090"),
				model.LabelName("notcommon1"): model.LabelValue("label"),
			},
		},
		Labels: model.LabelSet{
			model.LabelName("common"): model.LabelValue("label"),
		},
		Source: "testsource",
	}
}
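The added test checks that building Alertmanager endpoints from a target group leaves the group's label sets untouched. A minimal sketch of the defensive-copy pattern such a test enforces, using the same `model` package; the `withAddress` helper is illustrative and not part of the Prometheus code:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// withAddress returns a copy of ls with the address label set, leaving the
// original label set untouched so callers can safely reuse it.
func withAddress(ls model.LabelSet, addr string) model.LabelSet {
	out := make(model.LabelSet, len(ls)+1)
	for name, value := range ls {
		out[name] = value
	}
	out[model.AddressLabel] = model.LabelValue(addr)
	return out
}

func main() {
	original := model.LabelSet{"job": "node"}
	modified := withAddress(original, "1.1.1.1:9090")
	fmt.Println(len(original), len(modified)) // 1 2 -- the input is not mutated
}
```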


@@ -1,3 +1,50 @@
# 0.11.5
* feature: add writer and writerlevel to entry (#372)
# 0.11.4
* bug: fix undefined variable on solaris (#493)
# 0.11.3
* formatter: configure quoting of empty values (#484)
* formatter: configure quoting character (default is `"`) (#484)
* bug: fix not importing io correctly in non-linux environments (#481)
# 0.11.2
* bug: fix windows terminal detection (#476)
# 0.11.1
* bug: fix tty detection with custom out (#471)
# 0.11.0
* performance: Use bufferpool to allocate (#370)
* terminal: terminal detection for app-engine (#343)
* feature: exit handler (#375)
# 0.10.0
* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)
# 0.9.0
* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support
# 0.8.7
* logrus/core: fix possible race (#216)


@@ -1,4 +1,10 @@
-# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
**Seeing weird case-sensitive problems?** See [this
issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
This change has been reverted. I apologize for causing this. I greatly
underestimated the impact this would have. Logrus strives for stability and
backwards compatibility and failed to provide that.
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
@@ -12,7 +18,7 @@ plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
-With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
@@ -32,7 +38,7 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
-With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
#### Case-sensitivity
The organization's name was changed to lower-case--and this will not be changed
back. If you are getting import conflicts due to case sensitivity, please use
the lower-case import: `github.com/sirupsen/logrus`.
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
@@ -54,7 +66,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
-  log "github.com/Sirupsen/logrus"
+  log "github.com/sirupsen/logrus"
)
func main() {
@@ -65,7 +77,7 @@ func main() {
```
Note that it's completely api-compatible with the stdlib logger, so you can
-replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
@@ -74,20 +86,16 @@ package main
import (
  "os"
-  log "github.com/Sirupsen/logrus"
+  log "github.com/sirupsen/logrus"
-  "github.com/Sirupsen/logrus/hooks/airbrake"
)
func init() {
  // Log as JSON instead of the default ASCII formatter.
  log.SetFormatter(&log.JSONFormatter{})
-  // Use the Airbrake hook to report errors that have Error severity or above to
-  // an exception tracker. You can create custom hooks, see the Hooks section.
-  log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
-  // Output to stderr instead of stdout, could also be a file.
-  log.SetOutput(os.Stderr)
+  // Output to stdout instead of the default stderr
+  // Can be any io.Writer, see below for File example
+  log.SetOutput(os.Stdout)
  // Only log the warning severity or above.
  log.SetLevel(log.WarnLevel)
@@ -128,7 +136,8 @@ application, you can also create an instance of the `logrus` Logger:
package main
import (
-  "github.com/Sirupsen/logrus"
+  "os"
  "github.com/sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
@@ -137,7 +146,15 @@ var log = logrus.New()
func main() {
  // The API for setting attributes is a little different than the package level
  // exported logger. See Godoc.
-  log.Out = os.Stderr
+  log.Out = os.Stdout
  // You could set this to any `io.Writer` such as a file
  // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
  // if err == nil {
  //   log.Out = file
  // } else {
  //   log.Info("Failed to log to file, using default stderr")
  // }
  log.WithFields(logrus.Fields{
    "animal": "walrus",
@@ -148,7 +165,7 @@ func main() {
#### Fields
-Logrus encourages careful, structured logging though logging fields instead of
+Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
@@ -170,6 +187,20 @@ In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.
#### Default Fields
Often it's helpful to have fields _always_ attached to log statements in an
application or parts of one. For example, you may want to always log the
`request_id` and `user_ip` in the context of a request. Instead of writing
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
every line, you can create a `logrus.Entry` to pass around instead:
```go
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
requestLogger.Info("something happened on that request") // will log request_id and user_ip
requestLogger.Warn("something not great happened")
```
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
@@ -181,14 +212,17 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
```go
import (
-  log "github.com/Sirupsen/logrus"
+  log "github.com/sirupsen/logrus"
-  "github.com/Sirupsen/logrus/hooks/airbrake"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
-  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+  logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
  "log/syslog"
)
func init() {
-  log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
  if err != nil {
@@ -198,27 +232,54 @@ func init() {
  }
}
```
Note: Syslog hook also supports connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
| Hook | Description |
| ----- | ----------- |
-| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
-| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
-| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
-| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
-| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
-| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
-| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
-| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
-| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
-| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
-| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
-| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
-| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
-| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
-| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | | [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
#### Level logging
@@ -267,7 +328,7 @@ could do:
```go
import (
-  log "github.com/Sirupsen/logrus"
+  log "github.com/sirupsen/logrus"
)
init() {
@@ -294,17 +355,17 @@ The built-in logging formatters are:
  without colors.
  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
    field to `true`. To force no colored output even if there is a TTY set the
-    `DisableColors` field to `true`
+    `DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
-* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
+* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
-```go
-logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
-```
Third party logging formatters:
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
@@ -347,11 +408,88 @@ srv := http.Server{
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
This means that we can override the standard library logger easily:
```go
logger := logrus.New()
logger.Formatter = &logrus.JSONFormatter{}
// Use logrus for standard log output
// Note that `log` here references stdlib's log
// Not logrus imported under the name `log`.
log.SetOutput(logger.Writer())
```
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
#### Tools
-[godoc]: https://godoc.org/github.com/Sirupsen/logrus
+| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
#### Testing
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
import (
  "github.com/sirupsen/logrus"
  "github.com/sirupsen/logrus/hooks/test"
  "github.com/stretchr/testify/assert"
  "testing"
)

func TestSomething(t *testing.T) {
  logger, hook := test.NewNullLogger()
  logger.Error("Helloerror")

  assert.Equal(t, 1, len(hook.Entries))
  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
  assert.Equal(t, "Helloerror", hook.LastEntry().Message)

  hook.Reset()
  assert.Nil(t, hook.LastEntry())
}
```
#### Fatal handlers
Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
```
...
handler := func() {
// gracefully shutdown something...
}
logrus.RegisterExitHandler(handler)
...
```
#### Thread safety
By default, the Logger is protected by a mutex for concurrent writes; the mutex is held when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
Situations when locking is not needed include:
* You have no hooks registered, or calling hooks is already thread-safe.
* Writing to logger.Out is already thread-safe, for example:
  1) logger.Out is protected by locks.
  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
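A minimal sketch of the no-lock setup described above, assuming the output is an `os.File` opened with `O_APPEND` and no hooks are registered (the file name and field values are illustrative):

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// An O_APPEND file: each write is a single append, so the logger's own
	// mutex is redundant for plain writes when no hooks are registered.
	f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		logger.Fatal(err)
	}
	logger.Out = f

	// The writer is already safe for this usage, so drop the internal lock.
	logger.SetNoLock()

	logger.WithField("worker", 1).Info("started")
}
```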

vendor/github.com/Sirupsen/logrus/alt_exit.go (generated vendored file, 64 lines)

@@ -0,0 +1,64 @@
package logrus
// The following code was sourced and modified from the
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import (
	"fmt"
	"os"
)

var handlers = []func(){}

func runHandler(handler func()) {
	defer func() {
		if err := recover(); err != nil {
			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
		}
	}()

	handler()
}

func runHandlers() {
	for _, handler := range handlers {
		runHandler(handler)
	}
}

// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
	runHandlers()
	os.Exit(code)
}
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shutdown. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
	handlers = append(handlers, handler)
}
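A short usage sketch for the handlers defined above; per the comment on `RegisterExitHandler`, registered handlers also run when a `Fatal` entry is logged (the handler body is illustrative):

```go
package main

import (
	"github.com/sirupsen/logrus"
)

func main() {
	// Register cleanup that must run even when the process exits via Fatal.
	logrus.RegisterExitHandler(func() {
		logrus.Info("flushing buffers and closing connections")
	})

	// ... application work ...

	// Fatal logs the entry, runs the registered exit handlers, then exits.
	logrus.Fatal("unrecoverable error")
}
```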


@@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
-  log "github.com/Sirupsen/logrus"
+  log "github.com/sirupsen/logrus"
)
func main() {
@@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger:
Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
-For a full guide visit https://github.com/Sirupsen/logrus
+For a full guide visit https://github.com/sirupsen/logrus
*/
package logrus


@@ -3,11 +3,21 @@ package logrus
import (
	"bytes"
	"fmt"
-	"io"
	"os"
	"sync"
	"time"
)

var bufferPool *sync.Pool

func init() {
	bufferPool = &sync.Pool{
		New: func() interface{} {
			return new(bytes.Buffer)
		},
	}
}

// Defines the key when adding errors using WithError.
var ErrorKey = "error"
@@ -29,6 +39,9 @@ type Entry struct {
	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
	Message string

	// When formatter is called in entry.log(), a Buffer may be set to entry
	Buffer *bytes.Buffer
}

func NewEntry(logger *Logger) *Entry {
@@ -39,21 +52,15 @@ func NewEntry(logger *Logger) *Entry {
	}
}

-// Returns a reader for the entry, which is a proxy to the formatter.
-func (entry *Entry) Reader() (*bytes.Buffer, error) {
-	serialized, err := entry.Logger.Formatter.Format(entry)
-	return bytes.NewBuffer(serialized), err
-}

// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
-	reader, err := entry.Reader()
+	serialized, err := entry.Logger.Formatter.Format(entry)
	if err != nil {
		return "", err
	}
	str := string(serialized)
-	return reader.String(), err
+	return str, nil
}

// Add an error as single field (using the key defined in ErrorKey) to the Entry.
@@ -68,7 +75,7 @@ func (entry *Entry) WithField(key string, value interface{}) *Entry {
 // Add a map of fields to the Entry.
 func (entry *Entry) WithFields(fields Fields) *Entry {
-    data := Fields{}
+    data := make(Fields, len(entry.Data)+len(fields))
     for k, v := range entry.Data {
         data[k] = v
     }
@@ -81,6 +88,7 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
 // This function is not declared with a pointer value because otherwise
 // race conditions will occur when using multiple goroutines
 func (entry Entry) log(level Level, msg string) {
+    var buffer *bytes.Buffer
     entry.Time = time.Now()
     entry.Level = level
     entry.Message = msg
@@ -90,21 +98,24 @@ func (entry Entry) log(level Level, msg string) {
         fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
         entry.Logger.mu.Unlock()
     }
+    buffer = bufferPool.Get().(*bytes.Buffer)
-    reader, err := entry.Reader()
+    buffer.Reset()
+    defer bufferPool.Put(buffer)
+    entry.Buffer = buffer
+    serialized, err := entry.Logger.Formatter.Format(&entry)
+    entry.Buffer = nil
     if err != nil {
         entry.Logger.mu.Lock()
         fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
         entry.Logger.mu.Unlock()
-    }
+    } else {
         entry.Logger.mu.Lock()
-    defer entry.Logger.mu.Unlock()
+        _, err = entry.Logger.Out.Write(serialized)
-    _, err = io.Copy(entry.Logger.Out, reader)
         if err != nil {
             fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
         }
+        entry.Logger.mu.Unlock()
+    }
 // To avoid Entry#log() returning a value that only would make sense for
 // panic() to use in Entry#Panic(), we avoid the allocation by checking
@ -115,7 +126,7 @@ func (entry Entry) log(level Level, msg string) {
} }
func (entry *Entry) Debug(args ...interface{}) { func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel { if entry.Logger.level() >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...)) entry.log(DebugLevel, fmt.Sprint(args...))
} }
} }
@ -125,13 +136,13 @@ func (entry *Entry) Print(args ...interface{}) {
} }
func (entry *Entry) Info(args ...interface{}) { func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel { if entry.Logger.level() >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...)) entry.log(InfoLevel, fmt.Sprint(args...))
} }
} }
func (entry *Entry) Warn(args ...interface{}) { func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel { if entry.Logger.level() >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...)) entry.log(WarnLevel, fmt.Sprint(args...))
} }
} }
@ -141,20 +152,20 @@ func (entry *Entry) Warning(args ...interface{}) {
} }
func (entry *Entry) Error(args ...interface{}) { func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel { if entry.Logger.level() >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...)) entry.log(ErrorLevel, fmt.Sprint(args...))
} }
} }
func (entry *Entry) Fatal(args ...interface{}) { func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel { if entry.Logger.level() >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...)) entry.log(FatalLevel, fmt.Sprint(args...))
} }
os.Exit(1) Exit(1)
} }
func (entry *Entry) Panic(args ...interface{}) { func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel { if entry.Logger.level() >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...)) entry.log(PanicLevel, fmt.Sprint(args...))
} }
panic(fmt.Sprint(args...)) panic(fmt.Sprint(args...))
@ -163,13 +174,13 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions // Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) { func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel { if entry.Logger.level() >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...)) entry.Debug(fmt.Sprintf(format, args...))
} }
} }
func (entry *Entry) Infof(format string, args ...interface{}) { func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel { if entry.Logger.level() >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...)) entry.Info(fmt.Sprintf(format, args...))
} }
} }
@ -179,7 +190,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
} }
func (entry *Entry) Warnf(format string, args ...interface{}) { func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel { if entry.Logger.level() >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...)) entry.Warn(fmt.Sprintf(format, args...))
} }
} }
@ -189,20 +200,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
} }
func (entry *Entry) Errorf(format string, args ...interface{}) { func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel { if entry.Logger.level() >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...)) entry.Error(fmt.Sprintf(format, args...))
} }
} }
func (entry *Entry) Fatalf(format string, args ...interface{}) { func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel { if entry.Logger.level() >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...)) entry.Fatal(fmt.Sprintf(format, args...))
} }
os.Exit(1) Exit(1)
} }
func (entry *Entry) Panicf(format string, args ...interface{}) { func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel { if entry.Logger.level() >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...)) entry.Panic(fmt.Sprintf(format, args...))
} }
} }
@ -210,13 +221,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions // Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) { func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel { if entry.Logger.level() >= DebugLevel {
entry.Debug(entry.sprintlnn(args...)) entry.Debug(entry.sprintlnn(args...))
} }
} }
func (entry *Entry) Infoln(args ...interface{}) { func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel { if entry.Logger.level() >= InfoLevel {
entry.Info(entry.sprintlnn(args...)) entry.Info(entry.sprintlnn(args...))
} }
} }
@ -226,7 +237,7 @@ func (entry *Entry) Println(args ...interface{}) {
} }
func (entry *Entry) Warnln(args ...interface{}) { func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel { if entry.Logger.level() >= WarnLevel {
entry.Warn(entry.sprintlnn(args...)) entry.Warn(entry.sprintlnn(args...))
} }
} }
@ -236,20 +247,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
} }
func (entry *Entry) Errorln(args ...interface{}) { func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel { if entry.Logger.level() >= ErrorLevel {
entry.Error(entry.sprintlnn(args...)) entry.Error(entry.sprintlnn(args...))
} }
} }
func (entry *Entry) Fatalln(args ...interface{}) { func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel { if entry.Logger.level() >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...)) entry.Fatal(entry.sprintlnn(args...))
} }
os.Exit(1) Exit(1)
} }
func (entry *Entry) Panicln(args ...interface{}) { func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel { if entry.Logger.level() >= PanicLevel {
entry.Panic(entry.sprintlnn(args...)) entry.Panic(entry.sprintlnn(args...))
} }
} }

View file

@@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) {
 func SetLevel(level Level) {
     std.mu.Lock()
     defer std.mu.Unlock()
-    std.Level = level
+    std.setLevel(level)
 }
 // GetLevel returns the standard logger level.
 func GetLevel() Level {
     std.mu.Lock()
     defer std.mu.Unlock()
-    return std.Level
+    return std.level()
 }
 // AddHook adds a hook to the standard logger hooks.

View file

@@ -31,18 +31,15 @@ type Formatter interface {
 // It's not exported because it's still using Data in an opinionated way. It's to
 // avoid code duplication between the two default formatters.
 func prefixFieldClashes(data Fields) {
-    _, ok := data["time"]
+    if t, ok := data["time"]; ok {
-    if ok {
+        data["fields.time"] = t
-        data["fields.time"] = data["time"]
     }
-    _, ok = data["msg"]
+    if m, ok := data["msg"]; ok {
-    if ok {
+        data["fields.msg"] = m
-        data["fields.msg"] = data["msg"]
     }
-    _, ok = data["level"]
+    if l, ok := data["level"]; ok {
-    if ok {
+        data["fields.level"] = l
-        data["fields.level"] = data["level"]
     }
 }

View file

@@ -5,9 +5,40 @@
     "fmt"
 )
+type fieldKey string
+type FieldMap map[fieldKey]string
+const (
+    FieldKeyMsg = "msg"
+    FieldKeyLevel = "level"
+    FieldKeyTime = "time"
+)
+func (f FieldMap) resolve(key fieldKey) string {
+    if k, ok := f[key]; ok {
+        return k
+    }
+    return string(key)
+}
 type JSONFormatter struct {
     // TimestampFormat sets the format used for marshaling timestamps.
     TimestampFormat string
+    // DisableTimestamp allows disabling automatic timestamps in output
+    DisableTimestamp bool
+    // FieldMap allows users to customize the names of keys for various fields.
+    // As an example:
+    // formatter := &JSONFormatter{
+    //     FieldMap: FieldMap{
+    //         FieldKeyTime: "@timestamp",
+    //         FieldKeyLevel: "@level",
+    //         FieldKeyMsg: "@message",
+    //     },
+    // }
+    FieldMap FieldMap
 }
 func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
@@ -16,7 +47,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
     switch v := v.(type) {
     case error:
         // Otherwise errors are ignored by `encoding/json`
-        // https://github.com/Sirupsen/logrus/issues/137
+        // https://github.com/sirupsen/logrus/issues/137
         data[k] = v.Error()
     default:
         data[k] = v
@@ -29,9 +60,11 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
         timestampFormat = DefaultTimestampFormat
     }
-    data["time"] = entry.Time.Format(timestampFormat)
+    if !f.DisableTimestamp {
-    data["msg"] = entry.Message
+        data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
-    data["level"] = entry.Level.String()
+    }
+    data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+    data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
     serialized, err := json.Marshal(data)
     if err != nil {
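
A minimal configuration sketch for the new JSONFormatter options shown above (the renamed keys are illustrative, not mandated by the diff):

package main

import (
    log "github.com/sirupsen/logrus"
)

func main() {
    log.SetFormatter(&log.JSONFormatter{
        // Rename the default keys, e.g. for an ELK-style pipeline.
        FieldMap: log.FieldMap{
            log.FieldKeyTime:  "@timestamp",
            log.FieldKeyLevel: "@level",
            log.FieldKeyMsg:   "@message",
        },
        // DisableTimestamp: true, // drop the timestamp when the collector adds its own
    })
    log.WithField("animal", "walrus").Info("A walrus appears")
    // {"@level":"info","@message":"A walrus appears","@timestamp":"...","animal":"walrus"}
}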

View file

@@ -4,6 +4,7 @@ import (
     "io"
     "os"
     "sync"
+    "sync/atomic"
 )
 type Logger struct {
@@ -26,8 +27,31 @@ type Logger struct {
     // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
     // logged. `logrus.Debug` is useful in
     Level Level
-    // Used to sync writing to the log.
+    // Used to sync writing to the log. Locking is enabled by Default
-    mu sync.Mutex
+    mu MutexWrap
+    // Reusable empty entry
+    entryPool sync.Pool
+}
+type MutexWrap struct {
+    lock sync.Mutex
+    disabled bool
+}
+func (mw *MutexWrap) Lock() {
+    if !mw.disabled {
+        mw.lock.Lock()
+    }
+}
+func (mw *MutexWrap) Unlock() {
+    if !mw.disabled {
+        mw.lock.Unlock()
+    }
+}
+func (mw *MutexWrap) Disable() {
+    mw.disabled = true
 }
 // Creates a new logger. Configuration should be set by changing `Formatter`,
@@ -51,156 +75,243 @@ func New() *Logger {
     }
 }
-// Adds a field to the log entry, note that you it doesn't log until you call
+func (logger *Logger) newEntry() *Entry {
+    entry, ok := logger.entryPool.Get().(*Entry)
+    if ok {
+        return entry
+    }
+    return NewEntry(logger)
+}
+func (logger *Logger) releaseEntry(entry *Entry) {
+    logger.entryPool.Put(entry)
+}
+// Adds a field to the log entry, note that it doesn't log until you call
 // Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
 // If you want multiple fields, use `WithFields`.
 func (logger *Logger) WithField(key string, value interface{}) *Entry {
-    return NewEntry(logger).WithField(key, value)
+    entry := logger.newEntry()
+    defer logger.releaseEntry(entry)
+    return entry.WithField(key, value)
 }
 // Adds a struct of fields to the log entry. All it does is call `WithField` for
 // each `Field`.
 func (logger *Logger) WithFields(fields Fields) *Entry {
-    return NewEntry(logger).WithFields(fields)
+    entry := logger.newEntry()
+    defer logger.releaseEntry(entry)
+    return entry.WithFields(fields)
+}
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+    entry := logger.newEntry()
+    defer logger.releaseEntry(entry)
+    return entry.WithError(err)
 }
func (logger *Logger) Debugf(format string, args ...interface{}) { func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel { if logger.level() >= DebugLevel {
NewEntry(logger).Debugf(format, args...) entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Infof(format string, args ...interface{}) { func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel { if logger.level() >= InfoLevel {
NewEntry(logger).Infof(format, args...) entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Printf(format string, args ...interface{}) { func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...) entry := logger.newEntry()
entry.Printf(format, args...)
logger.releaseEntry(entry)
} }
func (logger *Logger) Warnf(format string, args ...interface{}) { func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel { if logger.level() >= WarnLevel {
NewEntry(logger).Warnf(format, args...) entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Warningf(format string, args ...interface{}) { func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel { if logger.level() >= WarnLevel {
NewEntry(logger).Warnf(format, args...) entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Errorf(format string, args ...interface{}) { func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel { if logger.level() >= ErrorLevel {
NewEntry(logger).Errorf(format, args...) entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Fatalf(format string, args ...interface{}) { func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel { if logger.level() >= FatalLevel {
NewEntry(logger).Fatalf(format, args...) entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
} }
os.Exit(1) Exit(1)
} }
func (logger *Logger) Panicf(format string, args ...interface{}) { func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel { if logger.level() >= PanicLevel {
NewEntry(logger).Panicf(format, args...) entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Debug(args ...interface{}) { func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel { if logger.level() >= DebugLevel {
NewEntry(logger).Debug(args...) entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Info(args ...interface{}) { func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel { if logger.level() >= InfoLevel {
NewEntry(logger).Info(args...) entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Print(args ...interface{}) { func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...) entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
} }
func (logger *Logger) Warn(args ...interface{}) { func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel { if logger.level() >= WarnLevel {
NewEntry(logger).Warn(args...) entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Warning(args ...interface{}) { func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel { if logger.level() >= WarnLevel {
NewEntry(logger).Warn(args...) entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Error(args ...interface{}) { func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel { if logger.level() >= ErrorLevel {
NewEntry(logger).Error(args...) entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Fatal(args ...interface{}) { func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel { if logger.level() >= FatalLevel {
NewEntry(logger).Fatal(args...) entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
} }
os.Exit(1) Exit(1)
} }
func (logger *Logger) Panic(args ...interface{}) { func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel { if logger.level() >= PanicLevel {
NewEntry(logger).Panic(args...) entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Debugln(args ...interface{}) { func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel { if logger.level() >= DebugLevel {
NewEntry(logger).Debugln(args...) entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Infoln(args ...interface{}) { func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel { if logger.level() >= InfoLevel {
NewEntry(logger).Infoln(args...) entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Println(args ...interface{}) { func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...) entry := logger.newEntry()
entry.Println(args...)
logger.releaseEntry(entry)
} }
func (logger *Logger) Warnln(args ...interface{}) { func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel { if logger.level() >= WarnLevel {
NewEntry(logger).Warnln(args...) entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Warningln(args ...interface{}) { func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel { if logger.level() >= WarnLevel {
NewEntry(logger).Warnln(args...) entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Errorln(args ...interface{}) { func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel { if logger.level() >= ErrorLevel {
NewEntry(logger).Errorln(args...) entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
} }
} }
func (logger *Logger) Fatalln(args ...interface{}) { func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel { if logger.level() >= FatalLevel {
NewEntry(logger).Fatalln(args...) entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
} }
os.Exit(1) Exit(1)
} }
func (logger *Logger) Panicln(args ...interface{}) { func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel { if logger.level() >= PanicLevel {
NewEntry(logger).Panicln(args...) entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
} }
} }
//When file is opened with appending mode, it's safe to
//write concurrently to a file (within 4k message on Linux).
//In these cases user can choose to disable the lock.
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}
func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
func (logger *Logger) setLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
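
A usage sketch for the SetNoLock option introduced above (file name and flags are illustrative): when the output is a file opened in append mode, per-write locking can be skipped.

package main

import (
    "os"

    "github.com/sirupsen/logrus"
)

func main() {
    logger := logrus.New()

    f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
    if err != nil {
        logger.Fatal(err)
    }
    logger.Out = f
    // Small appends to an O_APPEND file are atomic on Linux, so the internal
    // mutex can be disabled for a modest throughput win.
    logger.SetNoLock()

    logger.WithField("worker", 1).Info("started")
}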

View file

@@ -3,13 +3,14 @@ package logrus
 import (
     "fmt"
     "log"
+    "strings"
 )
 // Fields type, used to pass to `WithFields`.
 type Fields map[string]interface{}
 // Level type
-type Level uint8
+type Level uint32
 // Convert the Level to a string. E.g. PanicLevel becomes "panic".
 func (level Level) String() string {
@@ -33,7 +34,7 @@ func (level Level) String() string {
 // ParseLevel takes a string level and returns the Logrus log level constant.
 func ParseLevel(lvl string) (Level, error) {
-    switch lvl {
+    switch strings.ToLower(lvl) {
     case "panic":
         return PanicLevel, nil
     case "fatal":
@@ -52,6 +53,16 @@ func ParseLevel(lvl string) (Level, error) {
     return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
 }
+// A constant exposing all logging levels
+var AllLevels = []Level{
+    PanicLevel,
+    FatalLevel,
+    ErrorLevel,
+    WarnLevel,
+    InfoLevel,
+    DebugLevel,
+}
 // These are the different logging levels. You can set the logging level to log
 // on your instance of logger, obtained with `logrus.New()`.
 const (
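
A short sketch of the two changes above: ParseLevel is now case-insensitive, and AllLevels enumerates every level, e.g. for wiring up a hook (the flag value shown is illustrative).

package main

import (
    "fmt"

    "github.com/sirupsen/logrus"
)

func main() {
    // "INFO", "Info" and "info" all parse now, thanks to strings.ToLower.
    lvl, err := logrus.ParseLevel("INFO")
    if err != nil {
        logrus.Fatal(err)
    }
    logrus.SetLevel(lvl)

    // AllLevels is handy when a hook should fire for every severity.
    for _, l := range logrus.AllLevels {
        fmt.Println(l)
    }
}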
@@ -96,3 +107,37 @@ type StdLogger interface {
     Panicf(string, ...interface{})
     Panicln(...interface{})
 }
// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
WithField(key string, value interface{}) *Entry
WithFields(fields Fields) *Entry
WithError(err error) *Entry
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Printf(format string, args ...interface{})
Warnf(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Panicf(format string, args ...interface{})
Debug(args ...interface{})
Info(args ...interface{})
Print(args ...interface{})
Warn(args ...interface{})
Warning(args ...interface{})
Error(args ...interface{})
Fatal(args ...interface{})
Panic(args ...interface{})
Debugln(args ...interface{})
Infoln(args ...interface{})
Println(args ...interface{})
Warnln(args ...interface{})
Warningln(args ...interface{})
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
}
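
A sketch of the intent behind FieldLogger at this revision of the vendored library: code can accept the interface and be handed either a *logrus.Logger or an already-scoped *logrus.Entry (the helper function below is illustrative).

package main

import (
    log "github.com/sirupsen/logrus"
)

// processJob only needs the FieldLogger behaviour, not a concrete type.
func processJob(logger log.FieldLogger, id int) {
    logger.WithField("job_id", id).Info("processing")
}

func main() {
    processJob(log.StandardLogger(), 1)                    // the package-level logger
    processJob(log.WithField("component", "scheduler"), 2) // a pre-scoped entry
}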

View file

@ -0,0 +1,10 @@
// +build appengine
package logrus
import "io"
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
return true
}

View file

@@ -1,4 +1,5 @@
 // +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
 package logrus

View file

@@ -3,6 +3,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
+// +build !appengine
 package logrus
 import "syscall"

View file

@@ -4,18 +4,25 @@
 // license that can be found in the LICENSE file.
 // +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
 package logrus
 import (
+    "io"
+    "os"
     "syscall"
     "unsafe"
 )
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
+func IsTerminal(f io.Writer) bool {
-    fd := syscall.Stdout
     var termios Termios
-    _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+    switch v := f.(type) {
+    case *os.File:
+        _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
         return err == 0
+    default:
+        return false
+    }
 }

21
vendor/github.com/Sirupsen/logrus/terminal_solaris.go generated vendored Normal file
View file

@ -0,0 +1,21 @@
// +build solaris,!appengine
package logrus
import (
"io"
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
return err == nil
default:
return false
}
}

View file

@@ -3,11 +3,18 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-// +build windows
+// +build windows,!appengine
 package logrus
 import (
+    "bytes"
+    "errors"
+    "io"
+    "os"
+    "os/exec"
+    "strconv"
+    "strings"
     "syscall"
     "unsafe"
 )
@@ -16,12 +23,60 @@ var kernel32 = syscall.NewLazyDLL("kernel32.dll")
 var (
     procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+    procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
 )
-// IsTerminal returns true if the given file descriptor is a terminal.
+const (
-func IsTerminal() bool {
+    enableProcessedOutput = 0x0001
-    fd := syscall.Stdout
+    enableWrapAtEolOutput = 0x0002
-    var st uint32
+    enableVirtualTerminalProcessing = 0x0004
-    r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+)
-    return r != 0 && e == 0
func getVersion() (float64, error) {
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
cmd := exec.Command("cmd", "ver")
cmd.Stdout = stdout
cmd.Stderr = stderr
err := cmd.Run()
if err != nil {
return -1, err
}
// The output should be like "Microsoft Windows [Version XX.X.XXXXXX]"
version := strings.Replace(stdout.String(), "\n", "", -1)
version = strings.Replace(version, "\r\n", "", -1)
x1 := strings.Index(version, "[Version")
if x1 == -1 || strings.Index(version, "]") == -1 {
return -1, errors.New("Can't determine Windows version")
}
return strconv.ParseFloat(version[x1+9:x1+13], 64)
}
func init() {
ver, err := getVersion()
if err != nil {
return
}
// Activate Virtual Processing for Windows CMD
// Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
if ver >= 10 {
handle := syscall.Handle(os.Stderr.Fd())
procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing)
}
}
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
default:
return false
}
}

View file

@@ -3,9 +3,9 @@ package logrus
 import (
     "bytes"
     "fmt"
-    "runtime"
     "sort"
     "strings"
+    "sync"
     "time"
 )
@@ -20,16 +20,10 @@ const (
 var (
     baseTimestamp time.Time
-    isTerminal bool
 )
 func init() {
     baseTimestamp = time.Now()
-    isTerminal = IsTerminal()
-}
-func miniTS() int {
-    return int(time.Since(baseTimestamp) / time.Second)
 }
 type TextFormatter struct {
@@ -54,10 +48,32 @@
     // that log extremely frequently and don't use the JSON formatter this may not
     // be desired.
     DisableSorting bool
+    // QuoteEmptyFields will wrap empty fields in quotes if true
+    QuoteEmptyFields bool
+    // QuoteCharacter can be set to the override the default quoting character "
+    // with something else. For example: ', or `.
+    QuoteCharacter string
+    // Whether the logger's out is to a terminal
+    isTerminal bool
+    sync.Once
+}
+func (f *TextFormatter) init(entry *Entry) {
+    if len(f.QuoteCharacter) == 0 {
+        f.QuoteCharacter = "\""
+    }
+    if entry.Logger != nil {
+        f.isTerminal = IsTerminal(entry.Logger.Out)
+    }
 }
 func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-    var keys []string = make([]string, 0, len(entry.Data))
+    var b *bytes.Buffer
+    keys := make([]string, 0, len(entry.Data))
     for k := range entry.Data {
         keys = append(keys, k)
     }
@@ -65,13 +81,17 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
     if !f.DisableSorting {
         sort.Strings(keys)
     }
+    if entry.Buffer != nil {
-    b := &bytes.Buffer{}
+        b = entry.Buffer
+    } else {
+        b = &bytes.Buffer{}
+    }
     prefixFieldClashes(entry.Data)
-    isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+    f.Do(func() { f.init(entry) })
-    isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+    isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
     timestampFormat := f.TimestampFormat
     if timestampFormat == "" {
@@ -84,7 +104,9 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
         f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
     }
     f.appendKeyValue(b, "level", entry.Level.String())
+    if entry.Message != "" {
         f.appendKeyValue(b, "msg", entry.Message)
+    }
     for _, key := range keys {
         f.appendKeyValue(b, key, entry.Data[key])
     }
@@ -109,51 +131,59 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
     levelText := strings.ToUpper(entry.Level.String())[0:4]
-    if !f.FullTimestamp {
+    if f.DisableTimestamp {
-        fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+        fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+    } else if !f.FullTimestamp {
+        fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
     } else {
         fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
     }
     for _, k := range keys {
         v := entry.Data[k]
-        fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+        fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+        f.appendValue(b, v)
     }
 }
-func needsQuoting(text string) bool {
+func (f *TextFormatter) needsQuoting(text string) bool {
+    if f.QuoteEmptyFields && len(text) == 0 {
+        return true
+    }
     for _, ch := range text {
         if !((ch >= 'a' && ch <= 'z') ||
             (ch >= 'A' && ch <= 'Z') ||
             (ch >= '0' && ch <= '9') ||
             ch == '-' || ch == '.') {
-            return false
-        }
-    }
             return true
         }
+    }
+    return false
+}
 func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
     b.WriteString(key)
     b.WriteByte('=')
+    f.appendValue(b, value)
+    b.WriteByte(' ')
+}
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
     switch value := value.(type) {
     case string:
-        if needsQuoting(value) {
+        if !f.needsQuoting(value) {
             b.WriteString(value)
         } else {
-            fmt.Fprintf(b, "%q", value)
+            fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
         }
     case error:
         errmsg := value.Error()
-        if needsQuoting(errmsg) {
+        if !f.needsQuoting(errmsg) {
             b.WriteString(errmsg)
         } else {
-            fmt.Fprintf(b, "%q", value)
+            fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
         }
     default:
         fmt.Fprint(b, value)
     }
-    b.WriteByte(' ')
 }
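
A configuration sketch for the new TextFormatter fields shown above (the values are illustrative):

package main

import (
    "github.com/sirupsen/logrus"
)

func main() {
    logger := logrus.New()
    logger.Formatter = &logrus.TextFormatter{
        QuoteEmptyFields: true, // emit user='' instead of user= for empty values
        QuoteCharacter:   "'",  // quote with ' instead of the default "
    }
    logger.WithField("user", "").Warn("missing user")
    // values that need quoting, including the empty user field, are wrapped in '
}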

View file

@@ -7,21 +7,52 @@ import (
 )
 func (logger *Logger) Writer() *io.PipeWriter {
+    return logger.WriterLevel(InfoLevel)
+}
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+    return NewEntry(logger).WriterLevel(level)
+}
+func (entry *Entry) Writer() *io.PipeWriter {
+    return entry.WriterLevel(InfoLevel)
+}
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
     reader, writer := io.Pipe()
-    go logger.writerScanner(reader)
+    var printFunc func(args ...interface{})
+    switch level {
+    case DebugLevel:
+        printFunc = entry.Debug
+    case InfoLevel:
+        printFunc = entry.Info
+    case WarnLevel:
+        printFunc = entry.Warn
+    case ErrorLevel:
+        printFunc = entry.Error
+    case FatalLevel:
+        printFunc = entry.Fatal
+    case PanicLevel:
+        printFunc = entry.Panic
+    default:
+        printFunc = entry.Print
+    }
+    go entry.writerScanner(reader, printFunc)
     runtime.SetFinalizer(writer, writerFinalizer)
     return writer
 }
-func (logger *Logger) writerScanner(reader *io.PipeReader) {
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
     scanner := bufio.NewScanner(reader)
     for scanner.Scan() {
-        logger.Print(scanner.Text())
+        printFunc(scanner.Text())
     }
     if err := scanner.Err(); err != nil {
-        logger.Errorf("Error while reading from Writer: %s", err)
+        entry.Errorf("Error while reading from Writer: %s", err)
     }
     reader.Close()
 }
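
A sketch of what the WriterLevel plumbing above enables (the HTTP server wiring is illustrative): any io.Writer consumer can be pointed at logrus at a chosen level.

package main

import (
    "log"
    "net/http"

    "github.com/sirupsen/logrus"
)

func main() {
    logger := logrus.New()

    // Route the standard library logger through logrus at warning level.
    w := logger.WriterLevel(logrus.WarnLevel)
    defer w.Close()
    log.SetOutput(w)

    // An http.Server's internal error log can be redirected the same way.
    srv := &http.Server{
        Addr:     ":8080",
        ErrorLog: log.New(logger.WriterLevel(logrus.ErrorLevel), "", 0),
    }
    _ = srv
}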

6
vendor/vendor.json vendored
View file

@@ -58,6 +58,12 @@
             "revision": "c589d0c9f0d81640c518354c7bcae77d99820aa3",
             "revisionTime": "2016-09-30T00:14:02Z"
         },
+        {
+            "checksumSHA1": "DYv6Q1+VfnUVxMwvk5IshAClLvw=",
+            "path": "github.com/Sirupsen/logrus",
+            "revision": "5e5dc898656f695e2a086b8e12559febbfc01562",
+            "revisionTime": "2017-05-15T10:45:16Z"
+        },
         {
             "checksumSHA1": "ddYc7mKe3g1x1UUKBrGR4vArJs8=",
             "path": "github.com/asaskevich/govalidator",

File diff suppressed because one or more lines are too long

View file

@@ -415,7 +415,14 @@ Prometheus.Graph.prototype.submitQuery = function() {
       return;
     }
     var duration = new Date().getTime() - startTime;
-    var totalTimeSeries = (xhr.responseJSON.data !== undefined) ? xhr.responseJSON.data.result.length : 0;
+    var totalTimeSeries = 0;
+    if (xhr.responseJSON.data !== undefined) {
+      if (xhr.responseJSON.data.resultType === "scalar") {
+        totalTimeSeries = 1;
+      } else {
+        totalTimeSeries = xhr.responseJSON.data.result.length;
+      }
+    }
     self.evalStats.html("Load time: " + duration + "ms <br /> Resolution: " + resolution + "s <br />" + "Total time series: " + totalTimeSeries);
     self.spinner.hide();
   }
@@ -557,6 +564,21 @@ Prometheus.Graph.prototype.updateGraph = function() {
   });
   // Find and set graph's max/min
+  if (self.isStacked() === true) {
+    // When stacked is toggled
+    var max = 0;
+    self.data.forEach(function(timeSeries) {
+      var currSeriesMax = 0;
+      timeSeries.data.forEach(function(dataPoint) {
+        if (dataPoint.y > currSeriesMax && dataPoint.y != null) {
+          currSeriesMax = dataPoint.y;
+        }
+      });
+      max += currSeriesMax;
+    });
+    self.rickshawGraph.max = max*1.05;
+    self.rickshawGraph.min = 0;
+  } else {
     var min = Infinity;
     var max = -Infinity;
     self.data.forEach(function(timeSeries) {
@@ -576,6 +598,7 @@ Prometheus.Graph.prototype.updateGraph = function() {
     self.rickshawGraph.max = max + (0.1*(Math.abs(max - min)));
     self.rickshawGraph.min = min - (0.1*(Math.abs(max - min)));
   }
+  }
 var xAxis = new Rickshaw.Graph.Axis.Time({ graph: self.rickshawGraph });

View file

@@ -23,6 +23,7 @@ import (
     "net/http"
     "net/url"
     "os"
+    "path"
     "path/filepath"
     "sort"
     "strings"
@@ -171,7 +172,7 @@ func New(o *Options) *Handler {
     instrf := prometheus.InstrumentHandlerFunc
     router.Get("/", func(w http.ResponseWriter, r *http.Request) {
-        router.Redirect(w, r, "/graph", http.StatusFound)
+        router.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound)
     })
     router.Get("/alerts", instrf("alerts", h.alerts))
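
The point of the redirect change above is that a Prometheus served under a route prefix keeps its "/" redirect inside that prefix. A small sketch with an illustrative external URL:

package main

import (
    "fmt"
    "net/url"
    "path"
)

func main() {
    external, _ := url.Parse("https://example.com/prometheus/")
    // Joining with the external URL's path keeps the redirect under the prefix
    // instead of always pointing at the bare "/graph".
    fmt.Println(path.Join(external.Path, "/graph")) // prints /prometheus/graph
}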