Mirror of https://github.com/prometheus/prometheus.git

Commit 059295332f: Merge remote-tracking branch 'origin/master' into beorn7/storage

CHANGELOG.md (60 lines changed)
@@ -1,3 +1,63 @@
+## 0.17.0 / unreleased
+
+This version no longer works with Alertmanager 0.0.4 and earlier!
+The alerting rule syntax has changed as well but the old syntax is supported
+up until version 0.18.
+
+All regular expressions in PromQL are anchored now, matching the behavior of
+regular expressions in config files. Expressions in rules, alerts or dashboards
+need to be changed if they don't match the full value so far (for example
+`http_requests_total{status=~"^5"}` won't match anything anymore and needs to
+be changed to `http_requests_total{status=~"5.."}`).
+
+* [CHANGE] Integrate with Alertmanager 0.1.0 and higher
+* [CHANGE] Degraded storage mode renamed to rushed mode
+* [CHANGE] New alerting rule syntax
+* [CHANGE] Add label validation on ingestion
+* [CHANGE] Regular expression matchers in PromQL are anchored
+* [FEATURE] Send alert resolved notifications to Alertmanager
+* [FEATURE] Allow millisecond precision in configuration file
+* [FEATURE] Support AirBnB's Smartstack Nerve for service discovery
+* [ENHANCEMENT] Storage switches less often between regular and rushed mode.
+* [ENHANCEMENT] Storage switches into rushed mode if there are too many memory chunks.
+* [ENHANCEMENT] Added more storage instrumentation
+* [ENHANCEMENT] Improved instrumentation of notification handler
+* [BUGFIX] Do not count head chunks as chunks waiting for persistence
+* [BUGFIX] Handle OPTIONS HTTP requests to the API correctly
+* [BUGFIX] Parsing of ranges in PromQL fixed
+* [BUGFIX] Correctly validate URL flag parameters
+
+## 0.16.2 / 2016-01-18
+
+* [FEATURE] Multiple authentication options for EC2 discovery added
+* [FEATURE] Several meta labels for EC2 discovery added
+* [FEATURE] Allow full URLs in static target groups (used e.g. by the `blackbox_exporter`)
+* [FEATURE] Add Graphite remote-storage integration
+* [FEATURE] Create separate Kubernetes targets for services and their endpoints
+* [FEATURE] Add `clamp_{min,max}` functions to PromQL
+* [FEATURE] Omitted time parameter in API query defaults to now
+* [ENHANCEMENT] Less frequent time series file truncation
+* [ENHANCEMENT] Instrument number of manually deleted time series
+* [ENHANCEMENT] Ignore lost+found directory during storage version detection
+* [CHANGE] Kubernetes `masters` renamed to `api_servers`
+* [CHANGE] "Healthy" and "unhealthy" targets are now called "up" and "down" in the web UI
+* [CHANGE] Remove undocumented 2nd argument of the `delta` function.
+  (This is a BREAKING CHANGE for users of the undocumented 2nd argument.)
+* [BUGFIX] Return proper HTTP status codes on API errors
+* [BUGFIX] Fix Kubernetes authentication configuration
+* [BUGFIX] Fix stripped OFFSET in rule evaluation and display
+* [BUGFIX] Do not crash on failing Consul SD initialization
+* [BUGFIX] Revert changes to metric auto-completion
+* [BUGFIX] Add config overflow validation for TLS configuration
+* [BUGFIX] Skip already watched Zookeeper nodes in serverset SD
+* [BUGFIX] Don't federate stale samples
+* [BUGFIX] Move NaN to end of result for `topk/bottomk/sort/sort_desc/min/max`
+* [BUGFIX] Limit extrapolation of `delta/rate/increase`
+* [BUGFIX] Fix unhandled error in rule evaluation
+
+Some changes to the Kubernetes service discovery were integrated since
+it was released as a beta feature.
+
 ## 0.16.1 / 2015-10-16

 * [FEATURE] Add `irate()` function.
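For readers updating dashboards for the anchoring change described in the changelog hunk above, here is a small, self-contained illustration of what full-value anchoring means in practice. The anchorMatcher helper is purely illustrative and is not the function Prometheus itself uses; only the behavior of the two example selectors is taken from the changelog entry.

package main

import (
	"fmt"
	"regexp"
)

// anchorMatcher wraps a user-supplied pattern so that it must match the
// entire label value, which is the behavior the 0.17.0 changelog describes
// for PromQL regex matchers. The helper name is an assumption made here.
func anchorMatcher(pattern string) *regexp.Regexp {
	return regexp.MustCompile("^(?:" + pattern + ")$")
}

func main() {
	status := "500"
	fmt.Println(anchorMatcher("^5").MatchString(status))  // false: a prefix match is no longer enough
	fmt.Println(anchorMatcher("5..").MatchString(status)) // true: the pattern covers the full value
}
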
Makefile (7 lines changed)

@@ -41,6 +41,10 @@ build:
    @echo ">> building binaries"
    @./scripts/build.sh

+tarballs:
+   @echo ">> building release tarballs"
+   @./scripts/release_tarballs.sh
+
 docker:
    @docker build -t prometheus:$(shell git rev-parse --short HEAD) .

@@ -50,4 +54,5 @@ assets:
    @go-bindata $(bindata_flags) -pkg ui -o web/ui/bindata.go -ignore '(.*\.map|bootstrap\.js|bootstrap-theme\.css|bootstrap\.css)' web/ui/templates/... web/ui/static/...


-.PHONY: all style format build test vet docker assets
+.PHONY: all style format build test vet docker assets tarballs
+
@@ -405,18 +405,6 @@ func (t *Target) InstanceIdentifier() string {
    return t.host()
 }

-func (t *Target) fullLabels() model.LabelSet {
-   t.RLock()
-   defer t.RUnlock()
-
-   lset := t.labels.Clone()
-
-   if _, ok := lset[model.InstanceLabel]; !ok {
-       lset[model.InstanceLabel] = t.labels[model.AddressLabel]
-   }
-   return lset
-}
-
 // RunScraper implements Target.
 func (t *Target) RunScraper(sampleAppender storage.SampleAppender) {
    defer close(t.scraperStopped)
@@ -278,7 +278,7 @@ func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg *conf
    // to build up.
    wg.Add(1)
    go func(t *Target) {
-       if err := match.Update(cfg, t.fullLabels(), t.metaLabels); err != nil {
+       if err := match.Update(cfg, t.labels, t.metaLabels); err != nil {
            log.Errorf("Error updating target %v: %v", t, err)
        }
        wg.Done()
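The hunk above leans on a common Go fan-out pattern: one goroutine per target, tracked with a sync.WaitGroup. The sketch below is a self-contained illustration of that pattern with stand-in types and a stand-in update function, not the actual retrieval code.

package main

import (
	"fmt"
	"sync"
)

// Target and update are stand-ins; only the WaitGroup pattern from the
// hunk above is being illustrated.
type Target struct{ addr string }

func update(t *Target) error {
	fmt.Println("updated", t.addr)
	return nil
}

func main() {
	targets := []*Target{{addr: "example.org:80"}, {addr: "example.com:80"}}

	var wg sync.WaitGroup
	for _, t := range targets {
		wg.Add(1)
		go func(t *Target) { // pass t as an argument so each goroutine works on its own target
			defer wg.Done()
			if err := update(t); err != nil {
				fmt.Printf("error updating target %v: %v\n", t, err)
			}
		}(t)
	}
	wg.Wait()
}
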
@@ -315,7 +315,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
        "test_job1:static:0:0": {
            {
                model.JobLabel: "test_job1",
-               model.InstanceLabel: "example.org:80",
                "testParam": "paramValue",
                model.SchemeLabel: "",
                model.MetricsPathLabel: "",

@@ -324,7 +323,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
            },
            {
                model.JobLabel: "test_job1",
-               model.InstanceLabel: "example.com:80",
                "testParam": "paramValue",
                model.SchemeLabel: "",
                model.MetricsPathLabel: "",

@@ -338,7 +336,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
        "test_job1:static:0:0": {
            {
                model.JobLabel: "test_job1",
-               model.InstanceLabel: "example.org:80",
                "testParam": "paramValue",
                model.SchemeLabel: "",
                model.MetricsPathLabel: "",

@@ -347,7 +344,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
            },
            {
                model.JobLabel: "test_job1",
-               model.InstanceLabel: "example.com:80",
                "testParam": "paramValue",
                model.SchemeLabel: "",
                model.MetricsPathLabel: "",

@@ -362,7 +358,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
        "test_job1:static:0:0": {
            {
                model.JobLabel: "test_job1",
-               model.InstanceLabel: "example.org:80",
                "testParam": "paramValue",
                model.SchemeLabel: "",
                model.MetricsPathLabel: "",

@@ -371,7 +366,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
            },
            {
                model.JobLabel: "test_job1",
-               model.InstanceLabel: "example.com:80",
                "testParam": "paramValue",
                model.SchemeLabel: "",
                model.MetricsPathLabel: "",

@@ -382,7 +376,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
        "test_job2:static:0:0": {
            {
                model.JobLabel: "test_job2",
-               model.InstanceLabel: "example.org:8080",
                "foo": "bar",
                "new": "ox-ba",
                model.SchemeLabel: "",

@@ -391,7 +384,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
            },
            {
                model.JobLabel: "test_job2",
-               model.InstanceLabel: "example.com:8081",
                "foo": "bar",
                "new": "ox-ba",
                model.SchemeLabel: "",

@@ -402,7 +394,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
        "test_job2:static:0:1": {
            {
                model.JobLabel: "test_job2",
-               model.InstanceLabel: "foo.com:1234",
                model.SchemeLabel: "",
                model.MetricsPathLabel: "",
                model.AddressLabel: "foo.com:1234",

@@ -427,7 +418,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
        "test_job2:static:0:0": {
            {
                model.JobLabel: "test_job2",
-               model.InstanceLabel: "example.org:8080",
                "foo": "bar",
                "new": "ox-ba",
                model.SchemeLabel: "",

@@ -435,7 +425,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
                model.AddressLabel: "example.org:8080"},
            {
                model.JobLabel: "test_job2",
-               model.InstanceLabel: "example.com:8081",
                "foo": "bar",
                "new": "ox-ba",
                model.SchemeLabel: "",

@@ -446,7 +435,6 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
        "test_job2:static:0:1": {
            {
                model.JobLabel: "test_job2",
-               model.InstanceLabel: "foo.com:1234",
                model.SchemeLabel: "",
                model.MetricsPathLabel: "",
                model.AddressLabel: "foo.com:1234",

@@ -494,7 +482,7 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
    for _, expt := range expTargets {
        found := false
        for _, actt := range actTargets {
-           if reflect.DeepEqual(expt, actt.fullLabels()) {
+           if reflect.DeepEqual(expt, actt.labels) {
                found = true
                break
            }
@@ -34,7 +34,8 @@ ldflags="
  -X ${repo_path}/version.Branch=${branch}
  -X ${repo_path}/version.BuildUser=${USER}@${host}
  -X ${repo_path}/version.BuildDate=${build_date}
-  -X ${repo_path}/version.GoVersion=${go_version}"
+  -X ${repo_path}/version.GoVersion=${go_version}
+  ${EXTRA_LDFLAGS}"

export GO15VENDOREXPERIMENT="1"

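The -X linker flags above overwrite package-level string variables at build time. Below is a minimal sketch of the receiving side, assuming a version package with exactly the variables named in the flags; the repository's actual file may declare additional fields (for example a Version or Revision string) and defaults.

// Package version holds build metadata injected via -ldflags "-X ...".
// This is a sketch of the idea, not the repository's actual source file.
package version

import "fmt"

// Plain variables (not constants) so the linker can overwrite them.
var (
	Branch    string
	BuildUser string
	BuildDate string
	GoVersion string
)

// Info renders the injected metadata, e.g. for a --version flag.
func Info() string {
	return fmt.Sprintf("branch: %s, user: %s, date: %s, go: %s",
		Branch, BuildUser, BuildDate, GoVersion)
}
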
scripts/release_tarballs.sh (new executable file, 42 lines)

@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+# Copyright 2015 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+version=$(cat version/VERSION)
+
+for GOOS in "darwin" "freebsd" "linux" "windows"; do
+  for GOARCH in "amd64" "386"; do
+    export GOARCH
+    export GOOS
+    make build
+
+    tarball_dir="prometheus-${version}.${GOOS}-${GOARCH}"
+    tarball="${tarball_dir}.tar.gz"
+
+    if [ "$(go env GOOS)" = "windows" ]; then
+      ext=".exe"
+    fi
+
+    echo " > $tarball"
+    mkdir -p "${tarball_dir}"
+    cp -a "prometheus${ext}" "promtool${ext}" consoles console_libraries "${tarball_dir}"
+    tar -czf "${tarball}" "${tarball_dir}"
+    rm -rf "${tarball_dir}"
+    rm "prometheus${ext}" "promtool${ext}"
+  done
+done
+
+exit 0
@@ -18,6 +18,7 @@ import (
    "container/list"
    "fmt"
    "math"
    "sync"
    "sync/atomic"
    "time"

@@ -124,6 +125,7 @@ type memorySeriesStorage struct {
    numChunksToPersist int64 // The number of chunks waiting for persistence.
    maxChunksToPersist int   // If numChunksToPersist reaches this threshold, ingestion will be throttled.
    rushed             bool  // Whether the storage is in rushed mode.
    rushedMtx          sync.Mutex    // Protects entering and exiting rushed mode.
    throttled          chan struct{} // This chan is sent to whenever NeedsThrottling() returns true (for logging).

    fpLocker *fingerprintLocker
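The throttled field comment above describes a channel that is signalled whenever NeedsThrottling() returns true, purely so a logger can report it. A non-blocking send is the usual way to implement such a signal so the hot path never waits on the consumer. The sketch below shows that idiom with assumed names; it is not the actual storage code.

package main

import (
	"fmt"
	"time"
)

// signalThrottled notifies an optional listener without ever blocking the
// caller. The helper name is an assumption made for this illustration.
func signalThrottled(throttled chan struct{}) {
	select {
	case throttled <- struct{}{}:
	default: // no listener ready; drop the signal rather than block ingestion
	}
}

func main() {
	throttled := make(chan struct{}, 1)

	// Logging goroutine: reports whenever a throttling signal arrives.
	go func() {
		for range throttled {
			fmt.Println("storage is throttling ingestion")
		}
	}()

	// Hot path: may signal many times and never blocks, even if the logger is slow.
	for i := 0; i < 5; i++ {
		signalThrottled(throttled)
	}
	time.Sleep(10 * time.Millisecond)
}
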
@@ -950,6 +952,13 @@ loop:
            } else {
                dirtySeriesCount = 0
            }
+           // If a checkpoint takes longer than checkpointInterval, unluckily timed
+           // combination with the Reset(0) call below can lead to a case where a
+           // time is lurking in C leading to repeated checkpointing without break.
+           select {
+           case <-checkpointTimer.C: // Get rid of the lurking time.
+           default:
+           }
            checkpointTimer.Reset(s.checkpointInterval)
        case fp := <-memoryFingerprints:
            if s.maintainMemorySeries(fp, model.Now().Add(-s.dropAfter)) {
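The added comment and select block above deal with a classic time.Timer pitfall: if the timer has already fired, a stale value sits in its channel and the next receive after Reset returns immediately. Here is a standalone illustration of the drain-before-Reset idiom; it is generic Go, not Prometheus code.

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(10 * time.Millisecond)
	time.Sleep(20 * time.Millisecond) // the timer has fired; a value is lurking in t.C

	// Drain any pending value before Reset, otherwise the next <-t.C would
	// return the old value immediately instead of waiting for the new duration.
	select {
	case <-t.C:
	default:
	}
	t.Reset(50 * time.Millisecond)

	start := time.Now()
	<-t.C
	fmt.Printf("timer fired after ~%v\n", time.Since(start).Round(10*time.Millisecond))
}
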
@@ -1234,6 +1243,9 @@ func (s *memorySeriesStorage) incNumChunksToPersist(by int) {
 // files should not by synced anymore provided the user has specified the
 // adaptive sync strategy.
 func (s *memorySeriesStorage) calculatePersistenceUrgencyScore() float64 {
+   s.rushedMtx.Lock()
+   defer s.rushedMtx.Unlock()
+
    var (
        chunksToPersist    = float64(s.getNumChunksToPersist())
        maxChunksToPersist = float64(s.maxChunksToPersist)
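calculatePersistenceUrgencyScore is only shown in part here; judging from the variables in the hunk, the score relates the persistence backlog to its configured maximum. The sketch below is a rough stand-in clamped to [0, 1] under that assumption; the real function also takes rushed mode and other storage state into account.

package main

import "fmt"

// persistenceUrgency is a simplified stand-in for the score computed above:
// the fraction of the allowed persistence backlog already used up, capped at 1.
func persistenceUrgency(chunksToPersist, maxChunksToPersist float64) float64 {
	if maxChunksToPersist <= 0 {
		return 1 // no headroom configured; treat persistence as maximally urgent
	}
	if score := chunksToPersist / maxChunksToPersist; score < 1 {
		return score
	}
	return 1
}

func main() {
	fmt.Println(persistenceUrgency(250000, 1000000))  // 0.25: relaxed
	fmt.Println(persistenceUrgency(1200000, 1000000)) // 1: maximally urgent
}
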
@@ -31,7 +31,7 @@ const (
 type (
    // Closer is the interface that wraps the Close method.
    Closer interface {
-       // Close reaps the underlying directory and its children. The directory
+       // Close reaps the underlying directory and its children. The directory
        // could be deleted by its users already.
        Close()
    }

@@ -59,7 +59,7 @@ type (
    }

    // T implements the needed methods of testing.TB so that we do not need
-   // to actually import testing (which has the side affect of adding all
+   // to actually import testing (which has the side effect of adding all
    // the test flags, which we do not want in non-test binaries even if
    // they make use of these utilities for some reason).
    T interface {
@@ -1 +1 @@
-0.16.1
+0.17.0rc2