2015-06-15 03:36:32 -07:00
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
2016-02-17 14:52:44 -08:00
// The main package for the Prometheus server executable.
2015-06-15 03:36:32 -07:00
package main
import (
2017-10-24 21:21:42 -07:00
"context"
2018-01-16 03:10:54 -08:00
"crypto/md5"
"encoding/json"
2015-06-15 03:23:02 -07:00
"fmt"
2017-06-20 09:48:17 -07:00
"net"
2017-10-06 03:22:19 -07:00
"net/http"
2015-06-15 03:36:32 -07:00
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
2017-06-20 09:48:17 -07:00
"net/url"
2015-06-15 03:36:32 -07:00
"os"
"os/signal"
2017-06-20 08:38:01 -07:00
"path/filepath"
2017-09-04 04:10:32 -07:00
"runtime"
2017-06-20 09:48:17 -07:00
"strings"
2018-01-17 10:14:24 -08:00
"sync"
2015-06-15 03:36:32 -07:00
"syscall"
"time"
2017-08-11 11:45:52 -07:00
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
2017-11-11 04:06:13 -08:00
"github.com/oklog/oklog/pkg/group"
2017-06-20 09:48:17 -07:00
"github.com/pkg/errors"
2015-06-23 09:04:04 -07:00
"github.com/prometheus/client_golang/prometheus"
2017-06-20 09:48:17 -07:00
"github.com/prometheus/common/model"
2016-05-05 04:46:51 -07:00
"github.com/prometheus/common/version"
2017-06-20 08:38:01 -07:00
"gopkg.in/alecthomas/kingpin.v2"
2017-09-08 09:34:20 -07:00
k8s_runtime "k8s.io/apimachinery/pkg/util/runtime"
promql: Allow per-query contexts.
For Weaveworks' Frankenstein, we need to support multitenancy. In
Frankenstein, we initially solved this without modifying the promql
package at all: we constructed a new promql.Engine for every
query and injected a storage implementation into that engine which would
be primed to only collect data for a given user.
This is problematic to upstream, however. Prometheus assumes that there
is only one engine: the query concurrency gate is part of the engine,
and the engine contains one central cancellable context to shut down all
queries. Also, creating a new engine for every query seems like overkill.
Thus, we want to be able to pass per-query contexts into a single engine.
This change gets rid of the promql.Engine's built-in base context and
allows passing in a per-query context instead. Central cancellation of
all queries is still possible by deriving all passed-in contexts from
one central one, but this is now the responsibility of the caller. The
central query context is now created in main() and passed into the
relevant components (web handler / API, rule manager).
In a next step, the per-query context would have to be passed to the
storage implementation, so that the storage can implement multi-tenancy
or other features based on the contextual information.
2016-09-15 04:52:50 -07:00
2017-10-06 03:22:19 -07:00
"github.com/mwitkow/go-conntrack"
2017-09-08 09:34:20 -07:00
"github.com/prometheus/common/promlog"
promlogflag "github.com/prometheus/common/promlog/flag"
2015-06-15 03:36:32 -07:00
"github.com/prometheus/prometheus/config"
2017-11-25 05:13:54 -08:00
"github.com/prometheus/prometheus/discovery"
2017-12-30 09:27:50 -08:00
sd_config "github.com/prometheus/prometheus/discovery/config"
2016-03-01 03:37:22 -08:00
"github.com/prometheus/prometheus/notifier"
2015-06-15 03:36:32 -07:00
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/rules"
2018-02-01 01:55:07 -08:00
"github.com/prometheus/prometheus/scrape"
2017-05-10 02:44:13 -07:00
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
2016-12-29 00:27:30 -08:00
"github.com/prometheus/prometheus/storage/tsdb"
2017-11-23 23:59:05 -08:00
"github.com/prometheus/prometheus/util/strutil"
2015-06-15 03:36:32 -07:00
"github.com/prometheus/prometheus/web"
)
var (
	// configSuccess reports whether the most recent configuration reload
	// attempt succeeded (1) or failed (0).
	configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_config_last_reload_successful",
		Help: "Whether the last configuration reload attempt was successful.",
	})
	// configSuccessTime holds the Unix timestamp of the last successful
	// configuration reload.
	configSuccessTime = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_config_last_reload_success_timestamp_seconds",
		Help: "Timestamp of the last successful configuration reload.",
	})
)
func init() {
	// Expose build information (version, revision, branch, ...) as the
	// standard "prometheus_build_info" style metric.
	prometheus.MustRegister(version.NewCollector("prometheus"))
}
// main wires up and runs all Prometheus components: flag parsing, local TSDB
// and remote storage, service discovery, scraping, rule evaluation,
// alert notification, and the web UI/API. The components run as actors in an
// oklog group so that termination or failure of any one of them shuts the
// whole process down in an orderly fashion.
func main() {
	// Enable block/mutex profiling when the DEBUG env var is set.
	if os.Getenv("DEBUG") != "" {
		runtime.SetBlockProfileRate(20)
		runtime.SetMutexProfileFraction(20)
	}

	// cfg gathers all command-line configuration in a single anonymous struct.
	cfg := struct {
		configFile string

		localStoragePath    string
		notifier            notifier.Options
		notifierTimeout     model.Duration
		web                 web.Options
		tsdb                tsdb.Options
		lookbackDelta       model.Duration
		webTimeout          model.Duration
		queryTimeout        model.Duration
		queryConcurrency    int
		RemoteFlushDeadline model.Duration

		prometheusURL string

		logLevel promlog.AllowedLevel
	}{
		notifier: notifier.Options{
			Registerer: prometheus.DefaultRegisterer,
		},
	}

	// Command-line flag definitions.
	a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server")

	a.Version(version.Print("prometheus"))

	a.HelpFlag.Short('h')

	a.Flag("config.file", "Prometheus configuration file path.").
		Default("prometheus.yml").StringVar(&cfg.configFile)

	a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
		Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)

	a.Flag("web.read-timeout",
		"Maximum duration before timing out read of the request, and closing idle connections.").
		Default("5m").SetValue(&cfg.webTimeout)

	a.Flag("web.max-connections", "Maximum number of simultaneous connections.").
		Default("512").IntVar(&cfg.web.MaxConnections)

	a.Flag("web.external-url",
		"The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically.").
		PlaceHolder("<URL>").StringVar(&cfg.prometheusURL)

	a.Flag("web.route-prefix",
		"Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url.").
		PlaceHolder("<path>").StringVar(&cfg.web.RoutePrefix)

	a.Flag("web.user-assets", "Path to static asset directory, available at /user.").
		PlaceHolder("<path>").StringVar(&cfg.web.UserAssetsPath)

	a.Flag("web.enable-lifecycle", "Enable shutdown and reload via HTTP request.").
		Default("false").BoolVar(&cfg.web.EnableLifecycle)

	a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
		Default("false").BoolVar(&cfg.web.EnableAdminAPI)

	a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
		Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)

	a.Flag("web.console.libraries", "Path to the console library directory.").
		Default("console_libraries").StringVar(&cfg.web.ConsoleLibrariesPath)

	a.Flag("storage.tsdb.path", "Base path for metrics storage.").
		Default("data/").StringVar(&cfg.localStoragePath)

	a.Flag("storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing.").
		Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration)

	a.Flag("storage.tsdb.max-block-duration",
		"Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period).").
		Hidden().PlaceHolder("<duration>").SetValue(&cfg.tsdb.MaxBlockDuration)

	a.Flag("storage.tsdb.retention", "How long to retain samples in storage.").
		Default("15d").SetValue(&cfg.tsdb.Retention)

	a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
		Default("false").BoolVar(&cfg.tsdb.NoLockfile)

	a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload.").
		Default("1m").PlaceHolder("<duration>").SetValue(&cfg.RemoteFlushDeadline)

	a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
		Default("10000").IntVar(&cfg.notifier.QueueCapacity)

	a.Flag("alertmanager.timeout", "Timeout for sending alerts to Alertmanager.").
		Default("10s").SetValue(&cfg.notifierTimeout)

	a.Flag("query.lookback-delta", "The delta difference allowed for retrieving metrics during expression evaluations.").
		Default("5m").SetValue(&cfg.lookbackDelta)

	a.Flag("query.timeout", "Maximum time a query may take before being aborted.").
		Default("2m").SetValue(&cfg.queryTimeout)

	a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently.").
		Default("20").IntVar(&cfg.queryConcurrency)

	promlogflag.AddFlags(a, &cfg.logLevel)

	_, err := a.Parse(os.Args[1:])
	if err != nil {
		fmt.Fprintln(os.Stderr, errors.Wrapf(err, "Error parsing commandline arguments"))
		a.Usage(os.Args[1:])
		os.Exit(2)
	}

	// Derive the externally reachable URL (may infer host/port from the
	// listen address when --web.external-url was not given).
	cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress)
	if err != nil {
		fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", cfg.prometheusURL))
		os.Exit(2)
	}

	cfg.web.ReadTimeout = time.Duration(cfg.webTimeout)
	// Default -web.route-prefix to path of -web.external-url.
	if cfg.web.RoutePrefix == "" {
		cfg.web.RoutePrefix = cfg.web.ExternalURL.Path
	}
	// RoutePrefix must always be at least '/'.
	cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/")

	// Unless overridden, compacted blocks may span at most 10% of retention.
	if cfg.tsdb.MaxBlockDuration == 0 {
		cfg.tsdb.MaxBlockDuration = cfg.tsdb.Retention / 10
	}

	promql.LookbackDelta = time.Duration(cfg.lookbackDelta)

	logger := promlog.New(cfg.logLevel)

	// XXX(fabxc): Kubernetes does background logging which we can only customize by modifying
	// a global variable.
	// Ultimately, here is the best place to set it.
	k8s_runtime.ErrorHandlers = []func(error){
		func(err error) {
			level.Error(log.With(logger, "component", "k8s_client_runtime")).Log("err", err)
		},
	}

	level.Info(logger).Log("msg", "Starting Prometheus", "version", version.Info())
	level.Info(logger).Log("build_context", version.BuildContext())
	level.Info(logger).Log("host_details", Uname())
	level.Info(logger).Log("fd_limits", FdLimits())

	// Storage: local TSDB (opened lazily below) fanned out with remote storage.
	var (
		localStorage  = &tsdb.ReadyStorage{}
		remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), localStorage.StartTime, time.Duration(cfg.RemoteFlushDeadline))
		fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
	)

	// Core components and the per-component cancellation contexts.
	var (
		ctxWeb, cancelWeb = context.WithCancel(context.Background())
		ctxRule           = context.Background()

		notifier = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))

		ctxScrape, cancelScrape = context.WithCancel(context.Background())
		discoveryManagerScrape  = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"))

		ctxNotify, cancelNotify = context.WithCancel(context.Background())
		discoveryManagerNotify  = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"))

		scrapeManager = scrape.NewManager(log.With(logger, "component", "scrape manager"), fanoutStorage)

		queryEngine = promql.NewEngine(
			log.With(logger, "component", "query engine"),
			prometheus.DefaultRegisterer,
			cfg.queryConcurrency,
			time.Duration(cfg.queryTimeout),
		)

		ruleManager = rules.NewManager(&rules.ManagerOptions{
			Appendable:  fanoutStorage,
			QueryFunc:   rules.EngineQueryFunc(queryEngine, fanoutStorage),
			NotifyFunc:  sendAlerts(notifier, cfg.web.ExternalURL.String()),
			Context:     ctxRule,
			ExternalURL: cfg.web.ExternalURL,
			Registerer:  prometheus.DefaultRegisterer,
			Logger:      log.With(logger, "component", "rule manager"),
		})
	)

	// Hand the components to the web handler configuration.
	cfg.web.Context = ctxWeb
	cfg.web.TSDB = localStorage.Get
	cfg.web.Storage = fanoutStorage
	cfg.web.QueryEngine = queryEngine
	cfg.web.ScrapeManager = scrapeManager
	cfg.web.RuleManager = ruleManager
	cfg.web.Notifier = notifier

	cfg.web.Version = &web.PrometheusVersion{
		Version:   version.Version,
		Revision:  version.Revision,
		Branch:    version.Branch,
		BuildUser: version.BuildUser,
		BuildDate: version.BuildDate,
		GoVersion: version.GoVersion,
	}

	// Expose the effective flag values on the /status/flags API.
	cfg.web.Flags = map[string]string{}

	// Exclude kingpin default flags to expose only Prometheus ones.
	boilerplateFlags := kingpin.New("", "").Version("")
	for _, f := range a.Model().Flags {
		if boilerplateFlags.GetFlag(f.Name) != nil {
			continue
		}

		cfg.web.Flags[f.Name] = f.Value.String()
	}

	// Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager
	webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)

	// Monitor outgoing connections on default transport with conntrack.
	http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc(
		conntrack.DialWithTracing(),
	)

	// reloaders are applied, in order, on every (re)load of the config file.
	reloaders := []func(cfg *config.Config) error{
		remoteStorage.ApplyConfig,
		webHandler.ApplyConfig,
		// The Scrape and notifier managers need to reload before the Discovery manager as
		// they need to read the most updated config when receiving the new targets list.
		notifier.ApplyConfig,
		scrapeManager.ApplyConfig,
		func(cfg *config.Config) error {
			c := make(map[string]sd_config.ServiceDiscoveryConfig)
			for _, v := range cfg.ScrapeConfigs {
				c[v.JobName] = v.ServiceDiscoveryConfig
			}
			return discoveryManagerScrape.ApplyConfig(c)
		},
		func(cfg *config.Config) error {
			c := make(map[string]sd_config.ServiceDiscoveryConfig)
			for _, v := range cfg.AlertingConfig.AlertmanagerConfigs {
				// AlertmanagerConfigs doesn't hold an unique identifier so we use the config hash as the identifier.
				b, err := json.Marshal(v)
				if err != nil {
					return err
				}
				c[fmt.Sprintf("%x", md5.Sum(b))] = v.ServiceDiscoveryConfig
			}
			return discoveryManagerNotify.ApplyConfig(c)
		},
		func(cfg *config.Config) error {
			// Get all rule files matching the configuration paths.
			var files []string
			for _, pat := range cfg.RuleFiles {
				fs, err := filepath.Glob(pat)
				if err != nil {
					// The only error can be a bad pattern.
					return fmt.Errorf("error retrieving rule files for %s: %s", pat, err)
				}
				files = append(files, fs...)
			}
			return ruleManager.Update(time.Duration(cfg.GlobalConfig.EvaluationInterval), files)
		},
	}

	prometheus.MustRegister(configSuccess)
	prometheus.MustRegister(configSuccessTime)

	// Start all components while we wait for TSDB to open but only load
	// initial config and mark ourselves as ready after it completed.
	dbOpen := make(chan struct{})

	// sync.Once is used to make sure we can close the channel at different execution stages(SIGTERM or when the config is loaded).
	type closeOnce struct {
		C     chan struct{}
		once  sync.Once
		Close func()
	}
	// Wait until the server is ready to handle reloading.
	reloadReady := &closeOnce{
		C: make(chan struct{}),
	}
	reloadReady.Close = func() {
		reloadReady.once.Do(func() {
			close(reloadReady.C)
		})
	}

	var g group.Group
	{
		// Termination handler.
		term := make(chan os.Signal)
		signal.Notify(term, os.Interrupt, syscall.SIGTERM)
		cancel := make(chan struct{})
		g.Add(
			func() error {
				// Don't forget to release the reloadReady channel so that waiting blocks can exit normally.
				select {
				case <-term:
					level.Warn(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
					reloadReady.Close()

				case <-webHandler.Quit():
					level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...")
				case <-cancel:
					reloadReady.Close()
					break
				}
				return nil
			},
			func(err error) {
				close(cancel)
			},
		)
	}
	{
		// Scrape discovery manager.
		g.Add(
			func() error {
				err := discoveryManagerScrape.Run()
				level.Info(logger).Log("msg", "Scrape discovery manager stopped")
				return err
			},
			func(err error) {
				level.Info(logger).Log("msg", "Stopping scrape discovery manager...")
				cancelScrape()
			},
		)
	}
	{
		// Notify discovery manager.
		g.Add(
			func() error {
				err := discoveryManagerNotify.Run()
				level.Info(logger).Log("msg", "Notify discovery manager stopped")
				return err
			},
			func(err error) {
				level.Info(logger).Log("msg", "Stopping notify discovery manager...")
				cancelNotify()
			},
		)
	}
	{
		// Scrape manager.
		g.Add(
			func() error {
				// When the scrape manager receives a new targets list
				// it needs to read a valid config for each job.
				// It depends on the config being in sync with the discovery manager so
				// we wait until the config is fully loaded.
				<-reloadReady.C

				err := scrapeManager.Run(discoveryManagerScrape.SyncCh())
				level.Info(logger).Log("msg", "Scrape manager stopped")
				return err
			},
			func(err error) {
				// Scrape manager needs to be stopped before closing the local TSDB
				// so that it doesn't try to write samples to a closed storage.
				level.Info(logger).Log("msg", "Stopping scrape manager...")
				scrapeManager.Stop()
			},
		)
	}
	{
		// Reload handler.

		// Make sure that sighup handler is registered with a redirect to the channel before the potentially
		// long and synchronous tsdb init.
		hup := make(chan os.Signal)
		signal.Notify(hup, syscall.SIGHUP)
		cancel := make(chan struct{})
		g.Add(
			func() error {
				<-reloadReady.C

				for {
					select {
					case <-hup:
						if err := reloadConfig(cfg.configFile, logger, reloaders...); err != nil {
							level.Error(logger).Log("msg", "Error reloading config", "err", err)
						}
					case rc := <-webHandler.Reload():
						if err := reloadConfig(cfg.configFile, logger, reloaders...); err != nil {
							level.Error(logger).Log("msg", "Error reloading config", "err", err)
							rc <- err
						} else {
							rc <- nil
						}
					case <-cancel:
						return nil
					}
				}

			},
			func(err error) {
				// Wait for any in-progress reloads to complete to avoid
				// reloading things after they have been shutdown.
				cancel <- struct{}{}
			},
		)
	}
	{
		// Initial configuration loading.
		cancel := make(chan struct{})
		g.Add(
			func() error {
				select {
				case <-dbOpen:
					break
				// In case a shutdown is initiated before the dbOpen is released
				case <-cancel:
					reloadReady.Close()
					return nil
				}

				if err := reloadConfig(cfg.configFile, logger, reloaders...); err != nil {
					return fmt.Errorf("error loading config from %q: %s", cfg.configFile, err)
				}

				reloadReady.Close()

				webHandler.Ready()
				level.Info(logger).Log("msg", "Server is ready to receive web requests.")
				<-cancel
				return nil
			},
			func(err error) {
				close(cancel)
			},
		)
	}
	{
		// Rule manager.
		// TODO(krasi) refactor ruleManager.Run() to be blocking to avoid using an extra blocking channel.
		cancel := make(chan struct{})
		g.Add(
			func() error {
				<-reloadReady.C
				ruleManager.Run()
				<-cancel
				return nil
			},
			func(err error) {
				ruleManager.Stop()
				close(cancel)
			},
		)
	}
	{
		// TSDB.
		cancel := make(chan struct{})
		g.Add(
			func() error {
				level.Info(logger).Log("msg", "Starting TSDB ...")
				db, err := tsdb.Open(
					cfg.localStoragePath,
					log.With(logger, "component", "tsdb"),
					prometheus.DefaultRegisterer,
					&cfg.tsdb,
				)
				if err != nil {
					return fmt.Errorf("opening storage failed: %s", err)
				}
				level.Info(logger).Log("msg", "TSDB started")

				// Queries into the lookahead window of the freshly opened
				// TSDB are served by remote storage for a margin of two
				// min-block-durations (in milliseconds).
				startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
				localStorage.Set(db, startTimeMargin)
				close(dbOpen)
				<-cancel
				return nil
			},
			func(err error) {
				if err := fanoutStorage.Close(); err != nil {
					level.Error(logger).Log("msg", "Error stopping storage", "err", err)
				}
				close(cancel)
			},
		)
	}
	{
		// Web handler.
		g.Add(
			func() error {
				if err := webHandler.Run(ctxWeb); err != nil {
					return fmt.Errorf("error starting web server: %s", err)
				}
				return nil
			},
			func(err error) {
				cancelWeb()
			},
		)
	}
	{
		// Notifier.

		// Calling notifier.Stop() before ruleManager.Stop() will cause a panic if the ruleManager isn't running,
		// so keep this interrupt after the ruleManager.Stop().
		g.Add(
			func() error {
				// When the notifier manager receives a new targets list
				// it needs to read a valid config for each job.
				// It depends on the config being in sync with the discovery manager
				// so we wait until the config is fully loaded.
				<-reloadReady.C

				notifier.Run(discoveryManagerNotify.SyncCh())
				level.Info(logger).Log("msg", "Notifier manager stopped")
				return nil
			},
			func(err error) {
				notifier.Stop()
			},
		)
	}
	if err := g.Run(); err != nil {
		level.Error(logger).Log("err", err)
		os.Exit(1)
	}
	level.Info(logger).Log("msg", "See you next time!")
}
2017-11-23 06:48:14 -08:00
func reloadConfig ( filename string , logger log . Logger , rls ... func ( * config . Config ) error ) ( err error ) {
2017-08-11 11:45:52 -07:00
level . Info ( logger ) . Log ( "msg" , "Loading configuration file" , "filename" , filename )
2015-09-01 10:18:39 -07:00
defer func ( ) {
2016-07-11 07:24:54 -07:00
if err == nil {
2015-09-01 10:18:39 -07:00
configSuccess . Set ( 1 )
2018-01-26 23:48:13 -08:00
configSuccessTime . SetToCurrentTime ( )
2015-09-01 10:18:39 -07:00
} else {
configSuccess . Set ( 0 )
}
} ( )
2015-06-15 03:36:32 -07:00
2015-08-05 09:30:37 -07:00
conf , err := config . LoadFile ( filename )
2015-06-15 03:36:32 -07:00
if err != nil {
2018-07-18 00:58:40 -07:00
return fmt . Errorf ( "couldn't load configuration (--config.file=%q): %v" , filename , err )
2015-06-15 03:36:32 -07:00
}
2016-08-11 18:23:18 -07:00
failed := false
2015-06-15 03:36:32 -07:00
for _ , rl := range rls {
2017-11-23 06:48:14 -08:00
if err := rl ( conf ) ; err != nil {
2017-08-11 11:45:52 -07:00
level . Error ( logger ) . Log ( "msg" , "Failed to apply configuration" , "err" , err )
2016-08-11 18:23:18 -07:00
failed = true
2016-07-11 07:24:54 -07:00
}
2015-06-15 03:36:32 -07:00
}
2016-08-11 18:23:18 -07:00
if failed {
2018-07-18 00:58:40 -07:00
return fmt . Errorf ( "one or more errors occurred while applying the new configuration (--config.file=%q)" , filename )
2016-08-11 18:23:18 -07:00
}
2018-07-04 05:41:16 -07:00
level . Info ( logger ) . Log ( "msg" , "Completed loading of configuration file" , "filename" , filename )
2016-08-11 18:23:18 -07:00
return nil
2015-06-15 03:36:32 -07:00
}
// startsOrEndsWithQuote reports whether s begins or ends with a single or
// double quote character.
func startsOrEndsWithQuote(s string) bool {
	for _, q := range []string{`"`, `'`} {
		if strings.HasPrefix(s, q) || strings.HasSuffix(s, q) {
			return true
		}
	}
	return false
}
// computeExternalURL computes a sanitized external URL from a raw input. It infers unset
// URL parts from the OS and the given listen address.
func computeExternalURL(u, listenAddr string) (*url.URL, error) {
	// With no explicit URL, derive one from the host name and the listen port.
	if u == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return nil, err
		}
		_, port, err := net.SplitHostPort(listenAddr)
		if err != nil {
			return nil, err
		}
		u = fmt.Sprintf("http://%s:%s/", hostname, port)
	}

	// Reject values that begin or end with a quote character, which usually
	// indicates a shell-quoting mistake on the command line.
	for _, quote := range []string{`"`, `'`} {
		if strings.HasPrefix(u, quote) || strings.HasSuffix(u, quote) {
			return nil, fmt.Errorf("URL must not begin or end with quotes")
		}
	}

	eu, err := url.Parse(u)
	if err != nil {
		return nil, err
	}

	// Normalize the path: strip trailing slashes and ensure a leading one.
	trimmed := strings.TrimRight(eu.Path, "/")
	if trimmed != "" && !strings.HasPrefix(trimmed, "/") {
		trimmed = "/" + trimmed
	}
	eu.Path = trimmed

	return eu, nil
}
2017-11-23 23:59:05 -08:00
2018-01-22 08:17:33 -08:00
// sendAlerts implements the rules.NotifyFunc for a Notifier.
2017-11-23 23:59:05 -08:00
// It filters any non-firing alerts from the input.
2018-01-30 09:45:37 -08:00
func sendAlerts ( n * notifier . Manager , externalURL string ) rules . NotifyFunc {
2017-11-23 23:59:05 -08:00
return func ( ctx context . Context , expr string , alerts ... * rules . Alert ) error {
var res [ ] * notifier . Alert
for _ , alert := range alerts {
// Only send actually firing alerts.
if alert . State == rules . StatePending {
continue
}
a := & notifier . Alert {
StartsAt : alert . FiredAt ,
Labels : alert . Labels ,
Annotations : alert . Annotations ,
GeneratorURL : externalURL + strutil . TableLinkForExpression ( expr ) ,
}
if ! alert . ResolvedAt . IsZero ( ) {
a . EndsAt = alert . ResolvedAt
}
res = append ( res , a )
}
if len ( alerts ) > 0 {
n . Send ( res ... )
}
return nil
}
}