Mirror of https://github.com/prometheus/prometheus.git, synced 2024-12-25 13:44:05 -08:00
Use github.com/golang/glog for all logging.
commit aa5d251f8d
parent 83fb0a9a2d
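This commit replaces the standard library log package with the leveled github.com/golang/glog logger throughout the code base: each file swaps the "log" import for "github.com/golang/glog", and log.Println/log.Printf/log.Fatalf call sites become glog.Info/Warning/Error/Fatal calls of the appropriate severity. The sketch below is illustrative only (not code from the commit) and uses only glog's public API:

package main

import (
	"errors"
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog registers its own flags (-logtostderr, -v, -log_dir, ...);
	// they only take effect once flags are parsed.
	flag.Parse()
	defer glog.Flush() // glog buffers output, so flush before exiting

	glog.Info("Starting head compaction...") // was log.Println(...)
	glog.Infof("Writing %d samples...", 42)  // was log.Printf(...)

	err := errors.New("example failure")
	glog.Warning("Error parsing template:", err) // recoverable, noteworthy conditions
	glog.Error("could not compact:", err)        // errors that do not abort the program
	// glog.Fatal("Error opening storage:", err) // logs the message, then exits
}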
main.go | 43

@@ -15,12 +15,12 @@ package main
 
 import (
 	"flag"
-	"log"
 	"os"
 	"os/signal"
 	"sync"
 	"time"
 
+	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/extraction"
 
 	"github.com/prometheus/prometheus/config"
@@ -28,7 +28,6 @@ import (
 	"github.com/prometheus/prometheus/retrieval"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/storage/metric"
-
 	"github.com/prometheus/prometheus/web"
 	"github.com/prometheus/prometheus/web/api"
 )
@@ -91,7 +90,7 @@ func (p *prometheus) interruptHandler() {
 
 	<-notifier
 
-	log.Println("Received SIGINT; Exiting Gracefully...")
+	glog.Warning("Received SIGINT; Exiting gracefully...")
 	p.close()
 	os.Exit(0)
 }
@@ -169,15 +168,12 @@ func main() {
 
 	conf, err := config.LoadFromFile(*configFile)
 	if err != nil {
-		log.Fatalf("Error loading configuration from %s: %v", *configFile, err)
+		glog.Fatalf("Error loading configuration from %s: %v", *configFile, err)
 	}
 
 	ts, err := metric.NewTieredStorage(uint(*diskAppendQueueCapacity), 100, *arenaFlushInterval, *arenaTTL, *metricsStoragePath)
 	if err != nil {
-		log.Fatalf("Error opening storage: %s", err)
-	}
-	if ts == nil {
-		log.Fatalln("Nil tiered storage.")
+		glog.Fatal("Error opening storage:", err)
 	}
 
 	unwrittenSamples := make(chan *extraction.Result, *samplesQueueCapacity)
@@ -196,9 +192,8 @@ func main() {
 
 	// Queue depth will need to be exposed
 	ruleManager := rules.NewRuleManager(unwrittenSamples, notifications, conf.EvaluationInterval(), ts)
-	err = ruleManager.AddRulesFromConfig(conf)
-	if err != nil {
-		log.Fatalf("Error loading rule files: %v", err)
+	if err := ruleManager.AddRulesFromConfig(conf); err != nil {
+		glog.Fatal("Error loading rule files:", err)
 	}
 	go ruleManager.Run()
 
@@ -273,56 +268,56 @@ func main() {
 
 	go func() {
 		for _ = range prometheus.headCompactionTimer.C {
-			log.Println("Starting head compaction...")
+			glog.Info("Starting head compaction...")
 			err := prometheus.compact(*headAge, *headGroupSize)
 
 			if err != nil {
-				log.Printf("could not compact due to %s", err)
+				glog.Error("could not compact:", err)
 			}
-			log.Println("Done")
+			glog.Info("Done")
 		}
 	}()
 
 	go func() {
		for _ = range prometheus.bodyCompactionTimer.C {
-			log.Println("Starting body compaction...")
+			glog.Info("Starting body compaction...")
 			err := prometheus.compact(*bodyAge, *bodyGroupSize)
 
 			if err != nil {
-				log.Printf("could not compact due to %s", err)
+				glog.Error("could not compact:", err)
 			}
-			log.Println("Done")
 		}
+			glog.Info("Done")
+		}
 	}()
 
 	go func() {
 		for _ = range prometheus.tailCompactionTimer.C {
-			log.Println("Starting tail compaction...")
+			glog.Info("Starting tail compaction...")
 			err := prometheus.compact(*tailAge, *tailGroupSize)
 
 			if err != nil {
-				log.Printf("could not compact due to %s", err)
+				glog.Error("could not compact:", err)
 			}
-			log.Println("Done")
+			glog.Info("Done")
 		}
 	}()
 
 	go func() {
 		for _ = range prometheus.deletionTimer.C {
-			log.Println("Starting deletion of stale values...")
+			glog.Info("Starting deletion of stale values...")
 			err := prometheus.delete(*deleteAge, deletionBatchSize)
 
 			if err != nil {
-				log.Printf("could not delete due to %s", err)
+				glog.Error("could not delete:", err)
 			}
-			log.Println("Done")
+			glog.Info("Done")
 		}
 	}()
 
 	go func() {
 		err := webService.ServeForever()
 		if err != nil {
-			log.Fatal(err)
+			glog.Fatal(err)
 		}
 	}()
 
@@ -19,11 +19,12 @@ import (
 	"flag"
 	"io"
 	"io/ioutil"
-	"log"
 	"net/http"
 	"text/template"
 	"time"
 
+	"github.com/golang/glog"
+
 	clientmodel "github.com/prometheus/client_golang/model"
 
 	"github.com/prometheus/prometheus/utility"
@@ -95,7 +96,7 @@ func interpolateMessage(msg string, labels clientmodel.LabelSet, value clientmod
 		"{{$value := .Value}}"
 
 	if _, err := t.Parse(defs + msg); err != nil {
-		log.Println("Error parsing template:", err)
+		glog.Warning("Error parsing template:", err)
 		return msg
 	}
 
@@ -114,7 +115,7 @@ func interpolateMessage(msg string, labels clientmodel.LabelSet, value clientmod
 
 	var buf bytes.Buffer
 	if err := t.Execute(&buf, &tmplData); err != nil {
-		log.Println("Error executing template:", err)
+		glog.Warning("Error executing template:", err)
 		return msg
 	}
 	return buf.String()
@@ -176,7 +177,7 @@ func (n *NotificationHandler) Run() {
 
 	for reqs := range n.pendingNotifications {
 		if n.alertmanagerUrl == "" {
-			log.Println("No alert manager configured, not dispatching notification")
+			glog.Warning("No alert manager configured, not dispatching notification")
 			notificationsCount.Increment(map[string]string{result: dropped})
 			continue
 		}
@@ -186,7 +187,7 @@ func (n *NotificationHandler) Run() {
 		recordOutcome(time.Since(begin), err)
 
 		if err != nil {
-			log.Println("Error sending notification:", err)
+			glog.Error("Error sending notification:", err)
 		}
 	}
 }
@@ -16,12 +16,12 @@ package retrieval
 import (
 	"bytes"
 	"fmt"
-	"log"
 	"net/http"
 	"os"
 	"strings"
 	"time"
 
+	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/extraction"
 
 	clientmodel "github.com/prometheus/client_golang/model"
@@ -303,7 +303,7 @@ func (t *target) GlobalAddress() string {
 	address := t.address
 	hostname, err := os.Hostname()
 	if err != nil {
-		log.Printf("Couldn't get hostname: %s, returning target.Address()", err)
+		glog.Warning("Couldn't get hostname: %s, returning target.Address()", err)
 		return address
 	}
 	for _, localhostRepresentation := range localhostRepresentations {
@@ -15,13 +15,14 @@ package retrieval
 
 import (
 	"fmt"
-	"log"
 	"net/url"
 	"time"
 
+	"github.com/golang/glog"
+	"github.com/miekg/dns"
+
 	clientmodel "github.com/prometheus/client_golang/model"
 
-	"github.com/miekg/dns"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/utility"
 )
@@ -77,7 +78,7 @@ func (p *sdTargetProvider) Targets() ([]Target, error) {
 	for _, record := range response.Answer {
 		addr, ok := record.(*dns.SRV)
 		if !ok {
-			log.Printf("%s is not a valid SRV record", addr)
+			glog.Warning("%s is not a valid SRV record", addr)
 			continue
 		}
 		// Remove the final dot from rooted DNS names to make them look more usual.
@@ -14,10 +14,11 @@
 package retrieval
 
 import (
-	"log"
 	"time"
 
+	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/extraction"
+
 	clientmodel "github.com/prometheus/client_golang/model"
 
 	"github.com/prometheus/prometheus/config"
@@ -65,7 +66,7 @@ func (m *targetManager) TargetPoolForJob(job config.JobConfig) *TargetPool {
 	}
 
 	targetPool = NewTargetPool(m, provider)
-	log.Printf("Pool for job %s does not exist; creating and starting...", job.GetName())
+	glog.Infof("Pool for job %s does not exist; creating and starting...", job.GetName())
 
 	interval := job.ScrapeInterval()
 	m.poolsByJob[job.GetName()] = targetPool
@@ -14,11 +14,11 @@
 package retrieval
 
 import (
-	"log"
 	"sort"
 	"sync"
 	"time"
 
+	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/extraction"
 )
 
@@ -63,7 +63,7 @@ func (p *TargetPool) Run(results chan<- *extraction.Result, interval time.Durati
 		case newTargets := <-p.replaceTargetsQueue:
 			p.replaceTargets(newTargets)
 		case <-p.done:
-			log.Printf("TargetPool exiting...")
+			glog.Info("TargetPool exiting...")
 			return
 		}
 	}
@@ -127,7 +127,7 @@ func (p *TargetPool) runIteration(results chan<- *extraction.Result, interval ti
 	if p.targetProvider != nil {
 		targets, err := p.targetProvider.Targets()
 		if err != nil {
-			log.Printf("Error looking up targets, keeping old list: %s", err)
+			glog.Warning("Error looking up targets, keeping old list: %s", err)
 		} else {
 			p.ReplaceTargets(targets)
 		}
@@ -17,11 +17,12 @@ import (
 	"errors"
 	"fmt"
 	"hash/fnv"
-	"log"
 	"math"
 	"sort"
 	"time"
 
+	"github.com/golang/glog"
+
 	clientmodel "github.com/prometheus/client_golang/model"
 
 	"github.com/prometheus/prometheus/stats"
@@ -401,7 +402,7 @@ func (node *VectorAggregation) Eval(timestamp time.Time, view *viewAdapter) Vect
 func (node *VectorLiteral) Eval(timestamp time.Time, view *viewAdapter) Vector {
 	values, err := view.GetValueAtTime(node.fingerprints, timestamp)
 	if err != nil {
-		log.Println("Unable to get vector values:", err)
+		glog.Error("Unable to get vector values:", err)
 		return Vector{}
 	}
 	return values
@@ -589,7 +590,7 @@ func (node *MatrixLiteral) Eval(timestamp time.Time, view *viewAdapter) Matrix {
 	}
 	values, err := view.GetRangeValues(node.fingerprints, interval)
 	if err != nil {
-		log.Println("Unable to get values for vector interval:", err)
+		glog.Error("Unable to get values for vector interval:", err)
 		return Matrix{}
 	}
 	return values
@@ -602,7 +603,7 @@ func (node *MatrixLiteral) EvalBoundaries(timestamp time.Time, view *viewAdapter
 	}
 	values, err := view.GetBoundaryValues(node.fingerprints, interval)
 	if err != nil {
-		log.Printf("Unable to get boundary values for vector interval:", err)
+		glog.Error("Unable to get boundary values for vector interval:", err)
 		return Matrix{}
 	}
 	return values
@@ -14,9 +14,10 @@
 package ast
 
 import (
-	"log"
 	"time"
 
+	"github.com/golang/glog"
+
 	clientmodel "github.com/prometheus/client_golang/model"
 
 	"github.com/prometheus/prometheus/stats"
@@ -56,7 +57,7 @@ func (analyzer *QueryAnalyzer) Visit(node Node) {
 	case *VectorLiteral:
 		fingerprints, err := analyzer.storage.GetFingerprintsForLabelSet(n.labels)
 		if err != nil {
-			log.Printf("Error getting fingerprints for labelset %v: %v", n.labels, err)
+			glog.Errorf("Error getting fingerprints for labelset %v: %v", n.labels, err)
 			return
 		}
 		n.fingerprints = fingerprints
@@ -68,7 +69,7 @@ func (analyzer *QueryAnalyzer) Visit(node Node) {
 	case *MatrixLiteral:
 		fingerprints, err := analyzer.storage.GetFingerprintsForLabelSet(n.labels)
 		if err != nil {
-			log.Printf("Error getting fingerprints for labelset %v: %v", n.labels, err)
+			glog.Errorf("Error getting fingerprints for labelset %v: %v", n.labels, err)
 			return
 		}
 		n.fingerprints = fingerprints
@@ -18,10 +18,11 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"log"
 	"os"
 	"strings"
 
+	"github.com/golang/glog"
+
 	"github.com/prometheus/prometheus/rules/ast"
 )
 
@@ -69,7 +70,7 @@ func (lexer *RulesLexer) getChar() byte {
 		}
 		lexer.current = b
 	} else if err != io.EOF {
-		log.Fatal(err)
+		glog.Fatal(err)
 	}
 	return lexer.current
 }
@@ -14,11 +14,12 @@
 package rules
 
 import (
-	"log"
 	"sync"
 	"time"
 
+	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/extraction"
+
 	clientmodel "github.com/prometheus/client_golang/model"
 
 	"github.com/prometheus/prometheus/config"
@@ -74,7 +75,7 @@ func (m *ruleManager) Run() {
 			m.runIteration(m.results)
 			evalDurations.Add(map[string]string{intervalKey: m.interval.String()}, float64(time.Since(start)/time.Millisecond))
 		case <-m.done:
-			log.Printf("RuleManager exiting...")
+			glog.Info("RuleManager exiting...")
 			break
 		}
 	}
@@ -23,12 +23,12 @@ import (
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
-	dto "github.com/prometheus/prometheus/model/generated"
-
 	"github.com/prometheus/prometheus/coding"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/raw"
 	"github.com/prometheus/prometheus/storage/raw/leveldb"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 // CurationState contains high-level curation state information for the
@@ -19,11 +19,11 @@ import (
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
-	dto "github.com/prometheus/prometheus/model/generated"
-
 	"github.com/prometheus/prometheus/coding"
 	"github.com/prometheus/prometheus/coding/indexable"
 	"github.com/prometheus/prometheus/storage/raw/leveldb"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 // diskFrontier describes an on-disk store of series to provide a
@@ -20,12 +20,12 @@ import (
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
-	dto "github.com/prometheus/prometheus/model/generated"
-
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/raw"
 	"github.com/prometheus/prometheus/storage/raw/leveldb"
 	"github.com/prometheus/prometheus/utility"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 type FingerprintMetricMapping map[clientmodel.Fingerprint]clientmodel.Metric
@@ -16,21 +16,21 @@ package metric
 import (
 	"flag"
 	"fmt"
-	"log"
 	"sort"
 	"sync"
 	"time"
 
 	"code.google.com/p/goprotobuf/proto"
+	"github.com/golang/glog"
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
-	dto "github.com/prometheus/prometheus/model/generated"
-
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/raw"
 	"github.com/prometheus/prometheus/storage/raw/leveldb"
 	"github.com/prometheus/prometheus/utility"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 const sortConcurrency = 2
@@ -104,7 +104,7 @@ func (l *LevelDBMetricPersistence) Close() {
 			closer.Close()
 		case errorCloser:
 			if err := closer.Close(); err != nil {
-				log.Println("anomaly closing:", err)
+				glog.Error("Error closing persistence:", err)
 			}
 		}
 	}
@@ -236,7 +236,7 @@ func NewLevelDBMetricPersistence(baseDirectory string) (*LevelDBMetricPersistenc
 
 	if !workers.Wait() {
 		for _, err := range workers.Errors() {
-			log.Printf("Could not open storage due to %s", err)
+			glog.Error("Could not open storage:", err)
 		}
 
 		return nil, fmt.Errorf("Unable to open metric persistence.")
@@ -21,10 +21,10 @@ import (
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
-	dto "github.com/prometheus/prometheus/model/generated"
-
 	"github.com/prometheus/prometheus/storage/raw"
 	"github.com/prometheus/prometheus/storage/raw/leveldb"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 // processor models a post-processing agent that performs work given a sample
@@ -22,10 +22,10 @@ import (
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
+	"github.com/prometheus/prometheus/storage/raw/leveldb"
+
 	dto "github.com/prometheus/prometheus/model/generated"
 	fixture "github.com/prometheus/prometheus/storage/raw/leveldb/test"
-
-	"github.com/prometheus/prometheus/storage/raw/leveldb"
 )
 
 type curationState struct {
@@ -21,9 +21,9 @@ import (
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
-	dto "github.com/prometheus/prometheus/model/generated"
-
 	"github.com/prometheus/prometheus/coding/indexable"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 // SampleKey models the business logic around the data-transfer object
@@ -24,11 +24,11 @@ import (
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
-	dto "github.com/prometheus/prometheus/model/generated"
-
 	"github.com/prometheus/prometheus/coding"
 	"github.com/prometheus/prometheus/coding/indexable"
 	"github.com/prometheus/prometheus/utility/test"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 const stochasticMaximumVariance = 8
@@ -15,12 +15,11 @@ package metric
 
 import (
 	"fmt"
-	"log"
 	"sort"
 	"sync"
 	"time"
 
-	dto "github.com/prometheus/prometheus/model/generated"
+	"github.com/golang/glog"
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
@@ -28,6 +27,8 @@ import (
 	"github.com/prometheus/prometheus/coding/indexable"
 	"github.com/prometheus/prometheus/stats"
 	"github.com/prometheus/prometheus/storage/raw/leveldb"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 type chunk Values
@@ -175,7 +176,7 @@ func (t *TieredStorage) drain(drained chan<- bool) {
 
 	t.state = tieredStorageDraining
 
-	log.Println("Triggering drain...")
+	glog.Info("Triggering drain...")
 	t.draining <- (drained)
 }
 
@@ -269,7 +270,7 @@ func (t *TieredStorage) Flush() {
 func (t *TieredStorage) flushMemory(ttl time.Duration) {
 	flushOlderThan := time.Now().Add(-1 * ttl)
 
-	log.Println("Flushing...")
+	glog.Info("Flushing samples to disk...")
 	t.memoryArena.Flush(flushOlderThan, t.appendToDiskQueue)
 
 	queueLength := len(t.appendToDiskQueue)
@@ -280,11 +281,11 @@ func (t *TieredStorage) flushMemory(ttl time.Duration) {
 			samples = append(samples, chunk...)
 		}
 
-		log.Printf("Writing %d samples...", len(samples))
+		glog.Infof("Writing %d samples...", len(samples))
 		t.DiskStorage.AppendSamples(samples)
 	}
 
-	log.Println("Done flushing.")
+	glog.Info("Done flushing.")
 }
 
 func (t *TieredStorage) Close() {
@@ -373,7 +374,7 @@ func (t *TieredStorage) renderView(viewJob viewJob) {
 	for _, scanJob := range scans {
 		old, err := t.seriesTooOld(scanJob.fingerprint, *scanJob.operations[0].CurrentTime())
 		if err != nil {
-			log.Printf("Error getting watermark from cache for %s: %s", scanJob.fingerprint, err)
+			glog.Errorf("Error getting watermark from cache for %s: %s", scanJob.fingerprint, err)
 			continue
 		}
 		if old {
@@ -22,11 +22,11 @@ import (
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
-	dto "github.com/prometheus/prometheus/model/generated"
-
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/raw"
 	"github.com/prometheus/prometheus/storage/raw/leveldb"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 // unsafe.Sizeof(watermarks{})
@@ -16,10 +16,10 @@ package leveldb
 import (
 	"code.google.com/p/goprotobuf/proto"
 
-	dto "github.com/prometheus/prometheus/model/generated"
-
 	"github.com/prometheus/prometheus/storage/raw"
 	"github.com/prometheus/prometheus/storage/raw/leveldb"
+
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 var existenceValue = new(dto.MembershipIndexValue)
@@ -22,16 +22,16 @@ import (
 	"encoding/csv"
 	"flag"
 	"fmt"
-	"log"
 	"os"
 	"strconv"
 
 	"code.google.com/p/goprotobuf/proto"
-	dto "github.com/prometheus/prometheus/model/generated"
+	"github.com/golang/glog"
 
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/metric"
 
+	dto "github.com/prometheus/prometheus/model/generated"
 )
 
 var (
@@ -95,12 +95,12 @@ func main() {
 	flag.Parse()
 
 	if storageRoot == nil || *storageRoot == "" {
-		log.Fatal("Must provide a path...")
+		glog.Fatal("Must provide a path...")
 	}
 
 	persistence, err := metric.NewLevelDBMetricPersistence(*storageRoot)
 	if err != nil {
-		log.Fatal(err)
+		glog.Fatal(err)
 	}
 	defer persistence.Close()
 
@@ -110,13 +110,13 @@ func main() {
 
 	entire, err := persistence.MetricSamples.ForEach(dumper, dumper, dumper)
 	if err != nil {
-		log.Fatalf("Error dumping samples: %s", err)
+		glog.Fatal("Error dumping samples:", err)
 	}
 	if !entire {
-		log.Fatalf("Didn't scan entire corpus")
+		glog.Fatal("Didn't scan entire corpus")
 	}
 	dumper.Flush()
 	if err = dumper.Error(); err != nil {
-		log.Fatalf("Error flushing CSV: %s", err)
+		glog.Fatal("Error flushing CSV:", err)
 	}
 }
@@ -19,8 +19,9 @@ package main
 import (
 	"flag"
 	"github.com/prometheus/prometheus/storage/metric"
-	"log"
 	"time"
+
+	"github.com/golang/glog"
 )
 
 var (
@@ -31,21 +32,21 @@ func main() {
 	flag.Parse()
 
 	if storageRoot == nil || *storageRoot == "" {
-		log.Fatal("Must provide a path...")
+		glog.Fatal("Must provide a path...")
 	}
 
 	persistences, err := metric.NewLevelDBMetricPersistence(*storageRoot)
 	if err != nil {
-		log.Fatal(err)
+		glog.Fatal(err)
 	}
 	defer persistences.Close()
 
 	start := time.Now()
-	log.Printf("Starting compaction...")
+	glog.Info("Starting compaction...")
 	size, _ := persistences.Sizes()
-	log.Printf("Original Size: %d", size)
+	glog.Info("Original Size:", size)
 	persistences.Prune()
-	log.Printf("Finished in %s", time.Since(start))
+	glog.Info("Finished in", time.Since(start))
 	size, _ = persistences.Sizes()
-	log.Printf("New Size: %d", size)
+	glog.Info("New Size:", size)
 }
@@ -19,7 +19,8 @@ package main
 import (
 	"flag"
 	"fmt"
-	"log"
+
+	"github.com/golang/glog"
 
 	"github.com/prometheus/prometheus/rules"
 )
@@ -30,12 +31,12 @@ func main() {
 	flag.Parse()
 
 	if *ruleFile == "" {
-		log.Fatal("Must provide a rule file path")
+		glog.Fatal("Must provide a rule file path")
 	}
 
 	rules, err := rules.LoadRulesFromFile(*ruleFile)
 	if err != nil {
-		log.Fatalf("Error loading rule file %s: %s", *ruleFile, err)
+		glog.Fatalf("Error loading rule file %s: %s", *ruleFile, err)
 	}
 
 	fmt.Printf("Successfully loaded %d rules:\n\n", len(rules))
@@ -16,12 +16,12 @@ package api
 import (
 	"encoding/json"
 	"errors"
-	"log"
 	"net/http"
 	"sort"
 	"time"
 
 	"code.google.com/p/gorest"
+	"github.com/golang/glog"
 
 	clientmodel "github.com/prometheus/client_golang/model"
 
@@ -59,7 +59,7 @@ func (serv MetricsService) Query(expr string, asText string) string {
 
 	queryStats := stats.NewTimerGroup()
 	result := ast.EvalToString(exprNode, timestamp, format, serv.Storage, queryStats)
-	log.Printf("Instant query: %s\nQuery stats:\n%s\n", expr, queryStats)
+	glog.Infof("Instant query: %s\nQuery stats:\n%s\n", expr, queryStats)
 	return result
 }
 
@@ -113,7 +113,7 @@ func (serv MetricsService) QueryRange(expr string, end int64, duration int64, st
 	result := ast.TypedValueToJSON(matrix, "matrix")
 	jsonTimer.Stop()
 
-	log.Printf("Range query: %s\nQuery stats:\n%s\n", expr, queryStats)
+	glog.Infof("Range query: %s\nQuery stats:\n%s\n", expr, queryStats)
 	return result
 }
 
@@ -123,14 +123,14 @@ func (serv MetricsService) Metrics() string {
 	serv.setAccessControlHeaders(rb)
 	rb.SetContentType(gorest.Application_Json)
 	if err != nil {
-		log.Printf("Error loading metric names: %v", err)
+		glog.Error("Error loading metric names:", err)
 		rb.SetResponseCode(http.StatusInternalServerError)
 		return err.Error()
 	}
 	sort.Sort(metricNames)
 	resultBytes, err := json.Marshal(metricNames)
 	if err != nil {
-		log.Printf("Error marshalling metric names: %v", err)
+		glog.Error("Error marshalling metric names:", err)
 		rb.SetResponseCode(http.StatusInternalServerError)
 		return err.Error()
 	}
@@ -5,9 +5,10 @@ import (
 	"compress/gzip"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"strings"
+
+	"github.com/golang/glog"
 )
 
 const (
@@ -50,7 +51,7 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	file, err := GetFile(StaticFiles, name)
 	if err != nil {
 		if err != io.EOF {
-			log.Printf("Could not get file: %s", err)
+			glog.Warning("Could not get file:", err)
 		}
 		w.WriteHeader(http.StatusNotFound)
 		return
web/web.go
13
web/web.go
|
@ -17,14 +17,13 @@ import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"html/template"
|
"html/template"
|
||||||
"log"
|
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/pprof"
|
"net/http/pprof"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"code.google.com/p/gorest"
|
"code.google.com/p/gorest"
|
||||||
|
"github.com/golang/glog"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/client_golang/prometheus/exp"
|
"github.com/prometheus/client_golang/prometheus/exp"
|
||||||
|
|
||||||
|
@ -77,7 +76,7 @@ func (w WebService) ServeForever() error {
|
||||||
exp.Handle("/user/", http.StripPrefix("/user/", http.FileServer(http.Dir(*userAssetsPath))))
|
exp.Handle("/user/", http.StripPrefix("/user/", http.FileServer(http.Dir(*userAssetsPath))))
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("listening on %s", *listenAddress)
|
glog.Info("listening on %s", *listenAddress)
|
||||||
|
|
||||||
return http.ListenAndServe(*listenAddress, exp.DefaultCoarseMux)
|
return http.ListenAndServe(*listenAddress, exp.DefaultCoarseMux)
|
||||||
}
|
}
|
||||||
|
@ -94,14 +93,14 @@ func getEmbeddedTemplate(name string) (*template.Template, error) {
|
||||||
|
|
||||||
file, err := blob.GetFile(blob.TemplateFiles, "_base.html")
|
file, err := blob.GetFile(blob.TemplateFiles, "_base.html")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Could not read base template: %s", err)
|
glog.Error("Could not read base template:", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
t.Parse(string(file))
|
t.Parse(string(file))
|
||||||
|
|
||||||
file, err = blob.GetFile(blob.TemplateFiles, name+".html")
|
file, err = blob.GetFile(blob.TemplateFiles, name+".html")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Could not read %s template: %s", name, err)
|
glog.Errorf("Could not read %s template: %s", name, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
t.Parse(string(file))
|
t.Parse(string(file))
|
||||||
|
@ -131,12 +130,12 @@ func getTemplate(name string) (t *template.Template, err error) {
|
||||||
func executeTemplate(w http.ResponseWriter, name string, data interface{}) {
|
func executeTemplate(w http.ResponseWriter, name string, data interface{}) {
|
||||||
tpl, err := getTemplate(name)
|
tpl, err := getTemplate(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Error preparing layout template: %s", err)
|
glog.Error("Error preparing layout template:", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
err = tpl.Execute(w, data)
|
err = tpl.Execute(w, data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Error executing template: %s", err)
|
glog.Error("Error executing template:", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
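A closing note on behavior, not stated in the diff itself: unlike the standard log package, glog writes to log files (in the system temporary directory by default) and only mirrors output to stderr when asked. Converted binaries are therefore typically started with glog's standard flags, for example -logtostderr=true to log to stderr only, -stderrthreshold=ERROR to mirror errors, or -v=2 to enable verbose levels; the exact Prometheus command lines are not part of this commit.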