// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Dumper is responsible for dumping all samples along with metadata contained
// in a given Prometheus metrics storage. It prints samples in unquoted CSV
// format, with commas as field separators:
//
// <fingerprint>,<chunk_first_time>,<chunk_last_time>,<chunk_sample_count>,<chunk_index>,<timestamp>,<value>
package main
import (
	"encoding/csv"
	"flag"
	"fmt"
	"os"
	"strconv"

	"github.com/golang/glog"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/metric"
)
var (
storageRoot = flag . String ( "storage.root" , "" , "The path to the storage root for Prometheus." )
2013-10-22 16:06:49 -07:00
dieOnBadChunk = flag . Bool ( "dieOnBadChunk" , false , "Whether to die upon encountering a bad chunk." )
2013-05-21 14:23:02 -07:00
)
type SamplesDumper struct {
* csv . Writer
}
func ( d * SamplesDumper ) Operate ( key , value interface { } ) * storage . OperatorError {
sampleKey := key . ( * metric . SampleKey )
2013-10-22 16:06:49 -07:00
if * dieOnBadChunk && sampleKey . FirstTimestamp . After ( sampleKey . LastTimestamp ) {
glog . Fatalf ( "Chunk: First time (%v) after last time (%v): %v\n" , sampleKey . FirstTimestamp . Unix ( ) , sampleKey . LastTimestamp . Unix ( ) , sampleKey )
}
2013-06-25 05:02:27 -07:00
for i , sample := range value . ( metric . Values ) {
2013-10-22 16:06:49 -07:00
if * dieOnBadChunk && ( sample . Timestamp . Before ( sampleKey . FirstTimestamp ) || sample . Timestamp . After ( sampleKey . LastTimestamp ) ) {
glog . Fatalf ( "Sample not within chunk boundaries: chunk FirstTimestamp (%v), chunk LastTimestamp (%v) vs. sample Timestamp (%v)\n" , sampleKey . FirstTimestamp . Unix ( ) , sampleKey . LastTimestamp . Unix ( ) , sample . Timestamp )
}
2013-05-21 14:23:02 -07:00
d . Write ( [ ] string {
sampleKey . Fingerprint . String ( ) ,
strconv . FormatInt ( sampleKey . FirstTimestamp . Unix ( ) , 10 ) ,
strconv . FormatInt ( sampleKey . LastTimestamp . Unix ( ) , 10 ) ,
strconv . FormatUint ( uint64 ( sampleKey . SampleCount ) , 10 ) ,
strconv . Itoa ( i ) ,
strconv . FormatInt ( sample . Timestamp . Unix ( ) , 10 ) ,
fmt . Sprintf ( "%v" , sample . Value ) ,
} )
if err := d . Error ( ) ; err != nil {
return & storage . OperatorError {
error : err ,
Continuable : false ,
}
}
}
return nil
}
func main ( ) {
flag . Parse ( )
if storageRoot == nil || * storageRoot == "" {
2013-08-12 08:18:02 -07:00
glog . Fatal ( "Must provide a path..." )
2013-05-21 14:23:02 -07:00
}
persistence , err := metric . NewLevelDBMetricPersistence ( * storageRoot )
if err != nil {
2013-08-12 08:18:02 -07:00
glog . Fatal ( err )
2013-05-21 14:23:02 -07:00
}
defer persistence . Close ( )
2013-06-25 05:02:27 -07:00
dumper := & SamplesDumper {
csv . NewWriter ( os . Stdout ) ,
}
2013-10-22 16:06:49 -07:00
entire , err := persistence . MetricSamples . ForEach ( & metric . MetricSamplesDecoder { } , & metric . AcceptAllFilter { } , dumper )
2013-05-21 14:23:02 -07:00
if err != nil {
2013-08-12 09:22:48 -07:00
glog . Fatal ( "Error dumping samples: " , err )
2013-05-21 14:23:02 -07:00
}
if ! entire {
2013-08-12 08:18:02 -07:00
glog . Fatal ( "Didn't scan entire corpus" )
2013-05-21 14:23:02 -07:00
}
dumper . Flush ( )
if err = dumper . Error ( ) ; err != nil {
2013-08-12 09:22:48 -07:00
glog . Fatal ( "Error flushing CSV: " , err )
2013-05-21 14:23:02 -07:00
}
}