2017-04-10 11:59:45 -07:00
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
2016-12-07 08:30:10 -08:00
package main
import (
2017-12-13 22:46:46 -08:00
"bufio"
2016-12-09 04:41:38 -08:00
"flag"
2016-12-07 08:30:10 -08:00
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
2017-12-18 15:32:39 -08:00
"sort"
2018-05-08 04:35:06 -07:00
"strconv"
2017-12-13 22:46:46 -08:00
"strings"
2016-12-07 08:30:10 -08:00
"sync"
2017-10-19 09:14:37 -07:00
"text/tabwriter"
2016-12-07 08:30:10 -08:00
"time"
2017-09-19 01:20:19 -07:00
"github.com/go-kit/kit/log"
2017-05-18 07:09:30 -07:00
"github.com/pkg/errors"
2017-04-04 02:27:26 -07:00
"github.com/prometheus/tsdb"
2018-12-28 09:06:12 -08:00
"github.com/prometheus/tsdb/chunks"
2017-04-04 02:27:26 -07:00
"github.com/prometheus/tsdb/labels"
2017-09-09 04:11:12 -07:00
"gopkg.in/alecthomas/kingpin.v2"
2016-12-07 08:30:10 -08:00
)
func main ( ) {
2017-09-09 04:11:12 -07:00
var (
cli = kingpin . New ( filepath . Base ( os . Args [ 0 ] ) , "CLI tool for tsdb" )
benchCmd = cli . Command ( "bench" , "run benchmarks" )
benchWriteCmd = benchCmd . Command ( "write" , "run a write performance benchmark" )
2018-10-25 02:32:57 -07:00
benchWriteOutPath = benchWriteCmd . Flag ( "out" , "set the output path" ) . Default ( "benchout" ) . String ( )
2017-09-09 04:11:12 -07:00
benchWriteNumMetrics = benchWriteCmd . Flag ( "metrics" , "number of metrics to read" ) . Default ( "10000" ) . Int ( )
2018-10-25 02:32:57 -07:00
benchSamplesFile = benchWriteCmd . Arg ( "file" , "input file with samples data, default is (" + filepath . Join ( ".." , "testdata" , "20kseries.json" ) + ")" ) . Default ( filepath . Join ( ".." , "testdata" , "20kseries.json" ) ) . String ( )
2017-10-01 13:18:50 -07:00
listCmd = cli . Command ( "ls" , "list db blocks" )
2018-05-08 04:35:06 -07:00
listCmdHumanReadable = listCmd . Flag ( "human-readable" , "print human readable values" ) . Short ( 'h' ) . Bool ( )
2018-10-25 02:32:57 -07:00
listPath = listCmd . Arg ( "db path" , "database path (default is " + filepath . Join ( "benchout" , "storage" ) + ")" ) . Default ( filepath . Join ( "benchout" , "storage" ) ) . String ( )
2018-12-28 09:06:12 -08:00
analyzeCmd = cli . Command ( "analyze" , "analyze churn, label pair cardinality." )
analyzePath = analyzeCmd . Arg ( "db path" , "database path (default is " + filepath . Join ( "benchout" , "storage" ) + ")" ) . Default ( filepath . Join ( "benchout" , "storage" ) ) . String ( )
2019-01-02 08:48:42 -08:00
analyzeBlockID = analyzeCmd . Arg ( "block id" , "block to analyze (default is the last block)" ) . String ( )
2018-12-28 09:06:12 -08:00
analyzeLimit = analyzeCmd . Flag ( "limit" , "how many items to show in each list" ) . Default ( "20" ) . Int ( )
2016-12-07 08:30:10 -08:00
)
2017-09-09 04:11:12 -07:00
switch kingpin . MustParse ( cli . Parse ( os . Args [ 1 : ] ) ) {
case benchWriteCmd . FullCommand ( ) :
wb := & writeBenchmark {
outPath : * benchWriteOutPath ,
numMetrics : * benchWriteNumMetrics ,
samplesFile : * benchSamplesFile ,
}
wb . run ( )
2017-10-01 13:18:50 -07:00
case listCmd . FullCommand ( ) :
db , err := tsdb . Open ( * listPath , nil , nil , nil )
if err != nil {
exitWithError ( err )
}
2018-05-08 04:35:06 -07:00
printBlocks ( db . Blocks ( ) , listCmdHumanReadable )
2018-12-28 09:06:12 -08:00
case analyzeCmd . FullCommand ( ) :
db , err := tsdb . Open ( * analyzePath , nil , nil , nil )
if err != nil {
exitWithError ( err )
}
blocks := db . Blocks ( )
var block * tsdb . Block
2019-01-02 08:48:42 -08:00
if * analyzeBlockID != "" {
2018-12-28 09:06:12 -08:00
for _ , b := range blocks {
2019-01-02 08:48:42 -08:00
if b . Meta ( ) . ULID . String ( ) == * analyzeBlockID {
2018-12-28 09:06:12 -08:00
block = b
break
}
}
} else if len ( blocks ) > 0 {
block = blocks [ len ( blocks ) - 1 ]
}
if block == nil {
exitWithError ( fmt . Errorf ( "Block not found" ) )
}
analyzeBlock ( block , * analyzeLimit )
2016-12-07 08:30:10 -08:00
}
2017-09-09 04:11:12 -07:00
flag . CommandLine . Set ( "log.level" , "debug" )
2016-12-07 08:30:10 -08:00
}
// writeBenchmark holds the configuration and state of one "bench write"
// run: where output goes, what input to read, the open storage, and the
// profile files written while the benchmark runs.
type writeBenchmark struct {
	outPath     string // directory for storage and profile output
	samplesFile string // input file with one label-set line per series
	cleanup     bool   // true when outPath is a temp dir created by run()
	numMetrics  int    // how many distinct series to ingest

	storage *tsdb.DB

	// Profile files: created by startProfiling, written/closed by
	// stopProfiling.
	cpuprof   *os.File
	memprof   *os.File
	blockprof *os.File
	mtxprof   *os.File
}
2017-09-09 04:11:12 -07:00
// run executes the write benchmark end to end: it prepares an empty output
// directory (falling back to a temp dir when none was configured), opens a
// fresh TSDB there, reads the input series, ingests synthetic scrapes under
// CPU/heap/block/mutex profiling, and prints throughput numbers.
func (b *writeBenchmark) run() {
	if b.outPath == "" {
		dir, err := ioutil.TempDir("", "tsdb_bench")
		if err != nil {
			exitWithError(err)
		}
		b.outPath = dir
		b.cleanup = true
	}
	// Always start from an empty output directory.
	if err := os.RemoveAll(b.outPath); err != nil {
		exitWithError(err)
	}
	if err := os.MkdirAll(b.outPath, 0777); err != nil {
		exitWithError(err)
	}
	dir := filepath.Join(b.outPath, "storage")

	l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
	l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	st, err := tsdb.Open(dir, l, nil, &tsdb.Options{
		RetentionDuration: 15 * 24 * 60 * 60 * 1000, // 15 days in milliseconds
		BlockRanges:       tsdb.ExponentialBlockRanges(2*60*60*1000, 5, 3),
	})
	if err != nil {
		exitWithError(err)
	}
	b.storage = st

	var metrics []labels.Labels

	measureTime("readData", func() {
		f, err := os.Open(b.samplesFile)
		if err != nil {
			exitWithError(err)
		}
		defer f.Close()

		metrics, err = readPrometheusLabels(f, b.numMetrics)
		if err != nil {
			exitWithError(err)
		}
	})

	var total uint64
	dur := measureTime("ingestScrapes", func() {
		// Profiling covers only the ingestion phase.
		b.startProfiling()
		total, err = b.ingestScrapes(metrics, 3000)
		if err != nil {
			exitWithError(err)
		}
	})

	fmt.Println(" > total samples:", total)
	fmt.Println(" > samples/sec:", float64(total)/dur.Seconds())

	measureTime("stopStorage", func() {
		if err := b.storage.Close(); err != nil {
			exitWithError(err)
		}
		if err := b.stopProfiling(); err != nil {
			exitWithError(err)
		}
	})
}
2017-06-07 04:42:53 -07:00
// timeDelta is the spacing between consecutive scrape timestamps in
// milliseconds (30s, a typical Prometheus scrape interval).
const timeDelta = 30000
2017-02-01 06:29:48 -08:00
func ( b * writeBenchmark ) ingestScrapes ( lbls [ ] labels . Labels , scrapeCount int ) ( uint64 , error ) {
2017-01-10 02:17:37 -08:00
var mu sync . Mutex
var total uint64
2016-12-07 08:30:10 -08:00
2017-02-02 02:09:19 -08:00
for i := 0 ; i < scrapeCount ; i += 100 {
var wg sync . WaitGroup
2017-01-11 03:54:18 -08:00
lbls := lbls
for len ( lbls ) > 0 {
l := 1000
if len ( lbls ) < 1000 {
l = len ( lbls )
2016-12-07 08:30:10 -08:00
}
2017-01-11 03:54:18 -08:00
batch := lbls [ : l ]
lbls = lbls [ l : ]
wg . Add ( 1 )
go func ( ) {
2017-06-07 04:42:53 -07:00
n , err := b . ingestScrapesShard ( batch , 100 , int64 ( timeDelta * i ) )
2017-01-11 03:54:18 -08:00
if err != nil {
// exitWithError(err)
fmt . Println ( " err" , err )
}
mu . Lock ( )
total += n
mu . Unlock ( )
wg . Done ( )
} ( )
}
wg . Wait ( )
2016-12-07 08:30:10 -08:00
}
2017-07-13 07:15:13 -07:00
fmt . Println ( "ingestion completed" )
2017-01-10 02:17:37 -08:00
2017-02-01 06:29:48 -08:00
return total , nil
2016-12-07 08:30:10 -08:00
}
2017-01-11 03:54:18 -08:00
// ingestScrapesShard appends scrapeCount scrapes for the given series,
// starting at timestamp baset and advancing by timeDelta per scrape. The
// series reference returned by the first Add is cached so later scrapes can
// take the cheaper AddFast path; when AddFast reports the reference unknown
// (tsdb.ErrNotFound) the series is re-added to obtain a fresh one. It
// returns the number of samples successfully committed.
func (b *writeBenchmark) ingestScrapesShard(metrics []labels.Labels, scrapeCount int, baset int64) (uint64, error) {
	ts := baset

	type sample struct {
		labels labels.Labels
		value  int64
		ref    *uint64 // series reference for AddFast; nil until first Add
	}

	scrape := make([]*sample, 0, len(metrics))

	for _, m := range metrics {
		scrape = append(scrape, &sample{
			labels: m,
			value:  123456789,
		})
	}
	total := uint64(0)

	for i := 0; i < scrapeCount; i++ {
		// One appender (transaction) per scrape round.
		app := b.storage.Appender()
		ts += timeDelta

		for _, s := range scrape {
			s.value += 1000

			if s.ref == nil {
				ref, err := app.Add(s.labels, ts, float64(s.value))
				if err != nil {
					panic(err)
				}
				s.ref = &ref
			} else if err := app.AddFast(*s.ref, ts, float64(s.value)); err != nil {

				if errors.Cause(err) != tsdb.ErrNotFound {
					panic(err)
				}

				// Cached reference is stale; re-add to get a new one.
				ref, err := app.Add(s.labels, ts, float64(s.value))
				if err != nil {
					panic(err)
				}
				s.ref = &ref
			}
			total++
		}
		if err := app.Commit(); err != nil {
			return total, err
		}
	}
	return total, nil
}
func ( b * writeBenchmark ) startProfiling ( ) {
var err error
// Start CPU profiling.
b . cpuprof , err = os . Create ( filepath . Join ( b . outPath , "cpu.prof" ) )
if err != nil {
2017-03-19 09:05:01 -07:00
exitWithError ( fmt . Errorf ( "bench: could not create cpu profile: %v" , err ) )
2016-12-07 08:30:10 -08:00
}
2018-09-20 01:33:52 -07:00
if err := pprof . StartCPUProfile ( b . cpuprof ) ; err != nil {
exitWithError ( fmt . Errorf ( "bench: could not start CPU profile: %v" , err ) )
}
2016-12-07 08:30:10 -08:00
// Start memory profiling.
b . memprof , err = os . Create ( filepath . Join ( b . outPath , "mem.prof" ) )
if err != nil {
2017-03-19 09:05:01 -07:00
exitWithError ( fmt . Errorf ( "bench: could not create memory profile: %v" , err ) )
2016-12-07 08:30:10 -08:00
}
2017-05-14 02:51:56 -07:00
runtime . MemProfileRate = 64 * 1024
2016-12-07 08:30:10 -08:00
// Start fatal profiling.
b . blockprof , err = os . Create ( filepath . Join ( b . outPath , "block.prof" ) )
if err != nil {
2017-03-19 09:05:01 -07:00
exitWithError ( fmt . Errorf ( "bench: could not create block profile: %v" , err ) )
2016-12-07 08:30:10 -08:00
}
2017-05-14 02:51:56 -07:00
runtime . SetBlockProfileRate ( 20 )
b . mtxprof , err = os . Create ( filepath . Join ( b . outPath , "mutex.prof" ) )
if err != nil {
exitWithError ( fmt . Errorf ( "bench: could not create mutex profile: %v" , err ) )
}
runtime . SetMutexProfileFraction ( 20 )
2016-12-07 08:30:10 -08:00
}
2018-09-20 01:33:52 -07:00
// stopProfiling writes out and closes any profile files opened by
// startProfiling, resets the block and mutex profile rates, and returns the
// first profile-write error encountered.
func (b *writeBenchmark) stopProfiling() error {
	if b.cpuprof != nil {
		pprof.StopCPUProfile()
		b.cpuprof.Close()
		b.cpuprof = nil
	}
	if b.memprof != nil {
		if err := pprof.Lookup("heap").WriteTo(b.memprof, 0); err != nil {
			return fmt.Errorf("error writing mem profile: %v", err)
		}
		b.memprof.Close()
		b.memprof = nil
	}
	if b.blockprof != nil {
		if err := pprof.Lookup("block").WriteTo(b.blockprof, 0); err != nil {
			return fmt.Errorf("error writing block profile: %v", err)
		}
		b.blockprof.Close()
		b.blockprof = nil
		// Disable block profiling again now that the data is written.
		runtime.SetBlockProfileRate(0)
	}
	if b.mtxprof != nil {
		if err := pprof.Lookup("mutex").WriteTo(b.mtxprof, 0); err != nil {
			return fmt.Errorf("error writing mutex profile: %v", err)
		}
		b.mtxprof.Close()
		b.mtxprof = nil
		// Disable mutex profiling again now that the data is written.
		runtime.SetMutexProfileFraction(0)
	}
	return nil
}
2017-02-01 06:29:48 -08:00
// measureTime runs f, printing start/completion markers for the named stage
// to stdout, and returns how long f took.
func measureTime(stage string, f func()) time.Duration {
	fmt.Printf(">> start stage=%s\n", stage)
	start := time.Now()
	f()
	// Capture the elapsed time once so the printed and returned durations
	// agree (the original called time.Since twice, yielding two different
	// values).
	elapsed := time.Since(start)
	fmt.Printf(">> completed stage=%s duration=%s\n", stage, elapsed)
	return elapsed
}
2017-12-13 22:46:46 -08:00
func mapToLabels ( m map [ string ] interface { } , l * labels . Labels ) {
for k , v := range m {
* l = append ( * l , labels . Label { Name : k , Value : v . ( string ) } )
2017-01-16 12:29:53 -08:00
}
2017-12-13 22:46:46 -08:00
}
func readPrometheusLabels ( r io . Reader , n int ) ( [ ] labels . Labels , error ) {
scanner := bufio . NewScanner ( r )
2016-12-07 08:30:10 -08:00
2017-01-16 12:29:53 -08:00
var mets [ ] labels . Labels
hashes := map [ uint64 ] struct { } { }
2017-12-13 22:46:46 -08:00
i := 0
2016-12-07 08:30:10 -08:00
2017-12-13 22:46:46 -08:00
for scanner . Scan ( ) && i < n {
2017-01-16 12:29:53 -08:00
m := make ( labels . Labels , 0 , 10 )
2017-12-13 22:46:46 -08:00
r := strings . NewReplacer ( "\"" , "" , "{" , "" , "}" , "" )
s := r . Replace ( scanner . Text ( ) )
labelChunks := strings . Split ( s , "," )
for _ , labelChunk := range labelChunks {
split := strings . Split ( labelChunk , ":" )
m = append ( m , labels . Label { Name : split [ 0 ] , Value : split [ 1 ] } )
}
2017-12-18 15:32:39 -08:00
// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
sort . Sort ( m )
2017-01-16 12:29:53 -08:00
h := m . Hash ( )
if _ , ok := hashes [ h ] ; ok {
continue
2016-12-07 08:30:10 -08:00
}
2017-01-16 12:29:53 -08:00
mets = append ( mets , m )
hashes [ h ] = struct { } { }
i ++
2016-12-07 08:30:10 -08:00
}
2017-12-13 22:46:46 -08:00
return mets , nil
2016-12-07 08:30:10 -08:00
}
// exitWithError prints err to stderr and terminates the process with exit
// code 1. Deferred functions in callers do not run.
func exitWithError(err error) {
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}
2017-10-02 13:48:47 -07:00
2018-05-08 04:35:06 -07:00
func printBlocks ( blocks [ ] * tsdb . Block , humanReadable * bool ) {
2017-10-19 09:14:37 -07:00
tw := tabwriter . NewWriter ( os . Stdout , 0 , 0 , 2 , ' ' , 0 )
2017-10-02 13:48:47 -07:00
defer tw . Flush ( )
fmt . Fprintln ( tw , "BLOCK ULID\tMIN TIME\tMAX TIME\tNUM SAMPLES\tNUM CHUNKS\tNUM SERIES" )
for _ , b := range blocks {
2017-10-11 02:02:57 -07:00
meta := b . Meta ( )
2017-10-02 13:48:47 -07:00
fmt . Fprintf ( tw ,
"%v\t%v\t%v\t%v\t%v\t%v\n" ,
2017-10-11 02:02:57 -07:00
meta . ULID ,
2018-05-08 04:35:06 -07:00
getFormatedTime ( meta . MinTime , humanReadable ) ,
getFormatedTime ( meta . MaxTime , humanReadable ) ,
2017-10-11 02:02:57 -07:00
meta . Stats . NumSamples ,
meta . Stats . NumChunks ,
meta . Stats . NumSeries ,
2017-10-02 13:48:47 -07:00
)
}
}
2018-05-08 04:35:06 -07:00
func getFormatedTime ( timestamp int64 , humanReadable * bool ) string {
if * humanReadable {
return time . Unix ( timestamp / 1000 , 0 ) . String ( )
}
return strconv . FormatInt ( timestamp , 10 )
}
2018-12-28 09:06:12 -08:00
func analyzeBlock ( b * tsdb . Block , limit int ) {
fmt . Printf ( "Block path: %s\n" , b . Dir ( ) )
meta := b . Meta ( )
// Presume 1ms resolution that Prometheus uses.
fmt . Printf ( "Duration: %s\n" , ( time . Duration ( meta . MaxTime - meta . MinTime ) * 1e6 ) . String ( ) )
fmt . Printf ( "Series: %d\n" , meta . Stats . NumSeries )
ir , err := b . Index ( )
if err != nil {
exitWithError ( err )
}
defer ir . Close ( )
allLabelNames , err := ir . LabelNames ( )
if err != nil {
exitWithError ( err )
}
fmt . Printf ( "Label names: %d\n" , len ( allLabelNames ) )
type postingInfo struct {
key string
metric uint64
}
postingInfos := [ ] postingInfo { }
printInfo := func ( postingInfos [ ] postingInfo ) {
sort . Slice ( postingInfos , func ( i , j int ) bool { return postingInfos [ i ] . metric > postingInfos [ j ] . metric } )
for i , pc := range postingInfos {
fmt . Printf ( "%d %s\n" , pc . metric , pc . key )
if i >= limit {
break
}
}
}
labelsUncovered := map [ string ] uint64 { }
labelpairsUncovered := map [ string ] uint64 { }
labelpairsCount := map [ string ] uint64 { }
entries := 0
p , err := ir . Postings ( "" , "" ) // The special all key.
if err != nil {
exitWithError ( err )
}
lbls := labels . Labels { }
chks := [ ] chunks . Meta { }
for p . Next ( ) {
2019-01-02 08:48:42 -08:00
if err = ir . Series ( p . At ( ) , & lbls , & chks ) ; err != nil {
exitWithError ( err )
}
2018-12-28 09:06:12 -08:00
// Amount of the block time range not covered by this series.
uncovered := uint64 ( meta . MaxTime - meta . MinTime ) - uint64 ( chks [ len ( chks ) - 1 ] . MaxTime - chks [ 0 ] . MinTime )
for _ , lbl := range lbls {
key := lbl . Name + "=" + lbl . Value
labelsUncovered [ lbl . Name ] += uncovered
labelpairsUncovered [ key ] += uncovered
2019-01-02 08:48:42 -08:00
labelpairsCount [ key ] ++
entries ++
2018-12-28 09:06:12 -08:00
}
}
if p . Err ( ) != nil {
exitWithError ( p . Err ( ) )
}
fmt . Printf ( "Postings (unique label pairs): %d\n" , len ( labelpairsUncovered ) )
fmt . Printf ( "Postings entries (total label pairs): %d\n" , entries )
postingInfos = postingInfos [ : 0 ]
for k , m := range labelpairsUncovered {
postingInfos = append ( postingInfos , postingInfo { k , uint64 ( float64 ( m ) / float64 ( meta . MaxTime - meta . MinTime ) ) } )
}
fmt . Printf ( "\nLabel pairs most involved in churning:\n" )
printInfo ( postingInfos )
postingInfos = postingInfos [ : 0 ]
for k , m := range labelsUncovered {
postingInfos = append ( postingInfos , postingInfo { k , uint64 ( float64 ( m ) / float64 ( meta . MaxTime - meta . MinTime ) ) } )
}
fmt . Printf ( "\nLabel names most involved in churning:\n" )
printInfo ( postingInfos )
postingInfos = postingInfos [ : 0 ]
for k , m := range labelpairsCount {
postingInfos = append ( postingInfos , postingInfo { k , m } )
}
fmt . Printf ( "\nMost common label pairs:\n" )
printInfo ( postingInfos )
postingInfos = postingInfos [ : 0 ]
for _ , n := range allLabelNames {
lv , err := ir . LabelValues ( n )
if err != nil {
exitWithError ( err )
}
postingInfos = append ( postingInfos , postingInfo { n , uint64 ( lv . Len ( ) ) } )
}
fmt . Printf ( "\nHighest cardinality labels:\n" )
printInfo ( postingInfos )
postingInfos = postingInfos [ : 0 ]
lv , err := ir . LabelValues ( "__name__" )
if err != nil {
exitWithError ( err )
}
for i := 0 ; i < lv . Len ( ) ; i ++ {
names , err := lv . At ( i )
if err != nil {
exitWithError ( err )
}
for _ , n := range names {
postings , err := ir . Postings ( "__name__" , n )
if err != nil {
exitWithError ( err )
}
count := 0
for postings . Next ( ) {
count ++
}
if postings . Err ( ) != nil {
exitWithError ( postings . Err ( ) )
}
postingInfos = append ( postingInfos , postingInfo { n , uint64 ( count ) } )
}
}
fmt . Printf ( "\nHighest cardinality metric names:\n" )
printInfo ( postingInfos )
}