Rename shard to partition

Fabian Reinartz 2017-01-06 08:08:02 +01:00
parent 9790aa98ac
commit 4590b61343
4 changed files with 103 additions and 103 deletions


@@ -31,31 +31,31 @@ type compactorMetrics struct {
 }

 func newCompactorMetrics(i int) *compactorMetrics {
-    shardLabel := prometheus.Labels{
-        "shard": fmt.Sprintf("%d", i),
+    partitionLabel := prometheus.Labels{
+        "partition": fmt.Sprintf("%d", i),
     }

     m := &compactorMetrics{}

     m.triggered = prometheus.NewCounter(prometheus.CounterOpts{
-        Name:        "tsdb_shard_compactions_triggered_total",
-        Help:        "Total number of triggered compactions for the shard.",
-        ConstLabels: shardLabel,
+        Name:        "tsdb_partition_compactions_triggered_total",
+        Help:        "Total number of triggered compactions for the partition.",
+        ConstLabels: partitionLabel,
     })
     m.ran = prometheus.NewCounter(prometheus.CounterOpts{
-        Name:        "tsdb_shard_compactions_total",
-        Help:        "Total number of compactions that were executed for the shard.",
-        ConstLabels: shardLabel,
+        Name:        "tsdb_partition_compactions_total",
+        Help:        "Total number of compactions that were executed for the partition.",
+        ConstLabels: partitionLabel,
     })
     m.failed = prometheus.NewCounter(prometheus.CounterOpts{
-        Name:        "tsdb_shard_compactions_failed_total",
-        Help:        "Total number of compactions that failed for the shard.",
-        ConstLabels: shardLabel,
+        Name:        "tsdb_partition_compactions_failed_total",
+        Help:        "Total number of compactions that failed for the partition.",
+        ConstLabels: partitionLabel,
     })
     m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{
-        Name:        "tsdb_shard_compaction_duration",
+        Name:        "tsdb_partition_compaction_duration",
         Help:        "Duration of compaction runs.",
-        ConstLabels: shardLabel,
+        ConstLabels: partitionLabel,
     })

     return m
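
Note: every metrics constructor in this commit follows the same client_golang pattern: one metrics struct per partition, with the partition index attached as a constant label so each partition exports its own series. A minimal, self-contained sketch of that pattern; the exampleMetrics type and the metric name are illustrative, not part of this commit:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// exampleMetrics mirrors the shape of compactorMetrics and partitionMetrics:
// plain counters whose partition identity lives in a const label.
type exampleMetrics struct {
	ran prometheus.Counter
}

func newExampleMetrics(r prometheus.Registerer, i int) *exampleMetrics {
	partitionLabel := prometheus.Labels{
		"partition": fmt.Sprintf("%d", i),
	}
	m := &exampleMetrics{
		ran: prometheus.NewCounter(prometheus.CounterOpts{
			Name:        "tsdb_partition_example_total", // illustrative name
			Help:        "Example counter scoped to one partition.",
			ConstLabels: partitionLabel,
		}),
	}
	if r != nil {
		// Registering one instance per partition works because the
		// const label makes every partition's series unique.
		r.MustRegister(m.ran)
	}
	return m
}

func main() {
	reg := prometheus.NewRegistry()
	for i := 0; i < 4; i++ {
		newExampleMetrics(reg, i).ran.Inc()
	}
}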

db.go

@@ -41,14 +41,14 @@ type DB struct {
     opts *Options
     path string

-    shards []*Shard
+    partitions []*Partition
 }

 // TODO(fabxc): make configurable
 const (
-    shardShift   = 0
-    numShards    = 1 << shardShift
+    partitionShift = 0
+    numPartitions  = 1 << partitionShift
     maxChunkSize = 1024
 )

 // Open or create a new DB.
@@ -70,25 +70,25 @@ func Open(path string, l log.Logger, opts *Options) (*DB, error) {
         path: path,
     }

-    // Initialize vertical shards.
-    // TODO(fabxc): validate shard number to be power of 2, which is required
-    // for the bitshift-modulo when finding the right shard.
-    for i := 0; i < numShards; i++ {
-        l := log.NewContext(l).With("shard", i)
-        d := shardDir(path, i)
+    // Initialize vertical partitions.
+    // TODO(fabxc): validate partition number to be power of 2, which is required
+    // for the bitshift-modulo when finding the right partition.
+    for i := 0; i < numPartitions; i++ {
+        l := log.NewContext(l).With("partition", i)
+        d := partitionDir(path, i)

-        s, err := OpenShard(d, i, l)
+        s, err := OpenPartition(d, i, l)
         if err != nil {
-            return nil, fmt.Errorf("initializing shard %q failed: %s", d, err)
+            return nil, fmt.Errorf("initializing partition %q failed: %s", d, err)
         }

-        c.shards = append(c.shards, s)
+        c.partitions = append(c.partitions, s)
     }

     return c, nil
 }

-func shardDir(base string, i int) string {
+func partitionDir(base string, i int) string {
     return filepath.Join(base, strconv.Itoa(i))
 }
@@ -96,8 +96,8 @@ func shardDir(base string, i int) string {
 func (db *DB) Close() error {
     var g errgroup.Group

-    for _, shard := range db.shards {
-        g.Go(shard.Close)
+    for _, partition := range db.partitions {
+        g.Go(partition.Close)
     }

     return g.Wait()
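
Note: DB.Close fans shutdown out over all partitions with golang.org/x/sync/errgroup; each Close runs in its own goroutine and Wait returns the first error encountered. A reduced sketch of the same pattern, where the hypothetical closer type stands in for *Partition:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// closer stands in for *Partition: anything with a Close() error method.
type closer struct{ id int }

func (c *closer) Close() error {
	fmt.Println("closing", c.id)
	return nil
}

func closeAll(cs []*closer) error {
	var g errgroup.Group
	for _, c := range cs {
		// The method value c.Close binds its receiver immediately and
		// matches errgroup's func() error signature, so each Close
		// runs concurrently in its own goroutine.
		g.Go(c.Close)
	}
	// Wait blocks until every goroutine returns and yields the first
	// non-nil error, if any.
	return g.Wait()
}

func main() {
	if err := closeAll([]*closer{{1}, {2}, {3}}); err != nil {
		fmt.Println("close failed:", err)
	}
}
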
@@ -122,7 +122,7 @@ type Appender interface {
 func (db *DB) Appender() Appender {
     return &bucketAppender{
         db:      db,
-        buckets: make([][]hashedSample, numShards),
+        buckets: make([][]hashedSample, numPartitions),
     }
 }
@@ -133,7 +133,7 @@ type bucketAppender struct {
 func (ba *bucketAppender) Add(lset labels.Labels, t int64, v float64) error {
     h := lset.Hash()
-    s := h >> (64 - shardShift)
+    s := h >> (64 - partitionShift)

     ba.buckets[s] = append(ba.buckets[s], hashedSample{
         hash: h,
@@ -156,9 +156,9 @@ func (ba *bucketAppender) Commit() error {
     var merr MultiError

-    // Spill buckets into shards.
+    // Spill buckets into partitions.
     for s, b := range ba.buckets {
-        merr.Add(ba.db.shards[s].appendBatch(b))
+        merr.Add(ba.db.partitions[s].appendBatch(b))
     }

     return merr.Err()
 }
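
Note: the bucketing in Add only works because the partition count is a power of two. With numPartitions = 1 << partitionShift, taking the top partitionShift bits of the 64-bit label hash via h >> (64 - partitionShift) selects a bucket in [0, numPartitions) without a modulo; that is the "bitshift-modulo" the TODO in Open refers to. A standalone sketch, substituting FNV-1a for labels.Hash() and a non-zero shift for illustration:

package main

import (
	"fmt"
	"hash/fnv"
)

const (
	partitionShift = 2 // illustrative; the commit uses 0 (a single partition)
	numPartitions  = 1 << partitionShift
)

// partitionFor maps a series (here just a name) to a partition by keeping
// only the top partitionShift bits of a 64-bit hash.
func partitionFor(series string) uint64 {
	h := fnv.New64a() // stand-in for labels.Hash() in the real code
	h.Write([]byte(series))
	return h.Sum64() >> (64 - partitionShift)
}

func main() {
	for _, s := range []string{"http_requests_total", "go_goroutines"} {
		fmt.Printf("%s -> partition %d of %d\n", s, partitionFor(s), numPartitions)
	}
}
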
@@ -174,12 +174,12 @@ type hashedSample struct {
 const sep = '\xff'

-// Shard handles reads and writes of time series falling into
-// a hashed shard of a series.
-type Shard struct {
+// Partition handles reads and writes of time series falling into
+// a hashed partition of a series.
+type Partition struct {
     path    string
     logger  log.Logger
-    metrics *shardMetrics
+    metrics *partitionMetrics

     mtx       sync.RWMutex
     persisted []*persistedBlock
@@ -190,34 +190,34 @@ type Shard struct {
     cutc chan struct{}
 }

-type shardMetrics struct {
+type partitionMetrics struct {
     persistences        prometheus.Counter
     persistenceDuration prometheus.Histogram
     samplesAppended     prometheus.Counter
 }

-func newShardMetrics(r prometheus.Registerer, i int) *shardMetrics {
-    shardLabel := prometheus.Labels{
-        "shard": fmt.Sprintf("%d", i),
+func newPartitionMetrics(r prometheus.Registerer, i int) *partitionMetrics {
+    partitionLabel := prometheus.Labels{
+        "partition": fmt.Sprintf("%d", i),
     }

-    m := &shardMetrics{}
+    m := &partitionMetrics{}

     m.persistences = prometheus.NewCounter(prometheus.CounterOpts{
-        Name:        "tsdb_shard_persistences_total",
+        Name:        "tsdb_partition_persistences_total",
         Help:        "Total number of head persistances that ran so far.",
-        ConstLabels: shardLabel,
+        ConstLabels: partitionLabel,
     })
     m.persistenceDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
-        Name:        "tsdb_shard_persistence_duration_seconds",
+        Name:        "tsdb_partition_persistence_duration_seconds",
         Help:        "Duration of persistences in seconds.",
-        ConstLabels: shardLabel,
+        ConstLabels: partitionLabel,
         Buckets:     prometheus.ExponentialBuckets(0.25, 2, 5),
     })
     m.samplesAppended = prometheus.NewCounter(prometheus.CounterOpts{
-        Name:        "tsdb_shard_samples_appended_total",
-        Help:        "Total number of appended samples for the shard.",
-        ConstLabels: shardLabel,
+        Name:        "tsdb_partition_samples_appended_total",
+        Help:        "Total number of appended samples for the partition.",
+        ConstLabels: partitionLabel,
     })

     if r != nil {
@@ -230,9 +230,9 @@ func newShardMetrics(r prometheus.Registerer, i int) *shardMetrics {
     return m
 }

-// OpenShard returns a new Shard.
-func OpenShard(path string, i int, logger log.Logger) (*Shard, error) {
-    // Create directory if shard is new.
+// OpenPartition returns a new Partition.
+func OpenPartition(path string, i int, logger log.Logger) (*Partition, error) {
+    // Create directory if partition is new.
     if _, err := os.Stat(path); os.IsNotExist(err) {
         if err := os.MkdirAll(path, 0777); err != nil {
             return nil, err
@@ -258,10 +258,10 @@ func OpenShard(path string, i int, logger log.Logger) (*Shard, error) {
         heads = []*HeadBlock{head}
     }

-    s := &Shard{
+    s := &Partition{
         path:      path,
         logger:    logger,
-        metrics:   newShardMetrics(nil, i),
+        metrics:   newPartitionMetrics(nil, i),
         heads:     heads,
         persisted: persisted,
         cutc:      make(chan struct{}, 1),
@@ -275,7 +275,7 @@ func OpenShard(path string, i int, logger log.Logger) (*Shard, error) {
     return s, nil
 }

-func (s *Shard) run() {
+func (s *Partition) run() {
     for range s.cutc {
         // if err := s.cut(); err != nil {
         //     s.logger.Log("msg", "cut error", "err", err)
@@ -296,8 +296,8 @@ func (s *Shard) run() {
     close(s.donec)
 }

-// Close the shard.
-func (s *Shard) Close() error {
+// Close the partition.
+func (s *Partition) Close() error {
     close(s.cutc)
     <-s.donec
@@ -317,7 +317,7 @@ func (s *Shard) Close() error {
     return merr.Err()
 }

-func (s *Shard) appendBatch(samples []hashedSample) error {
+func (s *Partition) appendBatch(samples []hashedSample) error {
     if len(samples) == 0 {
         return nil
     }
@@ -344,11 +344,11 @@ func (s *Shard) appendBatch(samples []hashedSample) error {
     return err
 }

-func (s *Shard) lock() sync.Locker {
+func (s *Partition) lock() sync.Locker {
     return &s.mtx
 }

-func (s *Shard) headForDir(dir string) (int, bool) {
+func (s *Partition) headForDir(dir string) (int, bool) {
     for i, b := range s.heads {
         if b.dir() == dir {
             return i, true
@@ -357,7 +357,7 @@ func (s *Shard) headForDir(dir string) (int, bool) {
     return -1, false
 }

-func (s *Shard) persistedForDir(dir string) (int, bool) {
+func (s *Partition) persistedForDir(dir string) (int, bool) {
     for i, b := range s.persisted {
         if b.dir() == dir {
             return i, true
@@ -366,7 +366,7 @@ func (s *Shard) persistedForDir(dir string) (int, bool) {
     return -1, false
 }

-func (s *Shard) reinit(dir string) error {
+func (s *Partition) reinit(dir string) error {
     if !fileutil.Exist(dir) {
         if i, ok := s.headForDir(dir); ok {
             if err := s.heads[i].Close(); err != nil {
@@ -410,7 +410,7 @@ func (s *Shard) reinit(dir string) error {
     return nil
 }

-func (s *Shard) compactable() []block {
+func (s *Partition) compactable() []block {
     var blocks []block
     for _, pb := range s.persisted {
         blocks = append([]block{pb}, blocks...)
@@ -444,9 +444,9 @@ func intervalContains(min, max, t int64) bool {
     return t >= min && t <= max
 }

-// blocksForRange returns all blocks within the shard that may contain
+// blocksForRange returns all blocks within the partition that may contain
 // data for the given time range.
-func (s *Shard) blocksForInterval(mint, maxt int64) []block {
+func (s *Partition) blocksForInterval(mint, maxt int64) []block {
     var bs []block

     for _, b := range s.persisted {
@@ -472,7 +472,7 @@ const headGracePeriod = 60 * 1000 // 60 seconds for millisecond scale

 // cut starts a new head block to append to. The completed head block
 // will still be appendable for the configured grace period.
-func (s *Shard) cut() error {
+func (s *Partition) cut() error {
     // Set new head block.
     head := s.heads[len(s.heads)-1]
@@ -487,7 +487,7 @@ func (s *Shard) cut() error {
     return nil
 }

-// func (s *Shard) persist() error {
+// func (s *Partition) persist() error {
 //     s.mtx.Lock()

 //     // Set new head block.
@@ -501,7 +501,7 @@ func (s *Shard) cut() error {
 //     s.mtx.Unlock()

-//     // TODO(fabxc): add grace period where we can still append to old head shard
+//     // TODO(fabxc): add grace period where we can still append to old head partition
 //     // before actually persisting it.

 //     dir := filepath.Join(s.path, fmt.Sprintf("%d", head.stats.MinTime))


@@ -35,10 +35,10 @@ type Series interface {
     Iterator() SeriesIterator
 }

-// querier merges query results from a set of shard querieres.
+// querier merges query results from a set of partition querieres.
 type querier struct {
     mint, maxt int64

-    shards []Querier
+    partitions []Querier
 }

 // Querier returns a new querier over the database for the given
@@ -48,19 +48,19 @@ func (db *DB) Querier(mint, maxt int64) Querier {
         mint: mint,
         maxt: maxt,
     }

-    for _, s := range db.shards {
-        q.shards = append(q.shards, s.Querier(mint, maxt))
+    for _, s := range db.partitions {
+        q.partitions = append(q.partitions, s.Querier(mint, maxt))
     }

     return q
 }

 func (q *querier) Select(ms ...labels.Matcher) SeriesSet {
-    // We gather the non-overlapping series from every shard and simply
+    // We gather the non-overlapping series from every partition and simply
     // return their union.
     r := &mergedSeriesSet{}

-    for _, s := range q.shards {
+    for _, s := range q.partitions {
         r.sets = append(r.sets, s.Select(ms...))
     }

     if len(r.sets) == 0 {
@@ -70,11 +70,11 @@ func (q *querier) Select(ms ...labels.Matcher) SeriesSet {
 }

 func (q *querier) LabelValues(n string) ([]string, error) {
-    res, err := q.shards[0].LabelValues(n)
+    res, err := q.partitions[0].LabelValues(n)
     if err != nil {
         return nil, err
     }

-    for _, sq := range q.shards[1:] {
+    for _, sq := range q.partitions[1:] {
         pr, err := sq.LabelValues(n)
         if err != nil {
             return nil, err
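
Note: querier.LabelValues fans the request out to every partition and merges the answers, which is safe because each partition answers independently. A sketch of that fan-out against a trimmed-down, hypothetical LabelValuer interface; the real code merges the already-sorted slices directly, while this version deduplicates with a set and sorts once:

package main

import (
	"fmt"
	"sort"
)

// LabelValuer is a hypothetical one-method slice of the Querier interface.
type LabelValuer interface {
	LabelValues(name string) ([]string, error)
}

// staticValues is a toy implementation standing in for a partition querier.
type staticValues []string

func (s staticValues) LabelValues(string) ([]string, error) { return s, nil }

// unionLabelValues mirrors querier.LabelValues: query every partition, stop
// on the first error, and return the deduplicated, sorted union.
func unionLabelValues(partitions []LabelValuer, name string) ([]string, error) {
	seen := map[string]struct{}{}
	var res []string
	for _, p := range partitions {
		vals, err := p.LabelValues(name)
		if err != nil {
			return nil, err
		}
		for _, v := range vals {
			if _, ok := seen[v]; !ok {
				seen[v] = struct{}{}
				res = append(res, v)
			}
		}
	}
	sort.Strings(res)
	return res, nil
}

func main() {
	parts := []LabelValuer{staticValues{"a", "b"}, staticValues{"b", "c"}}
	vals, _ := unionLabelValues(parts, "job")
	fmt.Println(vals) // [a b c]
}
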
@@ -120,29 +120,29 @@ func (q *querier) LabelValuesFor(string, labels.Label) ([]string, error) {
 func (q *querier) Close() error {
     var merr MultiError

-    for _, sq := range q.shards {
+    for _, sq := range q.partitions {
         merr.Add(sq.Close())
     }

     return merr.Err()
 }

-// shardQuerier aggregates querying results from time blocks within
-// a single shard.
-type shardQuerier struct {
-    shard  *Shard
+// partitionQuerier aggregates querying results from time blocks within
+// a single partition.
+type partitionQuerier struct {
+    partition *Partition
     blocks []Querier
 }

-// Querier returns a new querier over the data shard for the given
+// Querier returns a new querier over the data partition for the given
 // time range.
-func (s *Shard) Querier(mint, maxt int64) Querier {
+func (s *Partition) Querier(mint, maxt int64) Querier {
     s.mtx.RLock()

     blocks := s.blocksForInterval(mint, maxt)

-    sq := &shardQuerier{
+    sq := &partitionQuerier{
         blocks: make([]Querier, 0, len(blocks)),
-        shard:  s,
+        partition: s,
     }

     for _, b := range blocks {
@@ -163,7 +163,7 @@ func (s *Shard) Querier(mint, maxt int64) Querier {
     return sq
 }

-func (q *shardQuerier) LabelValues(n string) ([]string, error) {
+func (q *partitionQuerier) LabelValues(n string) ([]string, error) {
     res, err := q.blocks[0].LabelValues(n)
     if err != nil {
         return nil, err
@@ -179,11 +179,11 @@ func (q *shardQuerier) LabelValues(n string) ([]string, error) {
     return res, nil
 }

-func (q *shardQuerier) LabelValuesFor(string, labels.Label) ([]string, error) {
+func (q *partitionQuerier) LabelValuesFor(string, labels.Label) ([]string, error) {
     return nil, fmt.Errorf("not implemented")
 }

-func (q *shardQuerier) Select(ms ...labels.Matcher) SeriesSet {
+func (q *partitionQuerier) Select(ms ...labels.Matcher) SeriesSet {
     // Sets from different blocks have no time overlap. The reference numbers
     // they emit point to series sorted in lexicographic order.
     // We can fully connect partial series by simply comparing with the previous
@@ -194,18 +194,18 @@ func (q *shardQuerier) Select(ms ...labels.Matcher) SeriesSet {
     r := q.blocks[0].Select(ms...)

     for _, s := range q.blocks[1:] {
-        r = newShardSeriesSet(r, s.Select(ms...))
+        r = newPartitionSeriesSet(r, s.Select(ms...))
     }

     return r
 }

-func (q *shardQuerier) Close() error {
+func (q *partitionQuerier) Close() error {
     var merr MultiError

     for _, bq := range q.blocks {
         merr.Add(bq.Close())
     }

-    q.shard.mtx.RUnlock()
+    q.partition.mtx.RUnlock()
     return merr.Err()
 }
@@ -359,15 +359,15 @@ func (s *mergedSeriesSet) Next() bool {
     return s.Next()
 }

-type shardSeriesSet struct {
+type partitionSeriesSet struct {
     a, b SeriesSet

     cur          Series
     adone, bdone bool
 }

-func newShardSeriesSet(a, b SeriesSet) *shardSeriesSet {
-    s := &shardSeriesSet{a: a, b: b}
+func newPartitionSeriesSet(a, b SeriesSet) *partitionSeriesSet {
+    s := &partitionSeriesSet{a: a, b: b}
     // Initialize first elements of both sets as Next() needs
     // one element look-ahead.
     s.adone = !s.a.Next()
@@ -376,18 +376,18 @@ func newShardSeriesSet(a, b SeriesSet) *shardSeriesSet {
     return s
 }

-func (s *shardSeriesSet) At() Series {
+func (s *partitionSeriesSet) At() Series {
     return s.cur
 }

-func (s *shardSeriesSet) Err() error {
+func (s *partitionSeriesSet) Err() error {
     if s.a.Err() != nil {
         return s.a.Err()
     }
     return s.b.Err()
 }

-func (s *shardSeriesSet) compare() int {
+func (s *partitionSeriesSet) compare() int {
     if s.adone {
         return 1
     }
@@ -397,7 +397,7 @@ func (s *shardSeriesSet) compare() int {
     return labels.Compare(s.a.At().Labels(), s.b.At().Labels())
 }

-func (s *shardSeriesSet) Next() bool {
+func (s *partitionSeriesSet) Next() bool {
     if s.adone && s.bdone || s.Err() != nil {
         return false
     }
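
Note: partitionSeriesSet implements a classic two-way merge with one element of look-ahead per input. Both inputs are sorted by label set, compare() treats an exhausted side as larger than anything, and Next() advances the smaller side; on a tie the real code chains the two partial series, which is valid because blocks do not overlap in time. The same control flow over plain sorted strings, as a sketch (strings stand in for label sets):

package main

import (
	"fmt"
	"strings"
)

// mergeSet merges two sorted string slices the way partitionSeriesSet
// merges two sorted SeriesSets.
type mergeSet struct {
	a, b         []string
	cur          string
	adone, bdone bool
}

func newMergeSet(a, b []string) *mergeSet {
	s := &mergeSet{a: a, b: b}
	// Prime the one-element look-ahead, as newPartitionSeriesSet does.
	s.adone, s.bdone = len(s.a) == 0, len(s.b) == 0
	return s
}

// compare orders the two heads; an exhausted side always loses so the
// other side keeps draining.
func (s *mergeSet) compare() int {
	if s.adone {
		return 1
	}
	if s.bdone {
		return -1
	}
	return strings.Compare(s.a[0], s.b[0])
}

func (s *mergeSet) Next() bool {
	if s.adone && s.bdone {
		return false
	}
	switch d := s.compare(); {
	case d < 0:
		s.cur, s.a = s.a[0], s.a[1:]
		s.adone = len(s.a) == 0
	case d > 0:
		s.cur, s.b = s.b[0], s.b[1:]
		s.bdone = len(s.b) == 0
	default:
		// Tie: consume both heads; the real code combines the two
		// partial series into one chained series at this point.
		s.cur, s.a, s.b = s.a[0], s.a[1:], s.b[1:]
		s.adone, s.bdone = len(s.a) == 0, len(s.b) == 0
	}
	return true
}

func main() {
	s := newMergeSet([]string{"a", "c"}, []string{"b", "c", "d"})
	for s.Next() {
		fmt.Print(s.cur, " ") // a b c d
	}
	fmt.Println()
}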


@@ -65,7 +65,7 @@ func (it *listSeriesIterator) Err() error {
     return nil
 }

-func TestShardSeriesSet(t *testing.T) {
+func TestPartitionSeriesSet(t *testing.T) {
     newSeries := func(l map[string]string, s []sample) Series {
         return &mockSeries{
             labels: func() labels.Labels { return labels.FromMap(l) },
@@ -77,7 +77,7 @@ func TestShardSeriesSet(t *testing.T) {
         // The input sets in order (samples in series in b are strictly
         // after those in a).
         a, b SeriesSet

-        // The composition of a and b in the shard series set must yield
+        // The composition of a and b in the partition series set must yield
         // results equivalent to the result series set.
         exp SeriesSet
     }{
@@ -170,7 +170,7 @@ func TestShardSeriesSet(t *testing.T) {
 Outer:
     for _, c := range cases {
-        res := newShardSeriesSet(c.a, c.b)
+        res := newPartitionSeriesSet(c.a, c.b)

         for {
             eok, rok := c.exp.Next(), res.Next()