// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package index

import (
	"bufio"
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
	"math"
	"os"
	"path/filepath"
	"sort"
"unsafe"
|
2016-12-20 04:10:37 -08:00
|
|
|
|
|
|
|
"github.com/pkg/errors"
|
2022-09-30 07:33:56 -07:00
|
|
|
"golang.org/x/exp/slices"
|
2020-10-22 02:00:08 -07:00
|
|
|
|
2021-11-08 06:23:17 -08:00
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
2021-02-09 09:38:35 -08:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
2019-08-13 01:34:14 -07:00
|
|
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/encoding"
|
|
|
|
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/fileutil"
|
2016-12-09 11:45:46 -08:00
|
|
|
)

const (
	// MagicIndex is the 4-byte magic number at the head of an index file.
	MagicIndex = 0xBAAAD700
	// HeaderLen is the number of bytes reserved for the index header.
	HeaderLen = 5

	// FormatV1 represents version 1 of the index.
	FormatV1 = 1
	// FormatV2 represents version 2 of the index.
	FormatV2 = 2

	indexFilename = "index"
)
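
// editorsNoteHeaderSketch is an illustrative sketch added in editing (not part
// of the original file). It shows how the constants above combine: the index
// header is MagicIndex encoded big-endian followed by a single version byte,
// HeaderLen (5) bytes in total.
func editorsNoteHeaderSketch(version byte) []byte {
	b := make([]byte, HeaderLen)
	binary.BigEndian.PutUint32(b, MagicIndex)
	b[4] = version // FormatV1 or FormatV2
	return b
}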

type indexWriterSeries struct {
	labels labels.Labels
	chunks []chunks.Meta // series file offset of chunks
}

type indexWriterSeriesSlice []*indexWriterSeries

func (s indexWriterSeriesSlice) Len() int      { return len(s) }
func (s indexWriterSeriesSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func (s indexWriterSeriesSlice) Less(i, j int) bool {
	return labels.Compare(s[i].labels, s[j].labels) < 0
}
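
// sortIndexWriterSeriesSketch is an editor-added usage sketch (not part of the
// original file): indexWriterSeriesSlice implements sort.Interface so that
// series can be ordered by their label sets before being written to the index.
func sortIndexWriterSeriesSketch(series indexWriterSeriesSlice) {
	sort.Sort(series) // orders by labels.Compare, as defined in Less above
}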

type indexWriterStage uint8

const (
	idxStageNone indexWriterStage = iota
	idxStageSymbols
	idxStageSeries
	idxStageDone
)

func (s indexWriterStage) String() string {
	switch s {
	case idxStageNone:
		return "none"
	case idxStageSymbols:
		return "symbols"
	case idxStageSeries:
		return "series"
	case idxStageDone:
		return "done"
	}
	return "<unknown>"
}

// The table gets initialized with sync.Once but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// before.
var castagnoliTable *crc32.Table

func init() {
	castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}

// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
// polynomial may be easily changed in one location at a later time, if necessary.
func newCRC32() hash.Hash32 {
	return crc32.New(castagnoliTable)
}
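
// checksumSketch is an editor-added illustrative sketch (not part of the
// original file): each index section is followed by a CRC32 (Castagnoli) of
// its contents, computed with the hash returned by newCRC32 above.
func checksumSketch(section []byte) uint32 {
	h := newCRC32()
	_, _ = h.Write(section) // hash.Hash.Write never returns an error
	return h.Sum32()
}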

// symbolCacheEntry caches the symbol reference of a label name and of the
// last value seen for it.
type symbolCacheEntry struct {
	index          uint32
	lastValue      string
	lastValueIndex uint32
}

// Writer implements the IndexWriter interface for the standard
// serialization format.
type Writer struct {
	ctx context.Context

	// For the main index file.
	f *FileWriter

	// Temporary file for postings.
	fP *FileWriter
	// Temporary file for posting offsets table.
	fPO   *FileWriter
	cntPO uint64

	toc           TOC
	stage         indexWriterStage
	postingsStart uint64 // Due to padding, can differ from TOC entry.

	// Reusable memory.
	buf1 encoding.Encbuf
	buf2 encoding.Encbuf

	numSymbols  int
	symbols     *Symbols
	symbolFile  *fileutil.MmapFile
	lastSymbol  string
	symbolCache map[string]symbolCacheEntry

	labelIndexes []labelIndexHashEntry // Label index offsets.
	labelNames   map[string]uint64     // Label names, and their usage.

	// Hold last series to validate that clients insert new series in order.
	lastSeries labels.Labels
	lastRef    storage.SeriesRef

	crc32 hash.Hash

	Version int
}

// TOC represents the index Table Of Contents, which states where each section of the index starts.
type TOC struct {
	Symbols           uint64
	Series            uint64
	LabelIndices      uint64
	LabelIndicesTable uint64
	Postings          uint64
	PostingsTable     uint64
}
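
// editorsNoteTOCLen is an editor-added illustrative constant (not part of the
// original file): the TOC above is serialized at the very end of the index as
// six big-endian uint64 offsets followed by a 4-byte CRC32, which is the
// layout NewTOCFromByteSlice below decodes.
const editorsNoteTOCLen = 6*8 + 4 // 52 bytes; the package defines indexTOCLen for this elsewhere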

// NewTOCFromByteSlice returns a parsed TOC from the given index byte slice.
func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) {
	if bs.Len() < indexTOCLen {
		return nil, encoding.ErrInvalidSize
	}
	b := bs.Range(bs.Len()-indexTOCLen, bs.Len())

	expCRC := binary.BigEndian.Uint32(b[len(b)-4:])
	d := encoding.Decbuf{B: b[:len(b)-4]}

	if d.Crc32(castagnoliTable) != expCRC {
		return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC")
	}

	if err := d.Err(); err != nil {
		return nil, err
	}

	return &TOC{
		Symbols:           d.Be64(),
		Series:            d.Be64(),
		LabelIndices:      d.Be64(),
		LabelIndicesTable: d.Be64(),
		Postings:          d.Be64(),
		PostingsTable:     d.Be64(),
	}, nil
}

// NewWriter returns a new Writer to the given filename. It serializes data in format version 2.
func NewWriter(ctx context.Context, fn string) (*Writer, error) {
	dir := filepath.Dir(fn)

	df, err := fileutil.OpenDir(dir)
	if err != nil {
		return nil, err
	}
	defer df.Close() // Close for platform windows.

	if err := os.RemoveAll(fn); err != nil {
		return nil, errors.Wrap(err, "remove any existing index at path")
	}

	// Main index file we are building.
	f, err := NewFileWriter(fn)
	if err != nil {
		return nil, err
	}
	// Temporary file for postings.
	fP, err := NewFileWriter(fn + "_tmp_p")
	if err != nil {
		return nil, err
	}
	// Temporary file for posting offset table.
	fPO, err := NewFileWriter(fn + "_tmp_po")
	if err != nil {
		return nil, err
	}
	if err := df.Sync(); err != nil {
		return nil, errors.Wrap(err, "sync dir")
	}

	iw := &Writer{
		ctx:   ctx,
		f:     f,
		fP:    fP,
		fPO:   fPO,
		stage: idxStageNone,

		// Reusable memory.
		buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)},
		buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)},

		symbolCache: make(map[string]symbolCacheEntry, 1<<8),
		labelNames:  make(map[string]uint64, 1<<8),
		crc32:       newCRC32(),
	}
	if err := iw.writeMeta(); err != nil {
		return nil, err
	}
	return iw, nil
}
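
// newWriterSketch is an editor-added usage sketch (not part of the original
// file; the block directory path is hypothetical): a Writer is created per
// block and then driven through its write stages in order, starting with
// symbols and then series.
func newWriterSketch(ctx context.Context) (*Writer, error) {
	return NewWriter(ctx, filepath.Join("/path/to/block", indexFilename))
}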

func (w *Writer) write(bufs ...[]byte) error {
	return w.f.Write(bufs...)
}

func (w *Writer) writeAt(buf []byte, pos uint64) error {
	return w.f.WriteAt(buf, pos)
}

func (w *Writer) addPadding(size int) error {
	return w.f.AddPadding(size)
}

// FileWriter is a buffered writer to an index file that tracks its own
// write position.
type FileWriter struct {
	f    *os.File
	fbuf *bufio.Writer
	pos  uint64
	name string
}

func NewFileWriter(name string) (*FileWriter, error) {
	f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666)
	if err != nil {
		return nil, err
	}
	return &FileWriter{
		f:    f,
		fbuf: bufio.NewWriterSize(f, 1<<22),
		pos:  0,
		name: name,
	}, nil
}

func (fw *FileWriter) Pos() uint64 {
	return fw.pos
}

func (fw *FileWriter) Write(bufs ...[]byte) error {
	for _, b := range bufs {
		n, err := fw.fbuf.Write(b)
		fw.pos += uint64(n)
		if err != nil {
			return err
		}
		// For now the index file must not grow beyond 64GiB. Some of the fixed-sized
		// offset references in v1 are only 4 bytes large.
		// Once we move to compressed/varint representations in those areas, this limitation
		// can be lifted.
		if fw.pos > 16*math.MaxUint32 {
			return errors.Errorf("%q exceeding max size of 64GiB", fw.name)
		}
	}
	return nil
}
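
// editorsNoteMaxIndexSize is an editor-added illustrative constant (not part
// of the original file): 16*math.MaxUint32 in the check above is just under
// 64GiB, the cap kept while v1-format offset references are fixed 4-byte
// values.
const editorsNoteMaxIndexSize uint64 = 16 * math.MaxUint32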

func (fw *FileWriter) Flush() error {
	return fw.fbuf.Flush()
}

func (fw *FileWriter) WriteAt(buf []byte, pos uint64) error {
	if err := fw.Flush(); err != nil {
		return err
	}
	_, err := fw.f.WriteAt(buf, int64(pos))
	return err
}

// AddPadding adds zero byte padding until the file size is a multiple of size.
func (fw *FileWriter) AddPadding(size int) error {
	p := fw.pos % uint64(size)
	if p == 0 {
		return nil
	}
	p = uint64(size) - p

	if err := fw.Write(make([]byte, p)); err != nil {
		return errors.Wrap(err, "add padding")
	}
	return nil
}
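
// paddingSketch is an editor-added worked example of the arithmetic in
// AddPadding above (not part of the original file): with pos=100 and size=16,
// p = 100%16 = 4, so 16-4 = 12 zero bytes are written and the position
// advances to 112, a multiple of 16.
func paddingSketch(pos, size uint64) uint64 {
	if rem := pos % size; rem != 0 {
		return size - rem // number of zero bytes AddPadding would write
	}
	return 0
}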

func (fw *FileWriter) Close() error {
	if err := fw.Flush(); err != nil {
		return err
	}
	if err := fw.f.Sync(); err != nil {
		return err
	}
	return fw.f.Close()
}

func (fw *FileWriter) Remove() error {
	return os.Remove(fw.name)
}

// ensureStage handles transitions between write stages and ensures that IndexWriter
// methods are called in an order valid for the implementation.
func (w *Writer) ensureStage(s indexWriterStage) error {
	select {
	case <-w.ctx.Done():
		return w.ctx.Err()
	default:
	}

	if w.stage == s {
		return nil
	}
	if w.stage < s-1 {
		// A stage has been skipped.
		if err := w.ensureStage(s - 1); err != nil {
			return err
		}
	}
	if w.stage > s {
		return errors.Errorf("invalid stage %q, currently at %q", s, w.stage)
	}

	// Mark start of sections in table of contents.
	switch s {
	case idxStageSymbols:
		w.toc.Symbols = w.f.pos
|
Stream symbols during compaction. (#6468)
Rather than buffer up symbols in RAM, do it one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than a hash lookup. This is noise to the overall cost of compacting
series with thousands of samples though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
        if err := w.startSymbols(); err != nil {
            return err
        }

    case idxStageSeries:
        if err := w.finishSymbols(); err != nil {
            return err
        }
        w.toc.Series = w.f.pos

    case idxStageDone:
        w.toc.LabelIndices = w.f.pos
        // LabelIndices generation depends on the posting offset
        // table produced at this stage.
        if err := w.writePostingsToTmpFiles(); err != nil {
            return err
        }
        if err := w.writeLabelIndices(); err != nil {
            return err
        }

        w.toc.Postings = w.f.pos
        if err := w.writePostings(); err != nil {
            return err
        }

        w.toc.LabelIndicesTable = w.f.pos
        if err := w.writeLabelIndexesOffsetTable(); err != nil {
            return err
        }

        w.toc.PostingsTable = w.f.pos
        if err := w.writePostingsOffsetTable(); err != nil {
            return err
        }
        if err := w.writeTOC(); err != nil {
            return err
        }
    }

    w.stage = s
    return nil
}
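
The staging above fixes the write order for the whole index: all symbols are added first, then all series, and only when the writer reaches idxStageDone are postings, label indices and the offset tables derived from what is already on disk. A minimal caller-side sketch of that contract, assuming the exported NewWriter and Close from this package plus the standard sort package; buildIndex and its input slice are hypothetical:

// buildIndex is illustrative only and not part of the package. It shows the
// order the Writer expects: sorted symbols first, then series sorted by
// label set, then Close to flush the remaining stages.
func buildIndex(ctx context.Context, path string, in []struct {
    Lset   labels.Labels
    Chunks []chunks.Meta
}) error {
    iw, err := NewWriter(ctx, path)
    if err != nil {
        return err
    }
    // 1. Symbols: every label name and value, deduplicated and added in
    //    sorted order, since AddSymbol rejects out-of-order symbols.
    uniq := map[string]struct{}{}
    for _, s := range in {
        for _, l := range s.Lset {
            uniq[l.Name] = struct{}{}
            uniq[l.Value] = struct{}{}
        }
    }
    syms := make([]string, 0, len(uniq))
    for s := range uniq {
        syms = append(syms, s)
    }
    sort.Strings(syms)
    for _, s := range syms {
        if err := iw.AddSymbol(s); err != nil {
            return err
        }
    }
    // 2. Series: `in` must already be sorted by label set, since AddSeries
    //    rejects out-of-order series; references must not decrease.
    for i, s := range in {
        if err := iw.AddSeries(storage.SeriesRef(i+1), s.Lset, s.Chunks...); err != nil {
            return err
        }
    }
    // 3. Close drives the remaining stages (postings, label indices, TOC).
    return iw.Close()
}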

func (w *Writer) writeMeta() error {
    w.buf1.Reset()
    w.buf1.PutBE32(MagicIndex)
    w.buf1.PutByte(FormatV2)

    return w.write(w.buf1.Get())
}
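
writeMeta emits the five header bytes of the index: a 4-byte big-endian magic number followed by a single format-version byte. A hedged read-side sketch of the matching check, using only the standard library's encoding/binary; checkIndexHeader is hypothetical, while the package's reader performs the equivalent validation when it opens an index file:

// checkIndexHeader validates the bytes produced by writeMeta.
func checkIndexHeader(b []byte) error {
    if len(b) < 5 {
        return errors.New("index header too short")
    }
    if m := binary.BigEndian.Uint32(b[0:4]); m != MagicIndex {
        return errors.Errorf("invalid magic number %x", m)
    }
    if v := int(b[4]); v != FormatV1 && v != FormatV2 {
        return errors.Errorf("unknown index file version %d", v)
    }
    return nil
}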

// AddSeries adds the series one at a time along with its chunks.
func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ...chunks.Meta) error {
    if err := w.ensureStage(idxStageSeries); err != nil {
        return err
    }
    if labels.Compare(lset, w.lastSeries) <= 0 {
        return errors.Errorf("out-of-order series added with label set %q", lset)
    }

    if ref < w.lastRef && len(w.lastSeries) != 0 {
        return errors.Errorf("series with reference greater than %d already added", ref)
    }
    // We add padding to 16 bytes to increase the addressable space we get through 4 byte
    // series references.
    if err := w.addPadding(16); err != nil {
        return errors.Errorf("failed to write padding bytes: %v", err)
    }

    if w.f.pos%16 != 0 {
        return errors.Errorf("series write not 16-byte aligned at %d", w.f.pos)
    }

    w.buf2.Reset()
    w.buf2.PutUvarint(len(lset))

    for _, l := range lset {
        var err error
        cacheEntry, ok := w.symbolCache[l.Name]
        nameIndex := cacheEntry.index
        if !ok {
            nameIndex, err = w.symbols.ReverseLookup(l.Name)
            if err != nil {
                return errors.Errorf("symbol entry for %q does not exist, %v", l.Name, err)
            }
        }
Coalesce series reads where we can.
When compacting, rather than doing a read of all series in the index per
label name, do many at once, but only when it won't use (much) more RAM
than writing the special all-postings index does.
original in-memory postings:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1 1202383447 ns/op 158936496 B/op 1031511 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1 1141792706 ns/op 154453408 B/op 1093453 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1 1169288829 ns/op 161072336 B/op 1110021 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1 1115700103 ns/op 149480472 B/op 1129180 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1 1283813141 ns/op 162937800 B/op 1202771 allocs/op
before:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1 1145195941 ns/op 131749984 B/op 834400 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1 1233526345 ns/op 127889416 B/op 897033 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1 1821942296 ns/op 131665648 B/op 914836 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1 8035568665 ns/op 123811832 B/op 934312 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1 71325926267 ns/op 140722648 B/op 1016824 allocs/op
after:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1 1101429174 ns/op 129063496 B/op 832571 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1 1074466374 ns/op 124154888 B/op 894875 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1 1166510282 ns/op 128790648 B/op 912931 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1 1075013071 ns/op 120570696 B/op 933511 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1 1231673790 ns/op 138754288 B/op 1022791 allocs/op
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-11 13:24:03 -08:00
        w.labelNames[l.Name]++
        w.buf2.PutUvarint32(nameIndex)

        valueIndex := cacheEntry.lastValueIndex
        if !ok || cacheEntry.lastValue != l.Value {
            valueIndex, err = w.symbols.ReverseLookup(l.Value)
            if err != nil {
                return errors.Errorf("symbol entry for %q does not exist, %v", l.Value, err)
            }
            w.symbolCache[l.Name] = symbolCacheEntry{
                index:          nameIndex,
                lastValue:      l.Value,
                lastValueIndex: valueIndex,
            }
        }
        w.buf2.PutUvarint32(valueIndex)
    }

    w.buf2.PutUvarint(len(chunks))

    if len(chunks) > 0 {
        c := chunks[0]
        w.buf2.PutVarint64(c.MinTime)
        w.buf2.PutUvarint64(uint64(c.MaxTime - c.MinTime))
        w.buf2.PutUvarint64(uint64(c.Ref))
        t0 := c.MaxTime
        ref0 := int64(c.Ref)

        for _, c := range chunks[1:] {
            w.buf2.PutUvarint64(uint64(c.MinTime - t0))
            w.buf2.PutUvarint64(uint64(c.MaxTime - c.MinTime))
            t0 = c.MaxTime

            w.buf2.PutVarint64(int64(c.Ref) - ref0)
            ref0 = int64(c.Ref)
        }
    }

    w.buf1.Reset()
    w.buf1.PutUvarint(w.buf2.Len())

    w.buf2.PutHash(w.crc32)

    if err := w.write(w.buf1.Get(), w.buf2.Get()); err != nil {
        return errors.Wrap(err, "write series data")
    }

    w.lastSeries = append(w.lastSeries[:0], lset...)
    w.lastRef = ref

    return nil
}
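
The chunk metadata above is delta-encoded: the first chunk stores its absolute MinTime, its length (MaxTime - MinTime) and its absolute reference, while every later chunk stores only the gap to the previous chunk's MaxTime, its own length, and a signed delta against the previous reference. A self-contained decoding sketch of just that scheme, using only the standard library's encoding/binary; decodeChunkMetas is hypothetical and the package's Decoder is the real read path:

// decodeChunkMetas mirrors the delta encoding used by AddSeries, for
// illustration. It returns n (minTime, maxTime, ref) triples; error handling
// for malformed input is elided for brevity.
func decodeChunkMetas(b []byte, n int) [][3]int64 {
    uv := func() int64 { // unsigned varint
        v, k := binary.Uvarint(b)
        b = b[k:]
        return int64(v)
    }
    sv := func() int64 { // signed varint
        v, k := binary.Varint(b)
        b = b[k:]
        return v
    }
    out := make([][3]int64, 0, n)
    if n == 0 {
        return out
    }
    mint := sv()        // first chunk: absolute MinTime
    maxt := mint + uv() // chunk length
    ref := uv()         // absolute chunk reference
    out = append(out, [3]int64{mint, maxt, ref})
    for i := 1; i < n; i++ {
        mint = maxt + uv() // gap to the previous chunk's MaxTime
        maxt = mint + uv() // this chunk's length
        ref += sv()        // signed delta to the previous reference
        out = append(out, [3]int64{mint, maxt, ref})
    }
    return out
}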

func (w *Writer) startSymbols() error {
    // We are at w.toc.Symbols.
    // Leave 4 bytes of space for the length, and another 4 for the number of symbols
    // which will both be calculated later.
    return w.write([]byte("alenblen"))
}

func (w *Writer) AddSymbol(sym string) error {
    if err := w.ensureStage(idxStageSymbols); err != nil {
        return err
    }
    if w.numSymbols != 0 && sym <= w.lastSymbol {
        return errors.Errorf("symbol %q out-of-order", sym)
    }
    w.lastSymbol = sym
    w.numSymbols++
    w.buf1.Reset()
    w.buf1.PutUvarintStr(sym)
    return w.write(w.buf1.Get())
}
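
startSymbols, AddSymbol and finishSymbols together produce the symbol table section: a 4-byte big-endian length covering the symbol count and the symbol data, a 4-byte symbol count, the symbols as uvarint-length-prefixed strings in sorted order, and a trailing 4-byte CRC32 that is backfilled once the table is complete. A hedged parsing sketch of that layout, using only the standard library's encoding/binary; readSymbolTable is hypothetical, and the package's symbols reader is the real implementation:

// readSymbolTable walks the layout written by the symbol-table stage.
// b must start at the offset recorded in the TOC's Symbols entry.
func readSymbolTable(b []byte) ([]string, error) {
    if len(b) < 8 {
        return nil, errors.New("symbol table too short")
    }
    tableLen := binary.BigEndian.Uint32(b[0:4]) // symbol count field + symbol data
    count := binary.BigEndian.Uint32(b[4:8])
    if uint64(len(b)) < uint64(tableLen)+8 { // +4 for the len field itself, +4 for the CRC
        return nil, errors.New("symbol table truncated")
    }
    data := b[8 : 4+tableLen] // the 4 bytes right after this region hold the CRC32
    syms := make([]string, 0, count)
    for i := uint32(0); i < count; i++ {
        l, n := binary.Uvarint(data)
        if n <= 0 || uint64(len(data[n:])) < l {
            return nil, errors.New("malformed symbol entry")
        }
        data = data[n:]
        syms = append(syms, string(data[:l]))
        data = data[l:]
    }
    return syms, nil
}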

func (w *Writer) finishSymbols() error {
    symbolTableSize := w.f.pos - w.toc.Symbols - 4
    // The symbol table's <len> part is 4 bytes. So the total symbol table size must be less than or equal to 2^32-1.
    if symbolTableSize > math.MaxUint32 {
        return errors.Errorf("symbol table size exceeds %d bytes: %d", uint32(math.MaxUint32), symbolTableSize)
    }

    // Write out the length and symbol count.
    w.buf1.Reset()
    w.buf1.PutBE32int(int(symbolTableSize))
    w.buf1.PutBE32int(int(w.numSymbols))
    if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil {
        return err
    }

    hashPos := w.f.pos
    // Leave space for the hash. We can only calculate it
    // now that the number of symbols is known, so mmap and do it from there.
    if err := w.write([]byte("hash")); err != nil {
        return err
    }
    if err := w.f.Flush(); err != nil {
        return err
    }

    sf, err := fileutil.OpenMmapFile(w.f.name)
|
Stream symbols during compaction. (#6468)
Rather than buffer up symbols in RAM, do it one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than a hash lookup. This is noise to the overall cost of compacting
series with thousands of samples though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
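As a rough, standalone sketch of the streaming approach the commit message above describes (this is not the Writer's own code, and symbolStreamer is a hypothetical name): each symbol is appended to a temporary file as soon as it is seen, so nothing accumulates in RAM, and later lookups scan that file instead of hitting a map.

package main

import (
    "bufio"
    "encoding/binary"
    "fmt"
    "io"
    "os"
)

// symbolStreamer appends each symbol to a file as a uvarint length
// followed by the raw bytes, so symbols never need to be buffered in RAM.
type symbolStreamer struct {
    f   *os.File
    buf *bufio.Writer
    n   int
}

func newSymbolStreamer(path string) (*symbolStreamer, error) {
    f, err := os.Create(path)
    if err != nil {
        return nil, err
    }
    return &symbolStreamer{f: f, buf: bufio.NewWriter(f)}, nil
}

// Add writes one symbol straight through to the file.
func (s *symbolStreamer) Add(sym string) error {
    var lenBuf [binary.MaxVarintLen64]byte
    n := binary.PutUvarint(lenBuf[:], uint64(len(sym)))
    if _, err := s.buf.Write(lenBuf[:n]); err != nil {
        return err
    }
    if _, err := s.buf.WriteString(sym); err != nil {
        return err
    }
    s.n++
    return nil
}

// Lookup rereads the file to find a symbol's index: slower than a hash
// lookup, but it needs no per-symbol memory.
func (s *symbolStreamer) Lookup(sym string) (int, error) {
    if err := s.buf.Flush(); err != nil {
        return 0, err
    }
    if _, err := s.f.Seek(0, io.SeekStart); err != nil {
        return 0, err
    }
    r := bufio.NewReader(s.f)
    for i := 0; i < s.n; i++ {
        l, err := binary.ReadUvarint(r)
        if err != nil {
            return 0, err
        }
        b := make([]byte, l)
        if _, err := io.ReadFull(r, b); err != nil {
            return 0, err
        }
        if string(b) == sym {
            return i, nil
        }
    }
    return 0, fmt.Errorf("symbol %q not found", sym)
}

func main() {
    s, err := newSymbolStreamer("symbols.tmp")
    if err != nil {
        panic(err)
    }
    for _, sym := range []string{"", "__name__", "up"} {
        if err := s.Add(sym); err != nil {
            panic(err)
        }
    }
    id, err := s.Lookup("up")
    fmt.Println(id, err) // 2 <nil>
}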
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-01-04 06:55:02 -08:00
|
|
|
w.symbolFile = sf
|
2019-12-17 11:49:54 -08:00
|
|
|
hash := crc32.Checksum(w.symbolFile.Bytes()[w.toc.Symbols+4:hashPos], castagnoliTable)
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.Reset()
|
2019-12-17 11:49:54 -08:00
|
|
|
w.buf1.PutBE32(hash)
|
|
|
|
if err := w.writeAt(w.buf1.Get(), hashPos); err != nil {
|
2019-12-11 04:49:13 -08:00
|
|
|
return err
|
|
|
|
}
|
2017-04-25 07:45:44 -07:00
|
|
|
|
2019-12-17 11:49:54 -08:00
|
|
|
// Load in the symbol table efficiently for the rest of the index writing.
|
|
|
|
w.symbols, err = NewSymbols(realByteSlice(w.symbolFile.Bytes()), FormatV2, int(w.toc.Symbols))
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "read symbols")
|
|
|
|
}
|
|
|
|
return nil
|
2016-12-09 13:27:43 -08:00
|
|
|
}
|
|
|
|
|
2019-12-17 14:15:35 -08:00
|
|
|
func (w *Writer) writeLabelIndices() error {
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fPO.Flush(); err != nil {
|
Write label indices based on the posting offset table.
This avoids having to build it up in RAM, and means that all variable
memory usage for compactions is now 0.25 bytes per symbol plus a few
O(labelnames) structures. So in practice, pretty close to constant
memory for compactions.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 662974828 667162981 +0.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2459590377 2131168138 -13.35%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3808280548 3919290378 +2.91%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8513884311 8738099339 +2.63%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1898843003 1944131966 +2.39%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5601478437 6031391658 +7.67%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 11225096097 11359624463 +1.20%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 23994637282 23919583343 -0.31%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 891042098 826898358 -7.20%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 915949138 902555676 -1.46%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 955138431 879067946 -7.96%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 991447640 958785968 -3.29%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1068729356 980249080 -8.28%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 470778 470556 -0.05%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 791429 791225 -0.03%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1111514 1111257 -0.02%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2111498 2111369 -0.01%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 841433 841220 -0.03%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1911469 1911202 -0.01%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3041558 3041328 -0.01%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6741534 6741382 -0.00%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 824856 820873 -0.48%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 887220 885180 -0.23%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 905253 901539 -0.41%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 925148 913632 -1.24%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1019141 978727 -3.97%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 35694744 41523836 +16.33%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 53405264 59499056 +11.41%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 74160320 78151568 +5.38%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 120878480 135364672 +11.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 203832448 209925504 +2.99%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 341029208 346551064 +1.62%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 580217176 582345224 +0.37%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1356872288 1363495368 +0.49%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 119535672 94815920 -20.68%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 115352280 95980776 -16.79%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 119472320 98724460 -17.37%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 111979312 94325456 -15.77%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 116628584 98566344 -15.49%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 17:29:41 -08:00
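A minimal, self-contained sketch of the idea described in the commit message above and implemented by the loop further down: the temporary postings offset table already arrives grouped by label name, so a single pass that flushes a value list whenever the name changes only ever holds one label's values in memory. Here entry is an invented struct and the flush callback stands in for the real writeLabelIndex.

package main

import "fmt"

// entry mirrors one row of the temporary postings offset table:
// a label name and one of its values, already grouped by name.
type entry struct {
    name, value string
}

// buildLabelIndices walks entries grouped by name and emits one value
// list per label name via flush, keeping only the current group in memory.
func buildLabelIndices(entries []entry, flush func(name string, values []string) error) error {
    var (
        current string
        values  []string
    )
    for _, e := range entries {
        if e.name == "" {
            continue // Skip the all-postings entry.
        }
        if e.name != current && len(values) > 0 {
            // We've reached a new label name: flush the previous one.
            if err := flush(current, values); err != nil {
                return err
            }
            values = values[:0]
        }
        current = e.name
        values = append(values, e.value)
    }
    // Handle the last label.
    if len(values) > 0 {
        return flush(current, values)
    }
    return nil
}

func main() {
    entries := []entry{
        {"instance", "a"}, {"instance", "b"}, {"job", "api"}, {"job", "db"},
    }
    _ = buildLabelIndices(entries, func(name string, values []string) error {
        fmt.Println(name, values)
        return nil
    })
}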
|
|
|
return err
|
2017-01-19 05:01:38 -08:00
|
|
|
}
|
|
|
|
|
2019-12-17 17:29:41 -08:00
|
|
|
// Find all the label values in the tmp posting offset table.
|
|
|
|
f, err := fileutil.OpenMmapFile(w.fPO.name)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer f.Close()
|
|
|
|
|
|
|
|
d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))
|
|
|
|
cnt := w.cntPO
|
|
|
|
current := []byte{}
|
|
|
|
values := []uint32{}
|
|
|
|
for d.Err() == nil && cnt > 0 {
|
|
|
|
cnt--
|
|
|
|
d.Uvarint() // Keycount.
|
|
|
|
name := d.UvarintBytes() // Label name.
|
|
|
|
value := yoloString(d.UvarintBytes()) // Label value.
|
|
|
|
d.Uvarint64() // Offset.
|
|
|
|
if len(name) == 0 {
|
|
|
|
continue // Skip the all-postings entry (it has an empty label name).
|
|
|
|
}
|
|
|
|
|
|
|
|
if !bytes.Equal(name, current) && len(values) > 0 {
|
|
|
|
// We've reached a new label name.
|
|
|
|
if err := w.writeLabelIndex(string(current), values); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
values = values[:0]
|
2019-12-17 14:15:35 -08:00
|
|
|
}
|
2019-12-17 17:29:41 -08:00
|
|
|
current = name
|
|
|
|
sid, err := w.symbols.ReverseLookup(value)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
values = append(values, sid)
|
|
|
|
}
|
|
|
|
if d.Err() != nil {
|
|
|
|
return d.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Handle the last label.
|
|
|
|
if len(values) > 0 {
|
|
|
|
if err := w.writeLabelIndex(string(current), values); err != nil {
|
2019-12-17 14:15:35 -08:00
|
|
|
return err
|
|
|
|
}
|
2016-12-09 12:40:38 -08:00
|
|
|
}
|
2019-12-17 14:15:35 -08:00
|
|
|
return nil
|
|
|
|
}
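For the ReverseLookup call above, the key property is simply that symbols were written in sorted order, so a value can be mapped back to its ID by binary search rather than a hash map. A toy sketch of that idea over an in-memory sorted slice (the real Symbols reader works over the mmapped on-disk format instead):

package main

import (
    "fmt"
    "sort"
)

// reverseLookup maps a symbol back to its ID by binary search over a
// sorted list, the property a sorted symbol table relies on instead of
// keeping a hash map in memory.
func reverseLookup(syms []string, value string) (uint32, error) {
    i := sort.SearchStrings(syms, value)
    if i == len(syms) || syms[i] != value {
        return 0, fmt.Errorf("unknown symbol %q", value)
    }
    return uint32(i), nil
}

func main() {
    syms := []string{"", "api", "db", "instance", "job"}
    id, err := reverseLookup(syms, "db")
    fmt.Println(id, err) // 2 <nil>
}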
|
2016-12-09 12:40:38 -08:00
|
|
|
|
2019-12-17 14:15:35 -08:00
|
|
|
func (w *Writer) writeLabelIndex(name string, values []uint32) error {
|
2017-04-28 05:28:25 -07:00
|
|
|
// Align beginning to 4 bytes for more efficient index list scans.
|
2017-11-30 06:34:49 -08:00
|
|
|
if err := w.addPadding(4); err != nil {
|
2017-04-28 05:28:25 -07:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-07-09 23:31:44 -07:00
|
|
|
w.labelIndexes = append(w.labelIndexes, labelIndexHashEntry{
|
2019-12-17 14:15:35 -08:00
|
|
|
keys: []string{name},
|
2019-12-17 13:54:13 -08:00
|
|
|
offset: w.f.pos,
|
2016-12-09 13:12:16 -08:00
|
|
|
})
|
|
|
|
|
2019-12-17 13:54:13 -08:00
|
|
|
startPos := w.f.pos
|
2019-12-11 04:49:13 -08:00
|
|
|
// Leave 4 bytes of space for the length, which will be calculated later.
|
|
|
|
if err := w.write([]byte("alen")); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
w.crc32.Reset()
|
|
|
|
|
|
|
|
w.buf1.Reset()
|
2019-12-17 14:15:35 -08:00
|
|
|
w.buf1.PutBE32int(1) // Number of names.
|
|
|
|
w.buf1.PutBE32int(len(values))
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.WriteToHash(w.crc32)
|
|
|
|
if err := w.write(w.buf1.Get()); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-12-09 12:40:38 -08:00
|
|
|
|
2019-12-17 14:15:35 -08:00
|
|
|
for _, v := range values {
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.Reset()
|
2019-12-17 14:15:35 -08:00
|
|
|
w.buf1.PutBE32(v)
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.WriteToHash(w.crc32)
|
|
|
|
if err := w.write(w.buf1.Get()); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-04-25 07:45:44 -07:00
|
|
|
}
|
|
|
|
|
2019-12-11 04:49:13 -08:00
|
|
|
// Write out the length.
|
2019-02-22 09:11:11 -08:00
|
|
|
w.buf1.Reset()
|
2021-11-11 02:14:28 -08:00
|
|
|
l := w.f.pos - startPos - 4
|
|
|
|
if l > math.MaxUint32 {
|
|
|
|
return errors.Errorf("label index size exceeds 4 bytes: %d", l)
|
|
|
|
}
|
|
|
|
w.buf1.PutBE32int(int(l))
|
2019-12-11 04:49:13 -08:00
|
|
|
if err := w.writeAt(w.buf1.Get(), startPos); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-04-25 10:01:25 -07:00
|
|
|
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.Reset()
|
|
|
|
w.buf1.PutHashSum(w.crc32)
|
|
|
|
return w.write(w.buf1.Get())
|
2016-12-09 11:45:46 -08:00
|
|
|
}
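The function above uses a pattern that recurs throughout this writer: reserve four bytes ("alen") for a length, write the payload while it is checksummed with CRC32 (Castagnoli), backfill the real length with a positional write, then append the checksum. A standalone sketch of that pattern against a plain os.File (writeSection is an invented helper, not the Writer's buffered implementation):

package main

import (
    "encoding/binary"
    "fmt"
    "hash/crc32"
    "io"
    "math"
    "os"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// writeSection appends a section to f as: 4-byte big-endian length,
// payload, 4-byte CRC32 (Castagnoli) of the payload. The length bytes
// are reserved first and backfilled once the payload size is known.
func writeSection(f *os.File, payload []byte) error {
    if uint64(len(payload)) > math.MaxUint32 {
        return fmt.Errorf("section size exceeds 4 bytes: %d", len(payload))
    }
    start, err := f.Seek(0, io.SeekEnd)
    if err != nil {
        return err
    }
    // Reserve 4 bytes for the length, to be backfilled below.
    if _, err := f.Write([]byte("alen")); err != nil {
        return err
    }
    if _, err := f.Write(payload); err != nil {
        return err
    }
    var buf [4]byte
    binary.BigEndian.PutUint32(buf[:], uint32(len(payload)))
    // Backfill the length over the "alen" placeholder.
    if _, err := f.WriteAt(buf[:], start); err != nil {
        return err
    }
    // Append the checksum of the payload (the length bytes are not hashed).
    binary.BigEndian.PutUint32(buf[:], crc32.Checksum(payload, castagnoli))
    _, err = f.Write(buf[:])
    return err
}

func main() {
    f, err := os.CreateTemp("", "section")
    if err != nil {
        panic(err)
    }
    defer os.Remove(f.Name())
    if err := writeSection(f, []byte("hello")); err != nil {
        panic(err)
    }
}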
|
|
|
|
|
2019-07-09 23:31:44 -07:00
|
|
|
// writeLabelIndexesOffsetTable writes the label indices offset table.
|
|
|
|
func (w *Writer) writeLabelIndexesOffsetTable() error {
|
2019-12-17 13:54:13 -08:00
|
|
|
startPos := w.f.pos
|
2019-12-11 04:49:13 -08:00
|
|
|
// Leave 4 bytes of space for the length, which will be calculated later.
|
|
|
|
if err := w.write([]byte("alen")); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
w.crc32.Reset()
|
|
|
|
|
|
|
|
w.buf1.Reset()
|
|
|
|
w.buf1.PutBE32int(len(w.labelIndexes))
|
|
|
|
w.buf1.WriteToHash(w.crc32)
|
|
|
|
if err := w.write(w.buf1.Get()); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-04-26 09:01:13 -07:00
|
|
|
|
2019-07-09 23:31:44 -07:00
|
|
|
for _, e := range w.labelIndexes {
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.Reset()
|
|
|
|
w.buf1.PutUvarint(len(e.keys))
|
2017-04-26 09:01:13 -07:00
|
|
|
for _, k := range e.keys {
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.PutUvarintStr(k)
|
|
|
|
}
|
|
|
|
w.buf1.PutUvarint64(e.offset)
|
|
|
|
w.buf1.WriteToHash(w.crc32)
|
|
|
|
if err := w.write(w.buf1.Get()); err != nil {
|
|
|
|
return err
|
2017-04-26 09:01:13 -07:00
|
|
|
}
|
|
|
|
}
|
2019-12-11 04:49:13 -08:00
|
|
|
// Write out the length.
|
2019-02-22 09:11:11 -08:00
|
|
|
w.buf1.Reset()
|
2021-11-11 02:14:28 -08:00
|
|
|
l := w.f.pos - startPos - 4
|
|
|
|
if l > math.MaxUint32 {
|
|
|
|
return errors.Errorf("label indexes offset table size exceeds 4 bytes: %d", l)
|
|
|
|
}
|
|
|
|
w.buf1.PutBE32int(int(l))
|
2019-12-11 04:49:13 -08:00
|
|
|
if err := w.writeAt(w.buf1.Get(), startPos); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-04-26 09:01:13 -07:00
|
|
|
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.Reset()
|
|
|
|
w.buf1.PutHashSum(w.crc32)
|
|
|
|
return w.write(w.buf1.Get())
|
2017-04-26 09:01:13 -07:00
|
|
|
}
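For reference, each entry the offset table above stores is just a uvarint key count, each key as a uvarint length plus its bytes, and a uvarint64 offset. A small sketch of that entry encoding using only the standard library (appendOffsetEntry is an invented helper; the real code goes through its Encbuf type):

package main

import (
    "encoding/binary"
    "fmt"
)

// appendOffsetEntry encodes one offset-table entry: the number of keys,
// each key as a uvarint length followed by its bytes, then the offset.
func appendOffsetEntry(b []byte, keys []string, offset uint64) []byte {
    b = binary.AppendUvarint(b, uint64(len(keys)))
    for _, k := range keys {
        b = binary.AppendUvarint(b, uint64(len(k)))
        b = append(b, k...)
    }
    return binary.AppendUvarint(b, offset)
}

func main() {
    entry := appendOffsetEntry(nil, []string{"job"}, 4096)
    fmt.Printf("% x\n", entry)
}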
|
|
|
|
|
2019-07-09 23:31:44 -07:00
|
|
|
// writePostingsOffsetTable writes the postings offset table.
|
|
|
|
func (w *Writer) writePostingsOffsetTable() error {
|
2019-12-17 13:16:56 -08:00
|
|
|
// Ensure everything is in the temporary file.
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fPO.Flush(); err != nil {
|
2019-12-11 04:49:13 -08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-12-17 16:55:29 -08:00
|
|
|
startPos := w.f.pos
|
|
|
|
// Leave 4 bytes of space for the length, which will be calculated later.
|
|
|
|
if err := w.write([]byte("alen")); err != nil {
|
2019-12-17 13:16:56 -08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-12-17 16:55:29 -08:00
|
|
|
// Copy over the tmp posting offset table, but we need to
|
|
|
|
// adjust the offsets.
|
2019-12-17 17:29:41 -08:00
|
|
|
adjustment := w.postingsStart
|
2019-12-17 16:55:29 -08:00
|
|
|
|
2019-12-17 13:16:56 -08:00
|
|
|
w.buf1.Reset()
|
|
|
|
w.crc32.Reset()
|
|
|
|
w.buf1.PutBE32int(int(w.cntPO)) // Count.
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.WriteToHash(w.crc32)
|
|
|
|
if err := w.write(w.buf1.Get()); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-12-17 16:55:29 -08:00
|
|
|
|
|
|
|
f, err := fileutil.OpenMmapFile(w.fPO.name)
|
|
|
|
if err != nil {
|
2019-12-17 13:16:56 -08:00
|
|
|
return err
|
|
|
|
}
|
2020-01-04 06:55:02 -08:00
|
|
|
defer func() {
|
|
|
|
if f != nil {
|
|
|
|
f.Close()
|
|
|
|
}
|
|
|
|
}()
|
2019-12-17 16:55:29 -08:00
|
|
|
d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))
|
|
|
|
cnt := w.cntPO
|
|
|
|
for d.Err() == nil && cnt > 0 {
|
|
|
|
w.buf1.Reset()
|
|
|
|
w.buf1.PutUvarint(d.Uvarint()) // Keycount.
|
|
|
|
w.buf1.PutUvarintStr(yoloString(d.UvarintBytes())) // Label name.
|
|
|
|
w.buf1.PutUvarintStr(yoloString(d.UvarintBytes())) // Label value.
|
|
|
|
w.buf1.PutUvarint64(d.Uvarint64() + adjustment) // Offset.
|
|
|
|
w.buf1.WriteToHash(w.crc32)
|
|
|
|
if err := w.write(w.buf1.Get()); err != nil {
|
2019-12-17 13:16:56 -08:00
|
|
|
return err
|
|
|
|
}
|
2019-12-17 16:55:29 -08:00
|
|
|
cnt--
|
2019-12-17 13:16:56 -08:00
|
|
|
}
|
2019-12-17 16:55:29 -08:00
|
|
|
if d.Err() != nil {
|
|
|
|
return d.Err()
|
2019-07-09 23:31:44 -07:00
|
|
|
}
|
|
|
|
|
2019-12-17 13:16:56 -08:00
|
|
|
// Clean up the temporary file.
|
2020-01-04 06:55:02 -08:00
|
|
|
if err := f.Close(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
f = nil
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fPO.Close(); err != nil {
|
2019-12-17 13:16:56 -08:00
|
|
|
return err
|
|
|
|
}
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fPO.Remove(); err != nil {
|
2019-12-11 04:49:13 -08:00
|
|
|
return err
|
|
|
|
}
|
2019-12-17 13:54:13 -08:00
|
|
|
w.fPO = nil
|
2019-07-09 23:31:44 -07:00
|
|
|
|
2019-12-17 16:55:29 -08:00
|
|
|
// Write out the length.
|
|
|
|
w.buf1.Reset()
|
2021-11-11 02:14:28 -08:00
|
|
|
l := w.f.pos - startPos - 4
|
|
|
|
if l > math.MaxUint32 {
|
|
|
|
return errors.Errorf("postings offset table size exceeds 4 bytes: %d", l)
|
|
|
|
}
|
|
|
|
w.buf1.PutBE32int(int(l))
|
2019-12-17 16:55:29 -08:00
|
|
|
if err := w.writeAt(w.buf1.Get(), startPos); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-12-17 13:16:56 -08:00
|
|
|
// Finally write the hash.
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.Reset()
|
|
|
|
w.buf1.PutHashSum(w.crc32)
|
|
|
|
return w.write(w.buf1.Get())
|
2019-07-09 23:31:44 -07:00
|
|
|
}
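A hedged sketch of the offset adjustment done above: each entry from the temporary table is decoded and re-encoded unchanged, except that the trailing uvarint offset is shifted by where the postings actually start in the final index file. adjustEntry is an invented helper working on plain byte slices rather than the Decbuf/Encbuf helpers; it returns the re-encoded entry plus the unread remainder so it can be called in a loop.

package main

import (
    "encoding/binary"
    "fmt"
)

// adjustEntry re-encodes one offset-table entry (key count, keys, offset),
// adding adjustment to the offset so it points into the final index file
// instead of the temporary one. It returns the re-encoded entry and the
// remaining, unread bytes.
func adjustEntry(src []byte, adjustment uint64) (out, rest []byte, err error) {
    kc, n := binary.Uvarint(src)
    if n <= 0 {
        return nil, nil, fmt.Errorf("bad key count")
    }
    src = src[n:]
    out = binary.AppendUvarint(out, kc)
    // Copy the keys (label name and value) through unchanged.
    for i := uint64(0); i < kc; i++ {
        l, n := binary.Uvarint(src)
        if n <= 0 || uint64(len(src)-n) < l {
            return nil, nil, fmt.Errorf("bad key")
        }
        out = binary.AppendUvarint(out, l)
        out = append(out, src[n:n+int(l)]...)
        src = src[n+int(l):]
    }
    // Shift the offset into the final file.
    off, n := binary.Uvarint(src)
    if n <= 0 {
        return nil, nil, fmt.Errorf("bad offset")
    }
    return binary.AppendUvarint(out, off+adjustment), src[n:], nil
}

func main() {
    // One tmp entry: 2 keys ("job", "api"), offset 10.
    entry := binary.AppendUvarint(nil, 2)
    for _, k := range []string{"job", "api"} {
        entry = binary.AppendUvarint(entry, uint64(len(k)))
        entry = append(entry, k...)
    }
    entry = binary.AppendUvarint(entry, 10)

    out, rest, err := adjustEntry(entry, 4096)
    fmt.Println(len(out), len(rest), err)
}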
|
|
|
|
|
2019-12-17 11:49:54 -08:00
|
|
|
const indexTOCLen = 6*8 + crc32.Size
|
2017-04-26 09:01:13 -07:00
|
|
|
|
2017-11-30 06:34:49 -08:00
|
|
|
func (w *Writer) writeTOC() error {
|
2019-02-22 09:11:11 -08:00
|
|
|
w.buf1.Reset()
|
2017-04-26 09:01:13 -07:00
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
w.buf1.PutBE64(w.toc.Symbols)
|
|
|
|
w.buf1.PutBE64(w.toc.Series)
|
|
|
|
w.buf1.PutBE64(w.toc.LabelIndices)
|
|
|
|
w.buf1.PutBE64(w.toc.LabelIndicesTable)
|
|
|
|
w.buf1.PutBE64(w.toc.Postings)
|
|
|
|
w.buf1.PutBE64(w.toc.PostingsTable)
|
2017-04-26 09:01:13 -07:00
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
w.buf1.PutHash(w.crc32)
|
2017-04-26 09:01:13 -07:00
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
return w.write(w.buf1.Get())
|
2017-04-26 09:01:13 -07:00
|
|
|
}
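The TOC written above is a fixed-size trailer: six big-endian uint64 section offsets followed by a CRC32 (Castagnoli) over those 48 bytes, which is exactly what indexTOCLen (6*8 + crc32.Size) accounts for. A standalone sketch of that layout (encodeTOC is an invented name; the field order only echoes the sections written above):

package main

import (
    "encoding/binary"
    "fmt"
    "hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// encodeTOC lays out six section offsets as big-endian uint64s and
// appends a CRC32 (Castagnoli) over them: 6*8 + 4 bytes in total.
func encodeTOC(symbols, series, labelIndices, labelIndicesTable, postings, postingsTable uint64) []byte {
    buf := make([]byte, 0, 6*8+crc32.Size)
    for _, v := range []uint64{symbols, series, labelIndices, labelIndicesTable, postings, postingsTable} {
        buf = binary.BigEndian.AppendUint64(buf, v)
    }
    return binary.BigEndian.AppendUint32(buf, crc32.Checksum(buf, castagnoli))
}

func main() {
    toc := encodeTOC(5, 96, 1024, 2048, 4096, 8192)
    fmt.Printf("%d bytes\n", len(toc)) // 52 bytes
}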
|
|
|
|
|
2019-12-17 16:55:29 -08:00
|
|
|
func (w *Writer) writePostingsToTmpFiles() error {
|
2019-12-11 09:20:41 -08:00
|
|
|
names := make([]string, 0, len(w.labelNames))
|
|
|
|
for n := range w.labelNames {
|
|
|
|
names = append(names, n)
|
|
|
|
}
|
2022-09-30 07:33:56 -07:00
|
|
|
slices.Sort(names)
|
2019-12-11 09:20:41 -08:00
|
|
|
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.f.Flush(); err != nil {
|
2019-12-11 09:20:41 -08:00
|
|
|
return err
|
|
|
|
}
|
2019-12-17 13:54:13 -08:00
|
|
|
f, err := fileutil.OpenMmapFile(w.f.name)
|
2019-12-11 09:20:41 -08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2017-02-01 22:58:54 -08:00
|
|
|
}
|
2019-12-11 09:20:41 -08:00
|
|
|
defer f.Close()
|
|
|
|
|
Avoid WriteAt for Postings.
Flushing buffers and doing a pwrite per posting is expensive
time wise, so go back to the old way for those. This doubles
our memory usage, but that's still small as it's only
~8 bytes per time series in the index. This is 30-40% faster.
benchmark old ns/op new ns/op delta
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1101429174 724362123 -34.23%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1074466374 720977022 -32.90%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1166510282 677702636 -41.90%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1075013071 696855960 -35.18%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1231673790 829328610 -32.67%
benchmark old allocs new allocs delta
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 832571 731435 -12.15%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 894875 793823 -11.29%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 912931 811804 -11.08%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 933511 832366 -10.83%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1022791 921554 -9.90%
benchmark old bytes new bytes delta
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 129063496 126472364 -2.01%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 124154888 122300764 -1.49%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 128790648 126394856 -1.86%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 120570696 118946548 -1.35%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 138754288 136317432 -1.76%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-12 05:31:45 -08:00
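As a rough illustration of the buffered side of the trade-off described above (writeOffsetsBuffered is an invented helper, not the Writer's code): the per-series offsets are kept in a plain slice, costing only a few bytes each, and then written in one sequential buffered pass instead of issuing a positional write per entry.

package main

import (
    "bufio"
    "encoding/binary"
    "os"
)

// writeOffsetsBuffered keeps the per-series offsets in memory and writes
// them in a single sequential, buffered pass, avoiding one WriteAt
// syscall (and buffer flush) per posting.
func writeOffsetsBuffered(f *os.File, offsets []uint32) error {
    w := bufio.NewWriter(f)
    var buf [4]byte
    for _, off := range offsets {
        binary.BigEndian.PutUint32(buf[:], off)
        if _, err := w.Write(buf[:]); err != nil {
            return err
        }
    }
    return w.Flush()
}

func main() {
    f, err := os.CreateTemp("", "postings")
    if err != nil {
        panic(err)
    }
    defer os.Remove(f.Name())
    if err := writeOffsetsBuffered(f, []uint32{256, 257, 300}); err != nil {
        panic(err)
    }
}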
|
|
|
// Write out the special all-postings entry.
|
2019-12-11 09:20:41 -08:00
|
|
|
offsets := []uint32{}
|
|
|
|
d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.toc.LabelIndices))
|
2019-12-12 05:31:45 -08:00
|
|
|
d.Skip(int(w.toc.Series))
|
2019-12-11 09:20:41 -08:00
|
|
|
for d.Len() > 0 {
|
Coalesce series reads where we can.
When compacting rather than doing a read of all
series in the index per label name, do many at once
but only when it won't use (much) more ram than writing the
special all index does.
original in-memory postings:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1 1202383447 ns/op 158936496 B/op 1031511 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1 1141792706 ns/op 154453408 B/op 1093453 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1 1169288829 ns/op 161072336 B/op 1110021 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1 1115700103 ns/op 149480472 B/op 1129180 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1 1283813141 ns/op 162937800 B/op 1202771 allocs/op
before:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1 1145195941 ns/op 131749984 B/op 834400 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1 1233526345 ns/op 127889416 B/op 897033 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1 1821942296 ns/op 131665648 B/op 914836 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1 8035568665 ns/op 123811832 B/op 934312 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1 71325926267 ns/op 140722648 B/op 1016824 allocs/op
after:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1 1101429174 ns/op 129063496 B/op 832571 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1 1074466374 ns/op 124154888 B/op 894875 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1 1166510282 ns/op 128790648 B/op 912931 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1 1075013071 ns/op 120570696 B/op 933511 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1 1231673790 ns/op 138754288 B/op 1022791 allocs/op
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-11 13:24:03 -08:00
|
|
|
d.ConsumePadding()
|
2019-12-11 09:20:41 -08:00
|
|
|
startPos := w.toc.LabelIndices - uint64(d.Len())
|
|
|
|
if startPos%16 != 0 {
|
|
|
|
return errors.Errorf("series not 16-byte aligned at %d", startPos)
|
|
|
|
}
|
|
|
|
offsets = append(offsets, uint32(startPos/16))
|
2019-12-17 11:49:54 -08:00
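As a rough illustration of the streaming idea in that commit, here is a minimal sketch with made-up types (not the tsdb/index API): symbols are emitted in sorted order as they are seen, and later lookups search the already-written, sorted data instead of a hash map.

package main

import (
	"fmt"
	"sort"
)

// symbolTable stands in for the on-disk symbols section: entries are appended
// in sorted order as compaction streams them out, never all held in a map.
type symbolTable struct {
	syms []string
}

func (t *symbolTable) add(s string) { t.syms = append(t.syms, s) }

// reverseLookup resolves a symbol to its reference by searching the sorted
// data, analogous to scanning the file rather than doing a hash lookup.
func (t *symbolTable) reverseLookup(s string) (int, error) {
	i := sort.SearchStrings(t.syms, s)
	if i == len(t.syms) || t.syms[i] != s {
		return 0, fmt.Errorf("symbol %q not found", s)
	}
	return i, nil
}

func main() {
	t := &symbolTable{}
	for _, s := range []string{"__name__", "instance", "job"} { // already sorted
		t.add(s)
	}
	ref, err := t.reverseLookup("job")
	fmt.Println(ref, err) // 2 <nil>
}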
|
|
|
// Skip to next series.
|
2019-12-17 16:55:29 -08:00
|
|
|
x := d.Uvarint()
|
|
|
|
d.Skip(x + crc32.Size)
|
2019-12-11 09:20:41 -08:00
|
|
|
if err := d.Err(); err != nil {
|
2019-12-17 16:55:29 -08:00
|
|
|
return err
|
2019-12-11 09:20:41 -08:00
|
|
|
}
|
|
|
|
}
|
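The skip above works because each series entry is length-prefixed and followed by a CRC32, so the decoder can jump straight to the next entry. A tiny stand-alone sketch of that arithmetic (plain bytes instead of the decoder):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	// Varint length 3, a 3-byte body, a 4-byte CRC32, then the next entry's first byte.
	buf := []byte{3, 'a', 'b', 'c', 0, 0, 0, 0, 9}
	l, n := binary.Uvarint(buf)     // length of the entry body
	next := n + int(l) + crc32.Size // jump over body and checksum in one step
	fmt.Println(next, buf[next])    // 8 9
}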
Coalesce series reads where we can.
When compacting, rather than doing a read of all
series in the index once per label name, handle many label names at once,
but only when doing so won't use (much) more RAM than writing the
special all-postings index does.
original in-memory postings:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1 1202383447 ns/op 158936496 B/op 1031511 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1 1141792706 ns/op 154453408 B/op 1093453 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1 1169288829 ns/op 161072336 B/op 1110021 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1 1115700103 ns/op 149480472 B/op 1129180 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1 1283813141 ns/op 162937800 B/op 1202771 allocs/op
before:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1 1145195941 ns/op 131749984 B/op 834400 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1 1233526345 ns/op 127889416 B/op 897033 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1 1821942296 ns/op 131665648 B/op 914836 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1 8035568665 ns/op 123811832 B/op 934312 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1 71325926267 ns/op 140722648 B/op 1016824 allocs/op
after:
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1 1101429174 ns/op 129063496 B/op 832571 allocs/op
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1 1074466374 ns/op 124154888 B/op 894875 allocs/op
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1 1166510282 ns/op 128790648 B/op 912931 allocs/op
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1 1075013071 ns/op 120570696 B/op 933511 allocs/op
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1 1231673790 ns/op 138754288 B/op 1022791 allocs/op
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-11 13:24:03 -08:00
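The batching rule from that commit, isolated as a minimal stand-alone sketch (assumed names, not the real Writer fields; the actual loop follows below): label names are grouped into one pass over the series section as long as the sum of their postings counts stays within the size of the all-postings index, which bounds the extra memory a batch can need.

package main

import "fmt"

// batchLabelNames groups label names so that each batch's total postings count
// stays within maxPostings (the size of the all-postings list).
func batchLabelNames(names []string, counts map[string]uint64, maxPostings uint64) [][]string {
	var batches [][]string
	for len(names) > 0 {
		var batch []string
		var c uint64
		for len(names) > 0 && c+counts[names[0]] <= maxPostings {
			c += counts[names[0]]
			batch = append(batch, names[0])
			names = names[1:]
		}
		if len(batch) == 0 {
			// Defensive only: the writer relies on no single label name having
			// more postings than maxPostings, so this case cannot occur there.
			batch = append(batch, names[0])
			names = names[1:]
		}
		batches = append(batches, batch)
	}
	return batches
}

func main() {
	counts := map[string]uint64{"job": 6, "instance": 5, "path": 3}
	fmt.Println(batchLabelNames([]string{"job", "instance", "path"}, counts, 8))
	// [[job] [instance path]]
}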
|
|
|
if err := w.writePosting("", "", offsets); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
maxPostings := uint64(len(offsets)) // No label name can have more postings than this.
|
|
|
|
|
|
|
|
for len(names) > 0 {
|
|
|
|
batchNames := []string{}
|
|
|
|
var c uint64
|
|
|
|
// Try to bunch up label names into one loop, but avoid
|
|
|
|
// using more memory than a single label name can.
|
|
|
|
for len(names) > 0 {
|
|
|
|
if w.labelNames[names[0]]+c > maxPostings {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
batchNames = append(batchNames, names[0])
|
Avoid WriteAt for Postings.
Flushing buffers and doing a pwrite per posting is expensive
time-wise, so go back to the old way for postings. This doubles
our memory usage, but that's still small as it's only
~8 bytes per time series in the index. This is 30-40% faster.
benchmark old ns/op new ns/op delta
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 1101429174 724362123 -34.23%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 1074466374 720977022 -32.90%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 1166510282 677702636 -41.90%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 1075013071 696855960 -35.18%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1231673790 829328610 -32.67%
benchmark old allocs new allocs delta
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 832571 731435 -12.15%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 894875 793823 -11.29%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 912931 811804 -11.08%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 933511 832366 -10.83%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1022791 921554 -9.90%
benchmark old bytes new bytes delta
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 129063496 126472364 -2.01%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 124154888 122300764 -1.49%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 128790648 126394856 -1.86%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 120570696 118946548 -1.35%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 138754288 136317432 -1.76%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-12 05:31:45 -08:00
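A minimal stand-alone sketch of what that trade-off looks like (assumed helper, not the writer's real buffers): the 4-byte series references of one postings list are collected in memory and written with a single buffered write, instead of flushing and issuing a positional write per entry.

package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
)

// writePostingsList encodes a whole postings list (entry count followed by
// big-endian 4-byte references) and writes it in one go.
func writePostingsList(w *bufio.Writer, refs []uint32) error {
	buf := make([]byte, 4+4*len(refs))
	binary.BigEndian.PutUint32(buf, uint32(len(refs)))
	for i, r := range refs {
		binary.BigEndian.PutUint32(buf[4+4*i:], r)
	}
	_, err := w.Write(buf)
	return err
}

func main() {
	var out bytes.Buffer
	bw := bufio.NewWriter(&out)
	if err := writePostingsList(bw, []uint32{3, 7, 42}); err != nil {
		panic(err)
	}
	bw.Flush()
	fmt.Println(out.Len()) // 16 bytes: a 4-byte count plus three 4-byte references
}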
|
|
|
c += w.labelNames[names[0]]
|
2019-12-11 13:24:03 -08:00
|
|
|
names = names[1:]
|
|
|
|
}
|
2019-12-11 09:20:41 -08:00
|
|
|
|
2019-12-17 11:49:54 -08:00
|
|
|
nameSymbols := map[uint32]string{}
|
2019-12-11 13:24:03 -08:00
|
|
|
for _, name := range batchNames {
|
2019-12-17 11:49:54 -08:00
|
|
|
sid, err := w.symbols.ReverseLookup(name)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
nameSymbols[sid] = name
|
2019-12-11 13:24:03 -08:00
|
|
|
}
|
|
|
|
// Label name -> label value -> positions.
|
|
|
|
postings := map[uint32]map[uint32][]uint32{}
|
2019-12-11 09:20:41 -08:00
|
|
|
|
|
|
|
d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.toc.LabelIndices))
|
2019-12-11 13:24:03 -08:00
|
|
|
d.Skip(int(w.toc.Series))
|
2019-12-11 09:20:41 -08:00
|
|
|
for d.Len() > 0 {
|
2019-12-11 13:24:03 -08:00
|
|
|
d.ConsumePadding()
|
2019-12-11 09:20:41 -08:00
|
|
|
startPos := w.toc.LabelIndices - uint64(d.Len())
|
|
|
|
l := d.Uvarint() // Length of this series in bytes.
|
|
|
|
startLen := d.Len()
|
|
|
|
|
2019-12-11 13:24:03 -08:00
|
|
|
// See if label names we want are in the series.
|
2019-12-11 09:20:41 -08:00
|
|
|
numLabels := d.Uvarint()
|
|
|
|
for i := 0; i < numLabels; i++ {
|
|
|
|
lno := uint32(d.Uvarint())
|
|
|
|
lvo := uint32(d.Uvarint())
|
|
|
|
|
2019-12-11 13:24:03 -08:00
|
|
|
if _, ok := nameSymbols[lno]; ok {
|
|
|
|
if _, ok := postings[lno]; !ok {
|
|
|
|
postings[lno] = map[uint32][]uint32{}
|
|
|
|
}
|
|
|
|
postings[lno][lvo] = append(postings[lno][lvo], uint32(startPos/16))
|
2019-12-11 09:20:41 -08:00
|
|
|
}
|
|
|
|
}
|
2019-12-17 11:49:54 -08:00
|
|
|
// Skip to next series.
|
|
|
|
d.Skip(l - (startLen - d.Len()) + crc32.Size)
|
2019-12-11 09:20:41 -08:00
|
|
|
if err := d.Err(); err != nil {
|
2020-01-13 14:40:12 -08:00
|
|
|
return err
|
2019-12-11 09:20:41 -08:00
|
|
|
}
|
|
|
|
}
|
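The scan that just closed collects, for label names in the current batch only, a map of name symbol -> value symbol -> series references. A minimal in-memory sketch of that collection step (plain structs instead of the on-disk decoder):

package main

import "fmt"

type series struct {
	ref    uint32
	labels [][2]uint32 // (name symbol, value symbol) pairs
}

// collect scans every series once and records its reference under
// name -> value -> refs, but only for the wanted label names.
func collect(all []series, wanted map[uint32]struct{}) map[uint32]map[uint32][]uint32 {
	postings := map[uint32]map[uint32][]uint32{}
	for _, s := range all {
		for _, l := range s.labels {
			lno, lvo := l[0], l[1]
			if _, ok := wanted[lno]; !ok {
				continue
			}
			if _, ok := postings[lno]; !ok {
				postings[lno] = map[uint32][]uint32{}
			}
			postings[lno][lvo] = append(postings[lno][lvo], s.ref)
		}
	}
	return postings
}

func main() {
	all := []series{
		{ref: 1, labels: [][2]uint32{{10, 100}, {11, 200}}},
		{ref: 2, labels: [][2]uint32{{10, 101}}},
	}
	fmt.Println(collect(all, map[uint32]struct{}{10: {}}))
	// map[10:map[100:[1] 101:[2]]]
}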
2017-02-01 22:58:54 -08:00
|
|
|
|
2019-12-11 13:24:03 -08:00
|
|
|
for _, name := range batchNames {
|
|
|
|
// Write out postings for this label name.
|
2019-12-17 11:49:54 -08:00
|
|
|
sid, err := w.symbols.ReverseLookup(name)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
values := make([]uint32, 0, len(postings[sid]))
|
|
|
|
for v := range postings[sid] {
|
2019-12-11 13:24:03 -08:00
|
|
|
values = append(values, v)
|
|
|
|
}
|
|
|
|
// Symbol numbers are in order, so the strings will also be in order.
|
|
|
|
sort.Sort(uint32slice(values))
|
|
|
|
for _, v := range values {
|
2019-12-17 11:49:54 -08:00
|
|
|
value, err := w.symbols.Lookup(v)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := w.writePosting(name, value, postings[sid][v]); err != nil {
|
2019-12-11 13:24:03 -08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2019-12-11 09:20:41 -08:00
|
|
|
}
|
2019-12-16 09:24:48 -08:00
|
|
|
select {
|
|
|
|
case <-w.ctx.Done():
|
|
|
|
return w.ctx.Err()
|
|
|
|
default:
|
|
|
|
}
|
2019-12-11 09:20:41 -08:00
|
|
|
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (w *Writer) writePosting(name, value string, offs []uint32) error {
|
2017-04-28 05:28:25 -07:00
|
|
|
// Align beginning to 4 bytes for more efficient postings list scans.
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fP.AddPadding(4); err != nil {
|
2017-04-28 05:28:25 -07:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-12-17 13:16:56 -08:00
|
|
|
// Write out postings offset table to temporary file as we go.
|
|
|
|
w.buf1.Reset()
|
|
|
|
w.buf1.PutUvarint(2)
|
|
|
|
w.buf1.PutUvarintStr(name)
|
|
|
|
w.buf1.PutUvarintStr(value)
|
2019-12-17 16:55:29 -08:00
|
|
|
w.buf1.PutUvarint64(w.fP.pos) // This is relative to the postings tmp file, not the final index file.
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fPO.Write(w.buf1.Get()); err != nil {
|
2019-12-17 13:16:56 -08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
w.cntPO++
|
2016-12-10 00:44:00 -08:00
|
|
|
|
2019-12-11 04:49:13 -08:00
|
|
|
w.buf1.Reset()
|
2019-12-11 09:20:41 -08:00
|
|
|
w.buf1.PutBE32int(len(offs))
|
2017-04-28 05:17:53 -07:00
|
|
|
|
2019-12-11 09:20:41 -08:00
|
|
|
for _, off := range offs {
|
|
|
|
if off > (1<<32)-1 {
|
|
|
|
return errors.Errorf("series offset %d exceeds 4 bytes", off)
|
|
|
|
}
|
|
|
|
w.buf1.PutBE32(off)
|
2019-12-11 04:49:13 -08:00
|
|
|
}
|
2017-04-25 07:45:44 -07:00
|
|
|
|
2019-12-12 05:31:45 -08:00
|
|
|
w.buf2.Reset()
|
2021-11-11 02:14:28 -08:00
|
|
|
l := w.buf1.Len()
|
|
|
|
// We convert to uint to make code compile on 32-bit systems, as math.MaxUint32 doesn't fit into int there.
|
|
|
|
if uint(l) > math.MaxUint32 {
|
|
|
|
return errors.Errorf("posting size exceeds 4 bytes: %d", l)
|
|
|
|
}
|
|
|
|
w.buf2.PutBE32int(l)
|
2019-12-12 05:31:45 -08:00
|
|
|
w.buf1.PutHash(w.crc32)
|
2020-01-09 03:28:10 -08:00
|
|
|
return w.fP.Write(w.buf2.Get(), w.buf1.Get())
|
2019-12-17 16:55:29 -08:00
|
|
|
}
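As the "Avoid WriteAt for Postings." note above explains, writePosting encodes a whole postings list into in-memory buffers and hands it to the postings tmp file in a single Write call instead of flushing and issuing a pwrite per posting. A hedged sketch of the byte layout that produces for one list, assuming the Castagnoli CRC-32 table used elsewhere in this package and the standard encoding/binary and hash/crc32 imports (encodePosting and be32 are hypothetical helpers, not this package's API):

// Layout: len (4B BE, covering count+refs) | count (4B BE) |
// series ref (4B BE) ... | CRC-32 over count+refs (4B BE).
func encodePosting(offs []uint32) []byte {
	body := make([]byte, 0, 4+4*len(offs))
	body = append(body, be32(uint32(len(offs)))...)
	for _, off := range offs {
		body = append(body, be32(off)...)
	}
	out := append(be32(uint32(len(body))), body...)
	crc := crc32.Checksum(body, crc32.MakeTable(crc32.Castagnoli))
	return append(out, be32(crc)...)
}

func be32(v uint32) []byte {
	var b [4]byte
	binary.BigEndian.PutUint32(b[:], v)
	return b[:]
}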
|
|
|
|
|
|
|
|
func (w *Writer) writePostings() error {
|
Write label indices based on the posting offset table.
This avoids having to build it up in RAM, and means that all variable
memory usage for compactions is now 0.25 bytes per symbol plus a few
O(labelnames) structures. So in practice, pretty close to constant
memory for compactions.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 662974828 667162981 +0.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2459590377 2131168138 -13.35%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3808280548 3919290378 +2.91%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8513884311 8738099339 +2.63%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1898843003 1944131966 +2.39%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5601478437 6031391658 +7.67%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 11225096097 11359624463 +1.20%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 23994637282 23919583343 -0.31%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 891042098 826898358 -7.20%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 915949138 902555676 -1.46%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 955138431 879067946 -7.96%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 991447640 958785968 -3.29%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1068729356 980249080 -8.28%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 470778 470556 -0.05%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 791429 791225 -0.03%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1111514 1111257 -0.02%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2111498 2111369 -0.01%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 841433 841220 -0.03%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1911469 1911202 -0.01%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3041558 3041328 -0.01%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6741534 6741382 -0.00%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 824856 820873 -0.48%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 887220 885180 -0.23%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 905253 901539 -0.41%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 925148 913632 -1.24%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 1019141 978727 -3.97%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 35694744 41523836 +16.33%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 53405264 59499056 +11.41%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 74160320 78151568 +5.38%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 120878480 135364672 +11.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 203832448 209925504 +2.99%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 341029208 346551064 +1.62%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 580217176 582345224 +0.37%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1356872288 1363495368 +0.49%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 119535672 94815920 -20.68%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 115352280 95980776 -16.79%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 119472320 98724460 -17.37%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 111979312 94325456 -15.77%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 116628584 98566344 -15.49%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 17:29:41 -08:00
|
|
|
// The postings tmp file was written 4-byte aligned; pad the main index here
// so that alignment still holds once the tmp file is copied in below.
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.f.AddPadding(4); err != nil {
|
2019-12-17 16:55:29 -08:00
|
|
|
return err
|
|
|
|
}
|
2019-12-17 17:29:41 -08:00
|
|
|
w.postingsStart = w.f.pos
|
2019-12-17 16:55:29 -08:00
|
|
|
|
|
|
|
// Copy temporary file into main index.
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fP.Flush(); err != nil {
|
2019-12-17 16:55:29 -08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := w.fP.f.Seek(0, 0); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// Don't need to calculate a checksum, so can copy directly.
|
|
|
|
n, err := io.CopyBuffer(w.f.fbuf, w.fP.f, make([]byte, 1<<20))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if uint64(n) != w.fP.pos {
|
|
|
|
return errors.Errorf("wrote %d bytes to posting temporary file, but only read back %d", w.fP.pos, n)
|
|
|
|
}
|
|
|
|
w.f.pos += uint64(n)
|
|
|
|
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fP.Close(); err != nil {
|
2019-12-17 17:29:41 -08:00
|
|
|
return err
|
|
|
|
}
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fP.Remove(); err != nil {
|
2019-12-17 16:55:29 -08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
w.fP = nil
|
|
|
|
return nil
|
2016-12-09 11:45:46 -08:00
|
|
|
}
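The copy step above streams the flushed postings tmp file back into the main index with io.CopyBuffer and then checks that the byte count matches the position recorded while writing the tmp file. A hedged, generic sketch of that copy-and-verify step over plain io.Writer/os.File values, assuming the standard fmt, io and os packages (appendFile is a hypothetical helper, not the file writer used here):

func appendFile(dst io.Writer, src *os.File, want int64) error {
	// Rewind the already-flushed temporary file before streaming it out.
	if _, err := src.Seek(0, io.SeekStart); err != nil {
		return err
	}
	// Reuse a 1MiB buffer for the copy, as the code above does.
	n, err := io.CopyBuffer(dst, src, make([]byte, 1<<20))
	if err != nil {
		return err
	}
	if n != want {
		return fmt.Errorf("copied %d bytes from the temporary file, expected %d", n, want)
	}
	return nil
}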
|
|
|
|
|
2017-03-02 14:35:02 -08:00
|
|
|
type uint32slice []uint32
|
|
|
|
|
|
|
|
func (s uint32slice) Len() int { return len(s) }
|
|
|
|
func (s uint32slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|
|
|
func (s uint32slice) Less(i, j int) bool { return s[i] < s[j] }
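uint32slice implements sort.Interface so that slices of 32-bit offsets can be ordered with sort.Sort. On newer Go versions the same ordering can be expressed without the helper type; an illustrative aside only, not a change to this file:

// sort.Slice(offs, func(i, j int) bool { return offs[i] < offs[j] }) // Go 1.8+
// slices.Sort(offs)                                                  // Go 1.21+, import "slices"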
|
|
|
|
|
2019-07-09 23:31:44 -07:00
|
|
|
type labelIndexHashEntry struct {
|
2017-04-26 09:01:13 -07:00
|
|
|
keys []string
|
|
|
|
offset uint64
|
2016-12-09 13:12:16 -08:00
|
|
|
}
|
|
|
|
|
2017-11-30 06:34:49 -08:00
|
|
|
func (w *Writer) Close() error {
|
2020-01-04 06:55:02 -08:00
|
|
|
// Even if this fails, we need to close all the files.
|
|
|
|
ensureErr := w.ensureStage(idxStageDone)
|
|
|
|
|
Stream symbols during compaction. (#6468)
Rather than buffer up symbols in RAM, do it one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than doing a hash lookup. This is noise compared to the overall cost
of compacting series with thousands of samples, though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
|
|
|
if w.symbolFile != nil {
|
|
|
|
if err := w.symbolFile.Close(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2019-12-17 16:55:29 -08:00
|
|
|
if w.fP != nil {
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fP.Close(); err != nil {
|
2019-12-17 16:55:29 -08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2019-12-17 13:16:56 -08:00
|
|
|
if w.fPO != nil {
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.fPO.Close(); err != nil {
|
2019-12-17 13:16:56 -08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2020-01-09 03:28:10 -08:00
|
|
|
if err := w.f.Close(); err != nil {
|
2020-01-04 06:55:02 -08:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return ensureErr
|
2016-12-09 11:45:46 -08:00
|
|
|
}
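Close above returns as soon as any individual close fails. A hedged sketch of the aggregate-errors alternative, reusing only the tsdb_errors.NewMulti(...).Err() helper that NewFileReader below already uses (closeAll is a hypothetical helper, not this method's current behaviour, and it assumes NewMulti drops nil errors):

func closeAll(cs ...io.Closer) error {
	errs := make([]error, 0, len(cs))
	for _, c := range cs {
		if c != nil {
			errs = append(errs, c.Close()) // nil close errors are assumed to be ignored by NewMulti.
		}
	}
	return tsdb_errors.NewMulti(errs...).Err()
}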
|
2017-03-07 03:47:49 -08:00
|
|
|
|
2019-12-17 11:49:54 -08:00
|
|
|
// StringIter iterates over a sorted list of strings.
|
|
|
|
type StringIter interface {
|
|
|
|
// Next advances the iterator and returns true if another value was found.
|
|
|
|
Next() bool
|
|
|
|
|
|
|
|
// At returns the value at the current iterator position.
|
|
|
|
At() string
|
|
|
|
|
|
|
|
// Err returns the last error of the iterator.
|
|
|
|
Err() error
|
|
|
|
}
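A minimal sketch of a slice-backed StringIter over an already-sorted []string (stringListIter is a hypothetical name used for illustration; the package's own iterator implementations live elsewhere):

type stringListIter struct {
	l   []string
	cur string
}

func (s *stringListIter) Next() bool {
	if len(s.l) == 0 {
		return false
	}
	s.cur, s.l = s.l[0], s.l[1:]
	return true
}

// At returns the current string; Err never fails for an in-memory list.
func (s *stringListIter) At() string { return s.cur }
func (s *stringListIter) Err() error { return nil }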
|
|
|
|
|
2017-11-30 06:34:49 -08:00
|
|
|
type Reader struct {
|
Reduce memory used by postings offset table.
Rather than keeping the offset of each postings list, instead keep every
nth offset from the postings offset table. As postings list offsets have
always been sorted, we can then get to the closest entry before the one we
want and iterate forwards. I haven't done much tuning on the 32 number; it
was chosen to try not to read through more than a 4k page of data.
Switch to a bulk interface for fetching postings, and use it to avoid having
to re-read parts of the posting offset table when querying lots of it.
For an index like the one BenchmarkHeadPostingForMatchers uses, RAM
for r.postings drops from 3.79MB to 80.19kB, or about 48x less.
Bytes allocated go down by 30%, and surprisingly CPU usage drops by
4-6% for typical queries too.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 35231 36673 +4.09%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 563380 540627 -4.04%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 536782 534186 -0.48%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 533990 541550 +1.42%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113374598 117969608 +4.05%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 146329884 139651442 -4.56%
BenchmarkPostingsForMatchers/Block/i=~""-4 50346510 44961127 -10.70%
BenchmarkPostingsForMatchers/Block/i!=""-4 41261550 35356165 -14.31%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112544418 116904010 +3.87%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112487086 116864918 +3.89%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 41094758 35457904 -13.72%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 41906372 36151473 -13.73%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 147262414 140424800 -4.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 28615629 27872072 -2.60%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 147117177 140462403 -4.52%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 175096826 167902298 -4.11%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 4 6 +50.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 17 +13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100010 100012 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200069 200040 -0.01%
BenchmarkPostingsForMatchers/Block/i=~""-4 200072 200045 -0.01%
BenchmarkPostingsForMatchers/Block/i!=""-4 200070 200041 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100013 100017 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100017 100023 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200073 200046 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200075 200050 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200074 200049 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111165 111150 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200078 200055 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311282 311238 -0.01%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 264 296 +12.12%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 520 552 +6.15%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600461 1600482 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 24900801 17259077 -30.69%
BenchmarkPostingsForMatchers/Block/i=~""-4 24900836 17259151 -30.69%
BenchmarkPostingsForMatchers/Block/i!=""-4 24900760 17259048 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600557 1600621 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600717 1600813 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 24900856 17259176 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 24900952 17259304 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 24900993 17259333 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3788311 3142630 -17.04%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 24901137 17259509 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 28693086 20405680 -28.88%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-05 10:27:40 -08:00
|
|
|
b ByteSlice
|
|
|
|
toc *TOC
|
2017-03-07 03:47:49 -08:00
|
|
|
|
|
|
|
// Closer that releases the underlying resources of the byte slice.
|
|
|
|
c io.Closer
|
|
|
|
|
2019-12-05 10:27:40 -08:00
|
|
|
// Map of LabelName to the positions of some of its LabelValues in the postings offset table.
|
|
|
|
// The first and last values for each name are always present.
|
|
|
|
postings map[string][]postingOffset
|
2020-01-06 06:06:11 -08:00
|
|
|
// For the v1 format, labelname -> labelvalue -> offset.
|
|
|
|
postingsV1 map[string]map[string]uint64
|
Load only some offsets into the symbol table into memory.
Rather than keeping the entire symbol table in memory, keep every nth
offset and walk from there to the entry we need. This ends up slightly
slower, ~360ms per 1M series returned from PostingsForMatchers, which is
not much compared to the CPU the rest of such a query goes on to use.
Make LabelValues use the postings tables, rather than having
to do symbol lookups. Use yoloString, as PostingsForMatchers
doesn't need the strings to stick around and adjust the API
call to keep the Querier open until it's all marshalled.
Remove allocatedSymbols memory optimisation, we no longer keep all the
symbol strings in heap memory. Remove LabelValuesFor and LabelIndices,
they're dead code. Ensure we've still tests for label indices,
and add missing test that we can work with old V1 Format index files.
PostingsForMatchers performance is slightly better, with a big drop in
allocation counts due to using yoloString for LabelValues:
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 36698 36681 -0.05%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 522786 560887 +7.29%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 511652 537680 +5.09%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 522102 564239 +8.07%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113689911 111795919 -1.67%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 135825572 132871085 -2.18%
BenchmarkPostingsForMatchers/Block/i=~""-4 40782628 38038181 -6.73%
BenchmarkPostingsForMatchers/Block/i!=""-4 31267869 29194327 -6.63%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112733329 111568823 -1.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112868153 111232029 -1.45%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 31338257 29349446 -6.35%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 32054482 29972436 -6.50%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 136504654 133968442 -1.86%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 27960350 27264997 -2.49%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 136765564 133860724 -2.12%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 163714583 159453668 -2.60%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 17 15 -11.76%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100012 12 -99.99%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200040 100040 -49.99%
BenchmarkPostingsForMatchers/Block/i=~""-4 200045 100045 -49.99%
BenchmarkPostingsForMatchers/Block/i!=""-4 200041 100041 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100017 17 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100023 23 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200046 100046 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200050 100050 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200049 100049 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111150 11150 -89.97%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200055 100055 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311238 111238 -64.26%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 552 1544 +179.71%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600482 1606125 +0.35%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 17259065 17264709 +0.03%
BenchmarkPostingsForMatchers/Block/i=~""-4 17259150 17264780 +0.03%
BenchmarkPostingsForMatchers/Block/i!=""-4 17259048 17264680 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600610 1606242 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600813 1606434 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17259176 17264808 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17259304 17264936 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17259333 17264965 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3142628 3148262 +0.18%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17259509 17265141 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20405680 20416944 +0.06%
However, overall Select performance is down and involves more allocs, due to
having to do more than a simple map lookup to resolve a symbol, and because
all the returned strings are allocated:
benchmark old ns/op new ns/op delta
BenchmarkQuerierSelect/Block/1of1000000-4 506092636 862678244 +70.46%
BenchmarkQuerierSelect/Block/10of1000000-4 505638968 860917636 +70.26%
BenchmarkQuerierSelect/Block/100of1000000-4 505229450 882150048 +74.60%
BenchmarkQuerierSelect/Block/1000of1000000-4 515905414 862241115 +67.13%
BenchmarkQuerierSelect/Block/10000of1000000-4 516785354 874841110 +69.29%
BenchmarkQuerierSelect/Block/100000of1000000-4 540742808 907030187 +67.74%
BenchmarkQuerierSelect/Block/1000000of1000000-4 815224288 1181236903 +44.90%
benchmark old allocs new allocs delta
BenchmarkQuerierSelect/Block/1of1000000-4 4000020 6000020 +50.00%
BenchmarkQuerierSelect/Block/10of1000000-4 4000038 6000038 +50.00%
BenchmarkQuerierSelect/Block/100of1000000-4 4000218 6000218 +50.00%
BenchmarkQuerierSelect/Block/1000of1000000-4 4002018 6002018 +49.97%
BenchmarkQuerierSelect/Block/10000of1000000-4 4020018 6020018 +49.75%
BenchmarkQuerierSelect/Block/100000of1000000-4 4200018 6200018 +47.62%
BenchmarkQuerierSelect/Block/1000000of1000000-4 6000018 8000019 +33.33%
benchmark old bytes new bytes delta
BenchmarkQuerierSelect/Block/1of1000000-4 176001468 227201476 +29.09%
BenchmarkQuerierSelect/Block/10of1000000-4 176002620 227202628 +29.09%
BenchmarkQuerierSelect/Block/100of1000000-4 176014140 227214148 +29.09%
BenchmarkQuerierSelect/Block/1000of1000000-4 176129340 227329348 +29.07%
BenchmarkQuerierSelect/Block/10000of1000000-4 177281340 228481348 +28.88%
BenchmarkQuerierSelect/Block/100000of1000000-4 188801340 240001348 +27.12%
BenchmarkQuerierSelect/Block/1000000of1000000-4 304001340 355201616 +16.84%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-12 08:55:32 -08:00
|
|
|
|
|
|
|
symbols *Symbols
|
|
|
|
nameSymbols map[uint32]string // Cache of the label name symbol lookups,
|
|
|
|
// as there are not many and they are half of all lookups.
|
2017-10-27 09:29:59 -07:00
|
|
|
|
2018-01-10 11:19:16 -08:00
|
|
|
dec *Decoder
|
2017-12-01 03:06:37 -08:00
|
|
|
|
2018-01-10 11:19:16 -08:00
|
|
|
version int
|
2017-03-07 03:47:49 -08:00
|
|
|
}
|
|
|
|
|
2019-12-05 10:27:40 -08:00
|
|
|
type postingOffset struct {
|
|
|
|
value string
|
|
|
|
off int
|
|
|
|
}
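As the "Reduce memory used by postings offset table." note above describes, only a sampled subset of each name's values keeps a postingOffset entry in memory; a lookup finds the closest sampled entry at or before the wanted value and then scans the on-disk offset table forward from there. A hedged sketch of the in-memory part of that search, assuming the standard sort package (findSampledStart is a hypothetical helper):

// findSampledStart returns the table offset from which to scan forward for
// value v, and false when v sorts before every sampled entry (the first
// value of each name is always sampled, so v cannot exist in that case).
func findSampledStart(offsets []postingOffset, v string) (int, bool) {
	i := sort.Search(len(offsets), func(i int) bool { return offsets[i].value > v })
	if i == 0 {
		return 0, false
	}
	return offsets[i-1].off, true
}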
|
|
|
|
|
2017-11-09 09:27:09 -08:00
|
|
|
// ByteSlice abstracts a byte slice.
|
|
|
|
type ByteSlice interface {
|
|
|
|
// Len returns the length of the byte slice.
Len() int
|
|
|
|
// Range returns the bytes in the half-open interval [start, end).
Range(start, end int) []byte
|
|
|
|
}
|
|
|
|
|
|
|
|
type realByteSlice []byte
|
|
|
|
|
|
|
|
func (b realByteSlice) Len() int {
|
|
|
|
return len(b)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b realByteSlice) Range(start, end int) []byte {
|
|
|
|
return b[start:end]
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b realByteSlice) Sub(start, end int) ByteSlice {
|
|
|
|
return b[start:end]
|
|
|
|
}
|
|
|
|
|
2019-01-11 09:31:26 -08:00
|
|
|
// NewReader returns a new index reader on the given byte slice. It automatically
|
2018-02-12 02:40:12 -08:00
|
|
|
// handles different format versions.
|
2018-02-09 04:11:03 -08:00
|
|
|
func NewReader(b ByteSlice) (*Reader, error) {
|
2022-04-27 02:24:36 -07:00
|
|
|
return newReader(b, io.NopCloser(nil))
|
2018-01-08 09:33:35 -08:00
|
|
|
}
|
|
|
|
|
2017-11-30 06:34:49 -08:00
|
|
|
// NewFileReader returns a new index reader against the given index file.
|
2018-02-09 04:11:03 -08:00
|
|
|
func NewFileReader(path string) (*Reader, error) {
|
2017-11-30 06:34:49 -08:00
|
|
|
f, err := fileutil.OpenMmapFile(path)
|
2017-03-07 03:47:49 -08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-04-03 01:34:04 -07:00
|
|
|
r, err := newReader(realByteSlice(f.Bytes()), f)
|
|
|
|
if err != nil {
|
2020-10-28 08:24:58 -07:00
|
|
|
return nil, tsdb_errors.NewMulti(
|
|
|
|
err,
|
|
|
|
f.Close(),
|
|
|
|
).Err()
|
2019-04-03 01:34:04 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
return r, nil
|
2017-11-09 09:27:09 -08:00
|
|
|
}
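A hedged usage sketch: open an index file, release the mmap via Close when done, and walk the symbol table with the StringIter interface defined above. It assumes the Symbols() StringIter and Close() error methods defined elsewhere in this file, plus the standard fmt package (dumpSymbols itself is hypothetical):

func dumpSymbols(path string) error {
	r, err := NewFileReader(path)
	if err != nil {
		return err
	}
	defer r.Close()

	it := r.Symbols()
	for it.Next() {
		fmt.Println(it.At())
	}
	return it.Err()
}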
|
|
|
|
|
2018-02-09 04:11:03 -08:00
|
|
|
func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
|
2017-11-30 06:34:49 -08:00
|
|
|
r := &Reader{
|
2017-12-01 02:01:40 -08:00
|
|
|
b: b,
|
|
|
|
c: c,
|
2019-12-05 10:27:40 -08:00
|
|
|
postings: map[string][]postingOffset{},
|
2017-10-02 06:56:57 -07:00
|
|
|
}
|
2018-01-12 11:06:20 -08:00
|
|
|
|
2018-02-09 04:11:03 -08:00
|
|
|
// Verify header.
|
2019-01-11 09:31:26 -08:00
|
|
|
if r.b.Len() < HeaderLen {
|
2019-02-22 09:11:11 -08:00
|
|
|
return nil, errors.Wrap(encoding.ErrInvalidSize, "index header")
|
2017-03-07 03:47:49 -08:00
|
|
|
}
|
2017-11-09 09:27:09 -08:00
|
|
|
if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex {
|
2017-03-07 03:47:49 -08:00
|
|
|
return nil, errors.Errorf("invalid magic number %x", m)
|
|
|
|
}
|
2018-02-09 04:11:03 -08:00
|
|
|
r.version = int(r.b.Range(4, 5)[0])
|
|
|
|
|
2019-01-11 09:31:26 -08:00
|
|
|
if r.version != FormatV1 && r.version != FormatV2 {
|
2018-02-09 04:11:03 -08:00
|
|
|
return nil, errors.Errorf("unknown index file version %d", r.version)
|
|
|
|
}
|
2017-03-07 03:47:49 -08:00
|
|
|
|
2019-12-05 10:27:40 -08:00
|
|
|
var err error
|
|
|
|
r.toc, err = NewTOCFromByteSlice(b)
|
2019-01-11 09:31:26 -08:00
|
|
|
if err != nil {
|
2017-04-26 09:01:13 -07:00
|
|
|
return nil, errors.Wrap(err, "read TOC")
|
|
|
|
}
|
2019-01-11 09:31:26 -08:00
|
|
|
|
Load only some offsets of the symbol table into memory.
Rather than keeping the entire symbol table in memory, keep every nth
offset and walk from there to the entry we need. This ends up slightly
slower, ~360ms per 1M series returned from PostingsForMatchers, which is
not much considering the rest of the CPU time such a query would go on to
use.
Make LabelValues use the postings tables, rather than having
to do symbol lookups. Use yoloString, as PostingsForMatchers
doesn't need the strings to stick around, and adjust the API
call to keep the Querier open until everything is marshalled.
Remove the allocatedSymbols memory optimisation, as we no longer keep all the
symbol strings in heap memory. Remove LabelValuesFor and LabelIndices,
which are dead code. Ensure we still have tests for label indices,
and add a missing test that we can work with old V1 format index files.
PostingsForMatchers performance is slightly better, with a big drop in
allocation counts due to using yoloString for LabelValues:
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 36698 36681 -0.05%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 522786 560887 +7.29%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 511652 537680 +5.09%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 522102 564239 +8.07%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113689911 111795919 -1.67%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 135825572 132871085 -2.18%
BenchmarkPostingsForMatchers/Block/i=~""-4 40782628 38038181 -6.73%
BenchmarkPostingsForMatchers/Block/i!=""-4 31267869 29194327 -6.63%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112733329 111568823 -1.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112868153 111232029 -1.45%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 31338257 29349446 -6.35%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 32054482 29972436 -6.50%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 136504654 133968442 -1.86%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 27960350 27264997 -2.49%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 136765564 133860724 -2.12%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 163714583 159453668 -2.60%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 17 15 -11.76%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100012 12 -99.99%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200040 100040 -49.99%
BenchmarkPostingsForMatchers/Block/i=~""-4 200045 100045 -49.99%
BenchmarkPostingsForMatchers/Block/i!=""-4 200041 100041 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100017 17 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100023 23 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200046 100046 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200050 100050 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200049 100049 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111150 11150 -89.97%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200055 100055 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311238 111238 -64.26%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 552 1544 +179.71%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600482 1606125 +0.35%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 17259065 17264709 +0.03%
BenchmarkPostingsForMatchers/Block/i=~""-4 17259150 17264780 +0.03%
BenchmarkPostingsForMatchers/Block/i!=""-4 17259048 17264680 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600610 1606242 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600813 1606434 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17259176 17264808 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17259304 17264936 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17259333 17264965 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3142628 3148262 +0.18%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17259509 17265141 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20405680 20416944 +0.06%
However, overall Select performance is down and involves more allocs, because
resolving a symbol now takes more than a simple map lookup and all the returned
strings are allocated:
benchmark old ns/op new ns/op delta
BenchmarkQuerierSelect/Block/1of1000000-4 506092636 862678244 +70.46%
BenchmarkQuerierSelect/Block/10of1000000-4 505638968 860917636 +70.26%
BenchmarkQuerierSelect/Block/100of1000000-4 505229450 882150048 +74.60%
BenchmarkQuerierSelect/Block/1000of1000000-4 515905414 862241115 +67.13%
BenchmarkQuerierSelect/Block/10000of1000000-4 516785354 874841110 +69.29%
BenchmarkQuerierSelect/Block/100000of1000000-4 540742808 907030187 +67.74%
BenchmarkQuerierSelect/Block/1000000of1000000-4 815224288 1181236903 +44.90%
benchmark old allocs new allocs delta
BenchmarkQuerierSelect/Block/1of1000000-4 4000020 6000020 +50.00%
BenchmarkQuerierSelect/Block/10of1000000-4 4000038 6000038 +50.00%
BenchmarkQuerierSelect/Block/100of1000000-4 4000218 6000218 +50.00%
BenchmarkQuerierSelect/Block/1000of1000000-4 4002018 6002018 +49.97%
BenchmarkQuerierSelect/Block/10000of1000000-4 4020018 6020018 +49.75%
BenchmarkQuerierSelect/Block/100000of1000000-4 4200018 6200018 +47.62%
BenchmarkQuerierSelect/Block/1000000of1000000-4 6000018 8000019 +33.33%
benchmark old bytes new bytes delta
BenchmarkQuerierSelect/Block/1of1000000-4 176001468 227201476 +29.09%
BenchmarkQuerierSelect/Block/10of1000000-4 176002620 227202628 +29.09%
BenchmarkQuerierSelect/Block/100of1000000-4 176014140 227214148 +29.09%
BenchmarkQuerierSelect/Block/1000of1000000-4 176129340 227329348 +29.07%
BenchmarkQuerierSelect/Block/10000of1000000-4 177281340 228481348 +28.88%
BenchmarkQuerierSelect/Block/100000of1000000-4 188801340 240001348 +27.12%
BenchmarkQuerierSelect/Block/1000000of1000000-4 304001340 355201616 +16.84%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-12 08:55:32 -08:00
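As a rough, self-contained illustration of the sampling scheme described above (hypothetical names and a simplified in-memory stand-in, not the code in this file): keep only every factor-th offset of a sorted table in memory, and to resolve the k-th entry jump to the nearest kept offset at or before k and walk forward from there.

package main

import "fmt"

const factor = 32 // hypothetical sampling factor, in the spirit of symbolFactor

// sampledSymbols stands in for an on-disk symbol table: entries plays the role
// of the sequentially decodable data, while offsets records the position of
// every factor-th entry and is all that would be kept in memory.
type sampledSymbols struct {
	entries []string
	offsets []int
}

func newSampledSymbols(entries []string) *sampledSymbols {
	s := &sampledSymbols{entries: entries}
	for i := 0; i < len(entries); i += factor {
		s.offsets = append(s.offsets, i)
	}
	return s
}

// lookup resolves the k-th symbol by jumping to the nearest kept offset at or
// before k and walking forward one entry at a time, as a sequential decoder
// of length-prefixed strings would.
func (s *sampledSymbols) lookup(k int) (string, error) {
	if k < 0 || k >= len(s.entries) {
		return "", fmt.Errorf("symbol %d out of range", k)
	}
	i := s.offsets[k/factor] // nearest sampled position at or before k
	for i < k {              // bounded by factor-1 steps
		i++
	}
	return s.entries[i], nil
}

func main() {
	var entries []string
	for i := 0; i < 100; i++ {
		entries = append(entries, fmt.Sprintf("symbol-%03d", i))
	}
	s := newSampledSymbols(entries)
	v, _ := s.lookup(70)
	fmt.Println(v) // symbol-070
}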
|
|
|
r.symbols, err = NewSymbols(r.b, r.version, int(r.toc.Symbols))
|
2019-01-11 09:31:26 -08:00
|
|
|
if err != nil {
|
2017-10-02 06:56:57 -07:00
|
|
|
return nil, errors.Wrap(err, "read symbols")
|
|
|
|
}
|
2017-03-07 03:47:49 -08:00
|
|
|
|
2020-01-06 06:06:11 -08:00
|
|
|
if r.version == FormatV1 {
|
|
|
|
// Earlier V1 formats don't have a sorted postings offset table, so
|
|
|
|
// load the whole offset table into memory.
|
|
|
|
r.postingsV1 = map[string]map[string]uint64{}
|
2022-11-14 08:48:16 -08:00
|
|
|
if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, off uint64, _ int) error {
|
|
|
|
if _, ok := r.postingsV1[string(name)]; !ok {
|
|
|
|
r.postingsV1[string(name)] = map[string]uint64{}
|
|
|
|
r.postings[string(name)] = nil // Used to get a list of label names in places.
|
2020-01-06 06:06:11 -08:00
|
|
|
}
|
2022-11-14 08:48:16 -08:00
|
|
|
r.postingsV1[string(name)][string(value)] = off
|
2020-01-06 06:06:11 -08:00
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, errors.Wrap(err, "read postings table")
|
2017-12-01 02:01:40 -08:00
|
|
|
}
|
2020-01-06 06:06:11 -08:00
|
|
|
} else {
|
2022-11-14 08:48:16 -08:00
|
|
|
var lastName, lastValue []byte
|
2020-01-06 06:06:11 -08:00
|
|
|
lastOff := 0
|
|
|
|
valueCount := 0
|
|
|
|
// For the postings offset table we keep every label name but only every nth
|
|
|
|
// label value (plus the first and last one), to save memory.
|
2022-11-14 08:48:16 -08:00
|
|
|
if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, _ uint64, off int) error {
|
|
|
|
if _, ok := r.postings[string(name)]; !ok {
|
2020-01-06 06:06:11 -08:00
|
|
|
// Next label name.
|
2022-11-14 08:48:16 -08:00
|
|
|
r.postings[string(name)] = []postingOffset{}
|
|
|
|
if lastName != nil {
|
2020-01-06 06:06:11 -08:00
|
|
|
// Always include last value for each label name.
|
2022-11-14 08:48:16 -08:00
|
|
|
r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff})
|
2020-01-06 06:06:11 -08:00
|
|
|
}
|
|
|
|
valueCount = 0
|
2019-12-05 10:27:40 -08:00
|
|
|
}
|
2021-08-13 04:08:53 -07:00
|
|
|
if valueCount%symbolFactor == 0 {
|
2022-11-14 08:48:16 -08:00
|
|
|
r.postings[string(name)] = append(r.postings[string(name)], postingOffset{value: string(value), off: off})
|
|
|
|
lastName, lastValue = nil, nil
|
2020-01-06 06:06:11 -08:00
|
|
|
} else {
|
2022-11-14 08:48:16 -08:00
|
|
|
lastName, lastValue = name, value
|
2020-01-06 06:06:11 -08:00
|
|
|
lastOff = off
|
|
|
|
}
|
|
|
|
valueCount++
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, errors.Wrap(err, "read postings table")
|
2019-12-05 10:27:40 -08:00
|
|
|
}
|
2022-11-14 08:48:16 -08:00
|
|
|
if lastName != nil {
|
|
|
|
r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff})
|
2020-01-06 06:06:11 -08:00
|
|
|
}
|
|
|
|
// Trim any extra space in the slices.
|
|
|
|
for k, v := range r.postings {
|
|
|
|
l := make([]postingOffset, len(v))
|
|
|
|
copy(l, v)
|
|
|
|
r.postings[k] = l
|
2018-11-02 03:45:09 -07:00
|
|
|
}
|
2019-12-05 10:27:40 -08:00
|
|
|
}
|
2017-12-01 03:06:37 -08:00
|
|
|
|
2019-12-12 08:55:32 -08:00
|
|
|
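// Cache the symbol-table offset of every label name seen in the postings
// offset table, so lookups of these frequently used symbols can be served
// from memory rather than by decoding the symbol table.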
r.nameSymbols = make(map[uint32]string, len(r.postings))
|
|
|
|
for k := range r.postings {
|
|
|
|
if k == "" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
off, err := r.symbols.ReverseLookup(k)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "reverse symbol lookup")
|
|
|
|
}
|
|
|
|
r.nameSymbols[off] = k
|
|
|
|
}
|
|
|
|
|
2019-01-11 09:31:26 -08:00
|
|
|
r.dec = &Decoder{LookupSymbol: r.lookupSymbol}
|
2017-12-01 03:06:37 -08:00
|
|
|
|
2017-12-01 02:01:40 -08:00
|
|
|
return r, nil
|
2017-03-07 03:47:49 -08:00
|
|
|
}
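The r.postings map built above keeps, per label name, only every symbolFactor-th (value, offset) pair plus the last one. As a hedged, self-contained sketch of how a target value can still be found with that sparse index (hypothetical names, not the Reader's actual lookup code): binary-search the sampled entries for the last value at or before the target, then scan the on-disk offset table forward from that position.

package main

import (
	"fmt"
	"sort"
)

// sampledOffset mirrors the idea behind postingOffset: a value at which a kept
// entry of the postings offset table starts, and its position in that table.
type sampledOffset struct {
	value string
	off   int
}

// scanStart returns the table position to start a forward scan from when
// looking for target, i.e. the offset of the last sampled value <= target.
// The boolean is false if target sorts before every value of this label name.
func scanStart(sampled []sampledOffset, target string) (int, bool) {
	i := sort.Search(len(sampled), func(i int) bool { return sampled[i].value > target })
	if i == 0 {
		return 0, false
	}
	return sampled[i-1].off, true
}

func main() {
	// Pretend these were sampled from a sorted on-disk table, one entry kept
	// out of every few (offsets are made up for the example).
	sampled := []sampledOffset{
		{value: "aaa", off: 0},
		{value: "ddd", off: 96},
		{value: "ggg", off: 192},
	}
	off, ok := scanStart(sampled, "eee")
	fmt.Println(off, ok) // 96 true: decode the table forward from position 96
}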
|
|
|
|
|
2018-02-21 12:06:19 -08:00
|
|
|
// Version returns the file format version of the underlying index.
|
|
|
|
func (r *Reader) Version() int {
|
|
|
|
return r.version
|
|
|
|
}
|
|
|
|
|
2017-12-01 03:06:37 -08:00
|
|
|
// Range marks a byte range.
|
|
|
|
type Range struct {
|
|
|
|
Start, End int64
|
|
|
|
}
|
|
|
|
|
|
|
|
// PostingsRanges returns a new map of byte ranges in the underlying index file
|
|
|
|
// for all postings lists.
|
|
|
|
func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) {
|
|
|
|
m := map[labels.Label]Range{}
|
2022-11-14 08:48:16 -08:00
|
|
|
if err := ReadPostingsOffsetTable(r.b, r.toc.PostingsTable, func(name, value []byte, off uint64, _ int) error {
|
2019-12-05 10:27:40 -08:00
|
|
|
d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable)
|
|
|
|
if d.Err() != nil {
|
|
|
|
return d.Err()
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
2022-11-14 08:48:16 -08:00
|
|
|
m[labels.Label{Name: string(name), Value: string(value)}] = Range{
|
2019-12-05 10:27:40 -08:00
|
|
|
Start: int64(off) + 4,
|
|
|
|
End: int64(off) + 4 + int64(d.Len()),
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, errors.Wrap(err, "read postings table")
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
|
|
|
return m, nil
|
|
|
|
}
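A minimal usage sketch for PostingsRanges, assuming fmt were imported; the helper name is hypothetical and not part of this package:

// printPostingsRanges shows how the returned map can be consumed, e.g. to
// report how much of the index file each postings list occupies.
func printPostingsRanges(r *Reader) error {
	ranges, err := r.PostingsRanges()
	if err != nil {
		return err
	}
	for lbl, rng := range ranges {
		fmt.Printf("postings for %s=%q occupy bytes [%d, %d)\n", lbl.Name, lbl.Value, rng.Start, rng.End)
	}
	return nil
}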
|
|
|
|
|
2019-12-12 08:55:32 -08:00
|
|
|
type Symbols struct {
|
|
|
|
bs ByteSlice
|
|
|
|
version int
|
|
|
|
off int
|
|
|
|
|
|
|
|
offsets []int
|
|
|
|
seen int
|
|
|
|
}
|
|
|
|
|
|
|
|
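// symbolFactor is the sampling rate: only every 32nd entry of the symbol
// table, and of each label name's postings offsets, is kept in memory, with
// reads walking forward from the nearest kept offset. Per the commit message,
// 32 was chosen to try not to read through more than a 4k page of data.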
const symbolFactor = 32
|
|
|
|
|
|
|
|
// NewSymbols returns a Symbols object for symbol lookups.
|
2021-10-22 01:06:44 -07:00
|
|
|
func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) {
|
Load only some offsets into the symbol table into memory.
Rather than keeping the entire symbol table in memory, keep every nth
offset and walk from there to the entry we need. This ends up slightly
slower, ~360ms per 1M series returned from PostingsForMatchers which is
not much considering the rest of the CPU such a query would go on to
use.
Make LabelValues use the postings tables, rather than having
to do symbol lookups. Use yoloString, as PostingsForMatchers
doesn't need the strings to stick around and adjust the API
call to keep the Querier open until it's all marshalled.
Remove allocatedSymbols memory optimisation, we no longer keep all the
symbol strings in heap memory. Remove LabelValuesFor and LabelIndices,
they're dead code. Ensure we've still tests for label indices,
and add missing test that we can work with old V1 Format index files.
PostingForMatchers performance is slightly better, with a big drop in
allocation counts due to using yoloString for LabelValues:
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 36698 36681 -0.05%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 522786 560887 +7.29%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 511652 537680 +5.09%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 522102 564239 +8.07%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113689911 111795919 -1.67%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 135825572 132871085 -2.18%
BenchmarkPostingsForMatchers/Block/i=~""-4 40782628 38038181 -6.73%
BenchmarkPostingsForMatchers/Block/i!=""-4 31267869 29194327 -6.63%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112733329 111568823 -1.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112868153 111232029 -1.45%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 31338257 29349446 -6.35%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 32054482 29972436 -6.50%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 136504654 133968442 -1.86%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 27960350 27264997 -2.49%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 136765564 133860724 -2.12%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 163714583 159453668 -2.60%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 17 15 -11.76%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100012 12 -99.99%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200040 100040 -49.99%
BenchmarkPostingsForMatchers/Block/i=~""-4 200045 100045 -49.99%
BenchmarkPostingsForMatchers/Block/i!=""-4 200041 100041 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100017 17 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100023 23 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200046 100046 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200050 100050 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200049 100049 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111150 11150 -89.97%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200055 100055 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311238 111238 -64.26%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 552 1544 +179.71%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600482 1606125 +0.35%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 17259065 17264709 +0.03%
BenchmarkPostingsForMatchers/Block/i=~""-4 17259150 17264780 +0.03%
BenchmarkPostingsForMatchers/Block/i!=""-4 17259048 17264680 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600610 1606242 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600813 1606434 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17259176 17264808 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17259304 17264936 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17259333 17264965 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3142628 3148262 +0.18%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17259509 17265141 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20405680 20416944 +0.06%
However, overall Select performance is down and involves more allocations, because
resolving a symbol now requires more than a simple map lookup and all the strings
returned are allocated:
benchmark old ns/op new ns/op delta
BenchmarkQuerierSelect/Block/1of1000000-4 506092636 862678244 +70.46%
BenchmarkQuerierSelect/Block/10of1000000-4 505638968 860917636 +70.26%
BenchmarkQuerierSelect/Block/100of1000000-4 505229450 882150048 +74.60%
BenchmarkQuerierSelect/Block/1000of1000000-4 515905414 862241115 +67.13%
BenchmarkQuerierSelect/Block/10000of1000000-4 516785354 874841110 +69.29%
BenchmarkQuerierSelect/Block/100000of1000000-4 540742808 907030187 +67.74%
BenchmarkQuerierSelect/Block/1000000of1000000-4 815224288 1181236903 +44.90%
benchmark old allocs new allocs delta
BenchmarkQuerierSelect/Block/1of1000000-4 4000020 6000020 +50.00%
BenchmarkQuerierSelect/Block/10of1000000-4 4000038 6000038 +50.00%
BenchmarkQuerierSelect/Block/100of1000000-4 4000218 6000218 +50.00%
BenchmarkQuerierSelect/Block/1000of1000000-4 4002018 6002018 +49.97%
BenchmarkQuerierSelect/Block/10000of1000000-4 4020018 6020018 +49.75%
BenchmarkQuerierSelect/Block/100000of1000000-4 4200018 6200018 +47.62%
BenchmarkQuerierSelect/Block/1000000of1000000-4 6000018 8000019 +33.33%
benchmark old bytes new bytes delta
BenchmarkQuerierSelect/Block/1of1000000-4 176001468 227201476 +29.09%
BenchmarkQuerierSelect/Block/10of1000000-4 176002620 227202628 +29.09%
BenchmarkQuerierSelect/Block/100of1000000-4 176014140 227214148 +29.09%
BenchmarkQuerierSelect/Block/1000of1000000-4 176129340 227329348 +29.07%
BenchmarkQuerierSelect/Block/10000of1000000-4 177281340 228481348 +28.88%
BenchmarkQuerierSelect/Block/100000of1000000-4 188801340 240001348 +27.12%
BenchmarkQuerierSelect/Block/1000000of1000000-4 304001340 355201616 +16.84%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-12 08:55:32 -08:00
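For context on the yoloString mentioned above: it is the usual zero-copy []byte-to-string reinterpretation, which saves one allocation per label value but is only safe while the underlying buffer stays alive and unmodified. A minimal, illustrative sketch of the idea (assumed here; not necessarily the exact helper in this package):

package main

import (
	"fmt"
	"unsafe"
)

// yoloString reinterprets a byte slice as a string without copying.
// The result is only valid while the backing buffer is alive and unchanged,
// which is why callers must finish marshalling before releasing the Querier.
func yoloString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	buf := []byte("label-value")
	fmt.Println(yoloString(buf)) // prints "label-value" without copying the bytes
}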
|
|
|
s := &Symbols{
|
|
|
|
bs: bs,
|
|
|
|
version: version,
|
|
|
|
off: off,
|
|
|
|
}
|
2019-02-22 09:11:11 -08:00
|
|
|
d := encoding.NewDecbufAt(bs, off, castagnoliTable)
|
2017-10-02 06:56:57 -07:00
|
|
|
var (
|
|
|
|
origLen = d.Len()
|
|
|
|
cnt = d.Be32int()
|
|
|
|
basePos = off + 4
|
2017-10-02 06:56:57 -07:00
|
|
|
)
|
2020-01-08 12:39:38 -08:00
|
|
|
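// Pre-size for roughly one sampled offset per symbolFactor symbols.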
s.offsets = make([]int, 0, 1+cnt/symbolFactor)
|
|
|
|
for d.Err() == nil && s.seen < cnt {
|
|
|
|
if s.seen%symbolFactor == 0 {
|
|
|
|
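// Record the absolute position of this symbol in the index: table base (just past the length field) plus bytes consumed so far.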
s.offsets = append(s.offsets, basePos+origLen-d.Len())
|
|
|
|
}
|
|
|
|
d.UvarintBytes() // The symbol.
|
|
|
|
s.seen++
|
2018-01-16 20:37:57 -08:00
|
|
|
}
|
|
|
|
if d.Err() != nil {
|
|
|
|
return nil, d.Err()
|
|
|
|
}
|
|
|
|
return s, nil
|
|
|
|
}
|
2018-01-16 20:37:57 -08:00
|
|
|
|
|
|
|
func (s Symbols) Lookup(o uint32) (string, error) {
|
|
|
|
d := encoding.Decbuf{
|
|
|
|
B: s.bs.Range(0, s.bs.Len()),
|
|
|
|
}
|
2020-01-08 12:39:38 -08:00
|
|
|
|
|
|
|
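// In the v2 format, o is the symbol's sequential number in the table; in v1 it is an absolute byte offset into the index.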
if s.version == FormatV2 {
|
2020-01-08 12:39:38 -08:00
|
|
|
if int(o) >= s.seen {
|
|
|
|
return "", errors.Errorf("unknown symbol offset %d", o)
|
|
|
|
}
|
|
|
|
d.Skip(s.offsets[int(o/symbolFactor)])
|
|
|
|
// Walk until we find the one we want.
|
|
|
|
for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- {
|
|
|
|
d.UvarintBytes()
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
d.Skip(int(o))
|
|
|
|
}
|
|
|
|
sym := d.UvarintStr()
|
|
|
|
if d.Err() != nil {
|
|
|
|
return "", d.Err()
|
|
|
|
}
|
|
|
|
return sym, nil
|
|
|
|
}
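To make the sampled-offset walk in Lookup concrete, here is a small self-contained illustration of the arithmetic. The value of symbolFactor is assumed for the example; the real constant is defined elsewhere in this package:

package main

import "fmt"

// Illustrative value only; the real symbolFactor constant lives elsewhere in this package.
const symbolFactor = 32

// sampleAndWalk returns which sampled offset Lookup would skip to for symbol
// reference o, and how many entries it then walks forward from there.
func sampleAndWalk(o uint32) (sampleIdx int, walk uint32) {
	sampleIdx = int(o / symbolFactor)
	walk = o - (o/symbolFactor)*symbolFactor // equivalent to o % symbolFactor
	return sampleIdx, walk
}

func main() {
	for _, o := range []uint32{0, 1, 31, 32, 70} {
		idx, walk := sampleAndWalk(o)
		fmt.Printf("symbol #%d: start at offsets[%d], walk %d entries\n", o, idx, walk)
	}
}

So only every symbolFactor-th offset needs to stay in memory, at the cost of decoding at most symbolFactor-1 extra entries per lookup.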
|
2017-10-02 06:56:57 -07:00
|
|
|
|
|
|
|
func (s Symbols) ReverseLookup(sym string) (uint32, error) {
|
Stream symbols during compaction. (#6468)
Rather than buffering symbols up in RAM, handle them one by one
during compaction, then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than doing a hash lookup, but this is noise compared to the overall cost
of compacting series with thousands of samples.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
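The streaming approach described above can be illustrated with a toy merge of pre-sorted symbol lists: symbols are emitted one at a time in order, de-duplicated on the fly, and never buffered as a full set in RAM. This is a sketch of the idea only, not the compactor's actual code; all names are illustrative:

package main

import (
	"container/heap"
	"fmt"
)

// item is the current head element of one sorted input stream.
type item struct {
	sym string
	src int // index of the input stream it came from
}

type minHeap []item

func (h minHeap) Len() int            { return len(h) }
func (h minHeap) Less(i, j int) bool  { return h[i].sym < h[j].sym }
func (h minHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x interface{}) { *h = append(*h, x.(item)) }
func (h *minHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// mergeSymbols emits the sorted union of several sorted symbol lists,
// one symbol at a time, without materialising the combined set in memory.
func mergeSymbols(inputs [][]string, emit func(string)) {
	h := &minHeap{}
	next := make([]int, len(inputs))
	for i, in := range inputs {
		if len(in) > 0 {
			heap.Push(h, item{sym: in[0], src: i})
			next[i] = 1
		}
	}
	last, seen := "", false
	for h.Len() > 0 {
		it := heap.Pop(h).(item)
		if !seen || it.sym != last {
			emit(it.sym) // e.g. write the symbol straight to the new index
			last, seen = it.sym, true
		}
		if next[it.src] < len(inputs[it.src]) {
			heap.Push(h, item{sym: inputs[it.src][next[it.src]], src: it.src})
			next[it.src]++
		}
	}
}

func main() {
	blocks := [][]string{{"a", "c", "x"}, {"b", "c", "y"}}
	mergeSymbols(blocks, func(s string) { fmt.Println(s) }) // prints a, b, c, x, y in order
}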
|
|
|
if len(s.offsets) == 0 {
|
|
|
|
return 0, errors.Errorf("unknown symbol %q - no symbols", sym)
|
|
|
|
}
|
Load only some of the symbol table's offsets into memory.
Rather than keeping the entire symbol table in memory, keep every nth
offset and walk from there to the entry we need. This ends up slightly
slower, ~360ms per 1M series returned from PostingsForMatchers, which is
not much compared to the rest of the CPU such a query goes on to use.
Make LabelValues use the postings tables, rather than having
to do symbol lookups. Use yoloString, as PostingsForMatchers
doesn't need the strings to stick around, and adjust the API
call so that the Querier stays open until everything is marshalled.
Remove the allocatedSymbols memory optimisation, as we no longer keep all the
symbol strings in heap memory. Remove LabelValuesFor and LabelIndices,
which are dead code. Ensure we still have tests for label indices,
and add a missing test that we can work with old V1-format index files.
PostingsForMatchers performance is slightly better, with a big drop in
allocation counts due to using yoloString for LabelValues:
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 36698 36681 -0.05%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 522786 560887 +7.29%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 511652 537680 +5.09%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 522102 564239 +8.07%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113689911 111795919 -1.67%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 135825572 132871085 -2.18%
BenchmarkPostingsForMatchers/Block/i=~""-4 40782628 38038181 -6.73%
BenchmarkPostingsForMatchers/Block/i!=""-4 31267869 29194327 -6.63%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112733329 111568823 -1.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112868153 111232029 -1.45%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 31338257 29349446 -6.35%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 32054482 29972436 -6.50%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 136504654 133968442 -1.86%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 27960350 27264997 -2.49%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 136765564 133860724 -2.12%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 163714583 159453668 -2.60%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 17 15 -11.76%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100012 12 -99.99%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200040 100040 -49.99%
BenchmarkPostingsForMatchers/Block/i=~""-4 200045 100045 -49.99%
BenchmarkPostingsForMatchers/Block/i!=""-4 200041 100041 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100017 17 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100023 23 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200046 100046 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200050 100050 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200049 100049 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111150 11150 -89.97%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200055 100055 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311238 111238 -64.26%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 552 1544 +179.71%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600482 1606125 +0.35%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 17259065 17264709 +0.03%
BenchmarkPostingsForMatchers/Block/i=~""-4 17259150 17264780 +0.03%
BenchmarkPostingsForMatchers/Block/i!=""-4 17259048 17264680 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600610 1606242 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600813 1606434 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17259176 17264808 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17259304 17264936 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17259333 17264965 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3142628 3148262 +0.18%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17259509 17265141 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20405680 20416944 +0.06%
However, overall Select performance is down and involves more allocs, due to
having to do more than a simple map lookup to resolve a symbol and because all
the returned strings are allocated:
benchmark old ns/op new ns/op delta
BenchmarkQuerierSelect/Block/1of1000000-4 506092636 862678244 +70.46%
BenchmarkQuerierSelect/Block/10of1000000-4 505638968 860917636 +70.26%
BenchmarkQuerierSelect/Block/100of1000000-4 505229450 882150048 +74.60%
BenchmarkQuerierSelect/Block/1000of1000000-4 515905414 862241115 +67.13%
BenchmarkQuerierSelect/Block/10000of1000000-4 516785354 874841110 +69.29%
BenchmarkQuerierSelect/Block/100000of1000000-4 540742808 907030187 +67.74%
BenchmarkQuerierSelect/Block/1000000of1000000-4 815224288 1181236903 +44.90%
benchmark old allocs new allocs delta
BenchmarkQuerierSelect/Block/1of1000000-4 4000020 6000020 +50.00%
BenchmarkQuerierSelect/Block/10of1000000-4 4000038 6000038 +50.00%
BenchmarkQuerierSelect/Block/100of1000000-4 4000218 6000218 +50.00%
BenchmarkQuerierSelect/Block/1000of1000000-4 4002018 6002018 +49.97%
BenchmarkQuerierSelect/Block/10000of1000000-4 4020018 6020018 +49.75%
BenchmarkQuerierSelect/Block/100000of1000000-4 4200018 6200018 +47.62%
BenchmarkQuerierSelect/Block/1000000of1000000-4 6000018 8000019 +33.33%
benchmark old bytes new bytes delta
BenchmarkQuerierSelect/Block/1of1000000-4 176001468 227201476 +29.09%
BenchmarkQuerierSelect/Block/10of1000000-4 176002620 227202628 +29.09%
BenchmarkQuerierSelect/Block/100of1000000-4 176014140 227214148 +29.09%
BenchmarkQuerierSelect/Block/1000of1000000-4 176129340 227329348 +29.07%
BenchmarkQuerierSelect/Block/10000of1000000-4 177281340 228481348 +28.88%
BenchmarkQuerierSelect/Block/100000of1000000-4 188801340 240001348 +27.12%
BenchmarkQuerierSelect/Block/1000000of1000000-4 304001340 355201616 +16.84%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-12 08:55:32 -08:00
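The yoloString mentioned above is the zero-copy []byte-to-string conversion the lookup code below relies on. Its definition is not part of this excerpt, so the following is only a sketch of what such a helper typically looks like, assuming the usual unsafe reinterpretation (it needs the unsafe package):

// Sketch only (assumed definition, not taken from this file): reinterpret b as
// a string without copying. The result aliases b, so it is only valid while
// the backing bytes (e.g. the mmapped index) stay alive and unchanged, which
// is why callers must not retain it.
func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}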
	i := sort.Search(len(s.offsets), func(i int) bool {
		// Any decoding errors here will be lost, however
		// we already read through all of this at startup.
		d := encoding.Decbuf{
			B: s.bs.Range(0, s.bs.Len()),
		}
		d.Skip(s.offsets[i])
		return yoloString(d.UvarintBytes()) > sym
	})
	d := encoding.Decbuf{
		B: s.bs.Range(0, s.bs.Len()),
	}
	if i > 0 {
		i--
	}
	d.Skip(s.offsets[i])
	res := i * symbolFactor
	var lastLen int
	var lastSymbol string
	for d.Err() == nil && res <= s.seen {
		lastLen = d.Len()
		lastSymbol = yoloString(d.UvarintBytes())
		if lastSymbol >= sym {
			break
		}
		res++
	}
	if d.Err() != nil {
		return 0, d.Err()
	}
	if lastSymbol != sym {
		return 0, errors.Errorf("unknown symbol %q", sym)
	}
	if s.version == FormatV2 {
		return uint32(res), nil
	}
	return uint32(s.bs.Len() - lastLen), nil
}
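The lookup above is the core of the scheme described in the commit message: binary-search the sampled offsets, step back one sample, then walk forwards through the on-disk entries. As a self-contained toy illustration of that technique on a plain sorted string slice (sampleFactor, lookup and the example data are all hypothetical, not part of this package):

package main

import (
	"fmt"
	"sort"
)

const sampleFactor = 4 // toy stand-in for the real sampling rate (symbolFactor)

// lookup finds sym in the sorted slice syms while keeping only the sampled
// indices in memory, mirroring the binary-search-then-walk-forward pattern.
func lookup(syms []string, samples []int, sym string) (int, bool) {
	// First sample whose symbol is strictly greater than sym.
	i := sort.Search(len(samples), func(i int) bool {
		return syms[samples[i]] > sym
	})
	if i > 0 {
		i-- // step back to the closest sample at or before sym
	}
	// Walk forwards from the sampled position.
	for j := samples[i]; j < len(syms); j++ {
		switch {
		case syms[j] == sym:
			return j, true
		case syms[j] > sym:
			return 0, false
		}
	}
	return 0, false
}

func main() {
	syms := []string{"a", "b", "c", "d", "e", "f", "g", "h"}
	var samples []int
	for i := 0; i < len(syms); i += sampleFactor {
		samples = append(samples, i) // samples == [0 4]
	}
	fmt.Println(lookup(syms, samples, "e")) // 4 true
	fmt.Println(lookup(syms, samples, "x")) // 0 false
}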
Stream symbols during compaction. (#6468)
Rather than buffering up symbols in RAM, do it one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than do a hash lookup. This is noise compared to the overall cost of
compacting series with thousands of samples, though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
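As a rough sketch of the streaming idea described above (writeSymbolsStreaming and its emit parameter are illustrative, not this package's writer API): the compactor hands the writer an already-sorted symbol iterator, and every symbol is emitted as soon as it is produced instead of being buffered in a map first.

// writeSymbolsStreaming consumes a sorted StringIter (such as the one returned
// by Symbols.Iter below) and emits each symbol immediately, so at no point are
// all symbols held in RAM. Sketch only, under the assumptions stated above.
func writeSymbolsStreaming(it StringIter, emit func(sym string) error) error {
	for it.Next() {
		if err := emit(it.At()); err != nil {
			return err
		}
	}
	return it.Err()
}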
func (s Symbols) Size() int {
	return len(s.offsets) * 8
}

func (s Symbols) Iter() StringIter {
	d := encoding.NewDecbufAt(s.bs, s.off, castagnoliTable)
	cnt := d.Be32int()
	return &symbolsIter{
		d:   d,
		cnt: cnt,
	}
}
// symbolsIter implements StringIter.
type symbolsIter struct {
	d   encoding.Decbuf
	cnt int
	cur string
	err error
}
func (s *symbolsIter) Next() bool {
	if s.cnt == 0 || s.err != nil {
		return false
	}
	s.cur = yoloString(s.d.UvarintBytes())
	s.cnt--
	if s.d.Err() != nil {
		s.err = s.d.Err()
		return false
	}
	return true
}

func (s symbolsIter) At() string { return s.cur }

func (s symbolsIter) Err() error { return s.err }
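A usage sketch of the iterator above (collectSymbols and the strings.Clone copy are illustrative, not code from this file; it assumes import "strings"): because At() hands out yoloString-backed values that alias the index's bytes, a caller that wants to keep the symbols beyond the reader's lifetime copies them.

func collectSymbols(syms Symbols) ([]string, error) {
	var out []string
	it := syms.Iter()
	for it.Next() {
		// Copy: the iterator's strings alias the index's backing byte slice.
		out = append(out, strings.Clone(it.At()))
	}
	return out, it.Err()
}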
// ReadPostingsOffsetTable reads the postings offset table at the given
// position and calls f for each entry found.
// The name and value parameters passed to f reuse the backing memory of the
// underlying byte slice, so they shouldn't be persisted without copying them
// first.
// If f returns an error, decoding stops and that error is returned.
func ReadPostingsOffsetTable(bs ByteSlice, off uint64, f func(name, value []byte, postingsOffset uint64, labelOffset int) error) error {
	d := encoding.NewDecbufAt(bs, int(off), castagnoliTable)
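	// Assumption about the encoding helper (its definition is not shown in this
	// excerpt): NewDecbufAt bounds d to the length-prefixed table region at off
	// and verifies its CRC32 (Castagnoli) checksum, so if anything is wrong the
	// d.Err() checks further down stop the decoding loop before it starts.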
Reduce memory used by the postings offset table.
Rather than keeping the offset of each postings list, keep only every
nth offset of the postings lists. As postings
list offsets have always been sorted, we can then get to the closest
entry before the one we want and iterate forwards.
I haven't done much tuning on the number 32; it was chosen to try
not to read through more than a 4k page of data.
Switch to a bulk interface for fetching postings. Use it to avoid having
to re-read parts of the postings offset table when querying lots of it.
For an index like the one BenchmarkHeadPostingForMatchers uses, RAM
for r.postings drops from 3.79MB to 80.19kB, or about 48x.
Bytes allocated go down by 30%, and surprisingly CPU usage drops by
4-6% for typical queries too.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 35231 36673 +4.09%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 563380 540627 -4.04%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 536782 534186 -0.48%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 533990 541550 +1.42%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113374598 117969608 +4.05%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 146329884 139651442 -4.56%
BenchmarkPostingsForMatchers/Block/i=~""-4 50346510 44961127 -10.70%
BenchmarkPostingsForMatchers/Block/i!=""-4 41261550 35356165 -14.31%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112544418 116904010 +3.87%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112487086 116864918 +3.89%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 41094758 35457904 -13.72%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 41906372 36151473 -13.73%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 147262414 140424800 -4.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 28615629 27872072 -2.60%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 147117177 140462403 -4.52%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 175096826 167902298 -4.11%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 4 6 +50.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 17 +13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100010 100012 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200069 200040 -0.01%
BenchmarkPostingsForMatchers/Block/i=~""-4 200072 200045 -0.01%
BenchmarkPostingsForMatchers/Block/i!=""-4 200070 200041 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100013 100017 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100017 100023 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200073 200046 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200075 200050 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200074 200049 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111165 111150 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200078 200055 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311282 311238 -0.01%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 264 296 +12.12%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 520 552 +6.15%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600461 1600482 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 24900801 17259077 -30.69%
BenchmarkPostingsForMatchers/Block/i=~""-4 24900836 17259151 -30.69%
BenchmarkPostingsForMatchers/Block/i!=""-4 24900760 17259048 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600557 1600621 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600717 1600813 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 24900856 17259176 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 24900952 17259304 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 24900993 17259333 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3788311 3142630 -17.04%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 24901137 17259509 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 28693086 20405680 -28.88%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-05 10:27:40 -08:00
|
|
|
startLen := d.Len()
|
2019-02-22 09:11:11 -08:00
|
|
|
cnt := d.Be32()
|
2017-04-26 09:01:13 -07:00
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
for d.Err() == nil && d.Len() > 0 && cnt > 0 {
|
Reduce memory used by the postings offset table.
Rather than keeping the offset of every postings list, keep only every
nth postings list offset. As postings list offsets have always been
sorted, we can then jump to the closest entry before the one we want
and iterate forwards from there.
I haven't done much tuning on the sampling interval of 32; it was chosen
to try not to read through more than a 4k page of data.
Switch to a bulk interface for fetching postings. Use it to avoid having
to re-read parts of the postings offset table when querying lots of it.
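A minimal sketch of that sampling scheme, assuming an in-memory slice of
(label value, file offset) pairs already sorted by value; the type and
function names here are illustrative, not the actual Prometheus structures:

package sketch

import "sort"

// postingOffset pairs a label value with the file offset of its entry in
// the postings offset table (illustrative layout).
type postingOffset struct {
        value string
        off   int64
}

// sampledOffsets keeps only every nth entry of the full, sorted offset list,
// trading a short forward scan through the on-disk table for a much smaller
// in-memory table.
type sampledOffsets struct {
        sample []postingOffset // every nth entry, still sorted by value
        n      int             // sampling interval, e.g. 32
}

func newSampledOffsets(all []postingOffset, n int) *sampledOffsets {
        s := &sampledOffsets{n: n}
        for i := 0; i < len(all); i += n {
                s.sample = append(s.sample, all[i])
        }
        return s
}

// seek returns the table offset of the closest sampled entry at or before
// value. The caller then reads the on-disk table forwards from that offset
// until it reaches the exact value (or passes where it would have been).
func (s *sampledOffsets) seek(value string) (int64, bool) {
        i := sort.Search(len(s.sample), func(i int) bool {
                return s.sample[i].value > value
        })
        if i == 0 {
                return 0, false // value sorts before the first sampled entry
        }
        return s.sample[i-1].off, true
}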
2019-12-05 10:27:40 -08:00
|
|
|
offsetPos := startLen - d.Len()
|
2022-11-14 08:48:16 -08:00
|
|
|
|
|
|
|
if keyCount := d.Uvarint(); keyCount != 2 {
|
|
|
|
return errors.Errorf("unexpected number of keys for postings offset table %d", keyCount)
|
2017-03-07 03:47:49 -08:00
|
|
|
}
|
2022-11-14 08:48:16 -08:00
|
|
|
name := d.UvarintBytes()
|
|
|
|
value := d.UvarintBytes()
|
2019-02-22 09:11:11 -08:00
|
|
|
o := d.Uvarint64()
|
|
|
|
if d.Err() != nil {
|
2017-12-01 02:01:40 -08:00
|
|
|
break
|
|
|
|
}
|
2022-11-14 08:48:16 -08:00
|
|
|
if err := f(name, value, o, offsetPos); err != nil {
|
2017-12-01 02:01:40 -08:00
|
|
|
return err
|
|
|
|
}
|
2017-04-26 09:01:13 -07:00
|
|
|
cnt--
|
2017-03-07 03:47:49 -08:00
|
|
|
}
|
2019-02-22 09:11:11 -08:00
|
|
|
return d.Err()
|
2017-03-07 03:47:49 -08:00
|
|
|
}
|
|
|
|
|
2017-12-01 03:06:37 -08:00
|
|
|
// Close the reader and its underlying resources.
|
2017-11-30 06:34:49 -08:00
|
|
|
func (r *Reader) Close() error {
|
2017-03-07 03:47:49 -08:00
|
|
|
return r.c.Close()
|
|
|
|
}
|
|
|
|
|
2017-11-30 06:34:49 -08:00
|
|
|
func (r *Reader) lookupSymbol(o uint32) (string, error) {
|
Load only some offsets of the symbol table into memory.
Rather than keeping the entire symbol table in memory, keep every nth
offset and walk forwards from there to the entry we need. This ends up
slightly slower, ~360ms per 1M series returned from PostingsForMatchers,
which is not much considering the rest of the CPU such a query would go
on to use. (A rough sketch of this sampled lookup follows after the
benchmark tables below.)
Make LabelValues use the postings tables, rather than having
to do symbol lookups. Use yoloString, as PostingsForMatchers
doesn't need the strings to stick around, and adjust the API
call to keep the Querier open until it's all marshalled.
Remove the allocatedSymbols memory optimisation, as we no longer keep all
the symbol strings in heap memory. Remove LabelValuesFor and LabelIndices;
they're dead code. Ensure we still have tests for label indices,
and add a missing test that we can work with old V1 format index files.
PostingsForMatchers performance is slightly better, with a big drop in
allocation counts due to using yoloString for LabelValues:
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 36698 36681 -0.05%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 522786 560887 +7.29%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 511652 537680 +5.09%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 522102 564239 +8.07%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113689911 111795919 -1.67%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 135825572 132871085 -2.18%
BenchmarkPostingsForMatchers/Block/i=~""-4 40782628 38038181 -6.73%
BenchmarkPostingsForMatchers/Block/i!=""-4 31267869 29194327 -6.63%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112733329 111568823 -1.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112868153 111232029 -1.45%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 31338257 29349446 -6.35%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 32054482 29972436 -6.50%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 136504654 133968442 -1.86%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 27960350 27264997 -2.49%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 136765564 133860724 -2.12%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 163714583 159453668 -2.60%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 17 15 -11.76%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100012 12 -99.99%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200040 100040 -49.99%
BenchmarkPostingsForMatchers/Block/i=~""-4 200045 100045 -49.99%
BenchmarkPostingsForMatchers/Block/i!=""-4 200041 100041 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100017 17 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100023 23 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200046 100046 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200050 100050 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200049 100049 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111150 11150 -89.97%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200055 100055 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311238 111238 -64.26%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 552 1544 +179.71%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600482 1606125 +0.35%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 17259065 17264709 +0.03%
BenchmarkPostingsForMatchers/Block/i=~""-4 17259150 17264780 +0.03%
BenchmarkPostingsForMatchers/Block/i!=""-4 17259048 17264680 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600610 1606242 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600813 1606434 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17259176 17264808 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17259304 17264936 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17259333 17264965 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3142628 3148262 +0.18%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17259509 17265141 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20405680 20416944 +0.06%
However, overall Select performance is down and involves more allocs, because
resolving a symbol now takes more than a simple map lookup and all the strings
returned have to be allocated:
benchmark old ns/op new ns/op delta
BenchmarkQuerierSelect/Block/1of1000000-4 506092636 862678244 +70.46%
BenchmarkQuerierSelect/Block/10of1000000-4 505638968 860917636 +70.26%
BenchmarkQuerierSelect/Block/100of1000000-4 505229450 882150048 +74.60%
BenchmarkQuerierSelect/Block/1000of1000000-4 515905414 862241115 +67.13%
BenchmarkQuerierSelect/Block/10000of1000000-4 516785354 874841110 +69.29%
BenchmarkQuerierSelect/Block/100000of1000000-4 540742808 907030187 +67.74%
BenchmarkQuerierSelect/Block/1000000of1000000-4 815224288 1181236903 +44.90%
benchmark old allocs new allocs delta
BenchmarkQuerierSelect/Block/1of1000000-4 4000020 6000020 +50.00%
BenchmarkQuerierSelect/Block/10of1000000-4 4000038 6000038 +50.00%
BenchmarkQuerierSelect/Block/100of1000000-4 4000218 6000218 +50.00%
BenchmarkQuerierSelect/Block/1000of1000000-4 4002018 6002018 +49.97%
BenchmarkQuerierSelect/Block/10000of1000000-4 4020018 6020018 +49.75%
BenchmarkQuerierSelect/Block/100000of1000000-4 4200018 6200018 +47.62%
BenchmarkQuerierSelect/Block/1000000of1000000-4 6000018 8000019 +33.33%
benchmark old bytes new bytes delta
BenchmarkQuerierSelect/Block/1of1000000-4 176001468 227201476 +29.09%
BenchmarkQuerierSelect/Block/10of1000000-4 176002620 227202628 +29.09%
BenchmarkQuerierSelect/Block/100of1000000-4 176014140 227214148 +29.09%
BenchmarkQuerierSelect/Block/1000of1000000-4 176129340 227329348 +29.07%
BenchmarkQuerierSelect/Block/10000of1000000-4 177281340 228481348 +28.88%
BenchmarkQuerierSelect/Block/100000of1000000-4 188801340 240001348 +27.12%
BenchmarkQuerierSelect/Block/1000000of1000000-4 304001340 355201616 +16.84%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
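A rough sketch of the sampled symbol-table lookup described above, assuming
the symbols sit in one byte slice as uvarint-length-prefixed strings and that
every nth symbol's byte offset is kept in memory; the names and layout are
illustrative, not the actual index format:

package sketch

import (
        "encoding/binary"
        "fmt"
)

// symbols holds the raw symbol-table bytes plus the byte offset of every nth
// symbol, so a lookup walks forward at most n-1 entries from a sampled offset.
type symbols struct {
        data []byte // symbols stored back to back as uvarint length + bytes
        offs []int  // byte offset of every nth symbol
        n    int    // sampling interval
}

// lookup resolves the i-th symbol (0-based) without keeping every symbol
// string in heap memory.
func (s *symbols) lookup(i int) (string, error) {
        if s.n == 0 || i/s.n >= len(s.offs) {
                return "", fmt.Errorf("symbol index %d out of range", i)
        }
        pos := s.offs[i/s.n]
        // Walk forward over the symbols between the sampled offset and the
        // one we want.
        for skip := i % s.n; skip > 0; skip-- {
                l, n := binary.Uvarint(s.data[pos:])
                if n <= 0 {
                        return "", fmt.Errorf("corrupt symbol table at offset %d", pos)
                }
                pos += n + int(l)
        }
        l, n := binary.Uvarint(s.data[pos:])
        if n <= 0 || pos+n+int(l) > len(s.data) {
                return "", fmt.Errorf("corrupt symbol table at offset %d", pos)
        }
        return string(s.data[pos+n : pos+n+int(l)]), nil
}

The yoloString mentioned above is the usual unsafe []byte-to-string view; it
avoids a copy and is safe here only because PostingsForMatchers does not hold
on to the returned strings.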
2019-12-12 08:55:32 -08:00
|
|
|
if s, ok := r.nameSymbols[o]; ok {
|
|
|
|
return s, nil
|
2017-04-25 10:40:52 -07:00
|
|
|
}
|
2019-12-12 08:55:32 -08:00
|
|
|
return r.symbols.Lookup(o)
|
2017-04-25 10:40:52 -07:00
|
|
|
}
|
|
|
|
|
Stream symbols during compaction. (#6468)
Rather than buffering symbols up in RAM, handle them one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write. (A sketch of the
streaming merge appears after the benchmark tables below.)
There is some slowdown in compaction, due to having to look symbols up in a
file rather than via a hash map. This is noise compared to the overall cost
of compacting series with thousands of samples, though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
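As a hedged illustration of the streaming approach, a compactor could merge
the already-sorted symbol iterators of its input blocks and write each
distinct symbol as it is produced, never holding a full symbol set in RAM;
the stringIter interface and streamSymbols helper are assumptions for this
sketch, not the actual compactor API:

package sketch

// stringIter is a minimal iterator over sorted symbols (illustrative; the
// real code uses the index StringIter).
type stringIter interface {
        Next() bool
        At() string
        Err() error
}

// streamSymbols merges the sorted symbol iterators of the input blocks and
// calls write once per distinct symbol. A linear scan picks the next smallest
// head each step; a real implementation would likely use a heap.
func streamSymbols(write func(string) error, its ...stringIter) error {
        cur := make([]string, len(its))
        ok := make([]bool, len(its))
        for i, it := range its {
                if ok[i] = it.Next(); ok[i] {
                        cur[i] = it.At()
                }
        }
        var last string
        wrote := false
        for {
                min := -1
                for i := range its {
                        if ok[i] && (min < 0 || cur[i] < cur[min]) {
                                min = i
                        }
                }
                if min < 0 {
                        break // all iterators exhausted
                }
                if s := cur[min]; !wrote || s != last {
                        if err := write(s); err != nil {
                                return err
                        }
                        last, wrote = s, true
                }
                if ok[min] = its[min].Next(); ok[min] {
                        cur[min] = its[min].At()
                }
        }
        for _, it := range its {
                if err := it.Err(); err != nil {
                        return err
                }
        }
        return nil
}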
2019-12-17 11:49:54 -08:00
|
|
|
// Symbols returns an iterator over the symbols that exist within the index.
|
|
|
|
func (r *Reader) Symbols() StringIter {
|
|
|
|
return r.symbols.Iter()
|
2017-08-05 04:31:48 -07:00
|
|
|
}
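For reference, the iterator returned here follows the usual Next/At/Err shape;
a small usage sketch, assuming an already-opened Reader r and an imported fmt
package:

it := r.Symbols()
for it.Next() {
        fmt.Println(it.At()) // each symbol string stored in the index
}
if err := it.Err(); err != nil {
        // handle the iteration error
}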
|
|
|
|
|
2019-01-11 09:31:26 -08:00
|
|
|
// SymbolTableSize returns the symbol table size in bytes.
|
2018-11-02 02:52:45 -07:00
|
|
|
func (r *Reader) SymbolTableSize() uint64 {
|
2019-12-12 08:55:32 -08:00
|
|
|
return uint64(r.symbols.Size())
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
|
|
|
|
2020-06-25 06:10:29 -07:00
|
|
|
// SortedLabelValues returns the sorted values that exist for the given label name.
|
|
|
|
// It is not safe to use the return value beyond the lifetime of the byte slice
|
|
|
|
// passed into the Reader.
|
2021-02-09 09:38:35 -08:00
|
|
|
func (r *Reader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
|
|
|
|
values, err := r.LabelValues(name, matchers...)
|
2020-06-25 06:10:29 -07:00
|
|
|
if err == nil && r.version == FormatV1 {
|
2022-09-30 07:33:56 -07:00
|
|
|
slices.Sort(values)
|
2020-06-25 06:10:29 -07:00
|
|
|
}
|
|
|
|
return values, err
|
|
|
|
}
|
|
|
|
|
2020-01-01 03:21:42 -08:00
|
|
|
// LabelValues returns the values that exist for the given label name.
|
2019-12-12 08:55:32 -08:00
|
|
|
// It is not safe to use the return value beyond the lifetime of the byte slice
|
|
|
|
// passed into the Reader.
|
2021-02-09 09:38:35 -08:00
|
|
|
// TODO(replay): Support filtering by matchers
|
|
|
|
func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
|
|
|
|
if len(matchers) > 0 {
|
|
|
|
return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers)
|
|
|
|
}
|
|
|
|
|
2020-01-06 06:06:11 -08:00
|
|
|
if r.version == FormatV1 {
|
2020-01-01 03:21:42 -08:00
|
|
|
e, ok := r.postingsV1[name]
|
2020-01-06 06:06:11 -08:00
|
|
|
if !ok {
|
Replace StringTuples with []string
Benchmarks show slight cpu/allocs improvements.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Head/n="1"-4 269978625 235305110 -12.84%
BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 129739974 121646193 -6.24%
BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 123826274 122056253 -1.43%
BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 126962188 130038235 +2.42%
BenchmarkPostingsForMatchers/Head/i=~".*"-4 6423653989 5991126455 -6.73%
BenchmarkPostingsForMatchers/Head/i=~".+"-4 6934647521 7033370634 +1.42%
BenchmarkPostingsForMatchers/Head/i=~""-4 1177781285 1121497736 -4.78%
BenchmarkPostingsForMatchers/Head/i!=""-4 7033680256 7246094991 +3.02%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 293702332 287440212 -2.13%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 307628268 307039964 -0.19%
BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 512247746 480003862 -6.29%
BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 361199794 367066917 +1.62%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 478863761 476037784 -0.59%
BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 103394659 102902098 -0.48%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 482552781 475453903 -1.47%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 559257389 589297047 +5.37%
BenchmarkPostingsForMatchers/Block/n="1"-4 36492 37012 +1.42%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 557788 611903 +9.70%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 554443 573814 +3.49%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 553227 553826 +0.11%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113855090 111707221 -1.89%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 133994674 136520728 +1.89%
BenchmarkPostingsForMatchers/Block/i=~""-4 38138091 36299898 -4.82%
BenchmarkPostingsForMatchers/Block/i!=""-4 28861213 27396723 -5.07%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112699941 110853868 -1.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 113198026 111389742 -1.60%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 28994069 27363804 -5.62%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 29709406 28589223 -3.77%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 134695119 135736971 +0.77%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 26783286 25826928 -3.57%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 134733254 134116739 -0.46%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 160713937 158802768 -1.19%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Head/n="1"-4 36 36 +0.00%
BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 38 38 +0.00%
BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 38 38 +0.00%
BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 42 40 -4.76%
BenchmarkPostingsForMatchers/Head/i=~".*"-4 61 59 -3.28%
BenchmarkPostingsForMatchers/Head/i=~".+"-4 100088 100087 -0.00%
BenchmarkPostingsForMatchers/Head/i=~""-4 100053 100051 -0.00%
BenchmarkPostingsForMatchers/Head/i!=""-4 100087 100085 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 44 42 -4.55%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 50 48 -4.00%
BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 100076 100074 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 100077 100075 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 100077 100074 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 11167 11165 -0.02%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 100082 100080 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 111265 111261 -0.00%
BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 13 -13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 12 10 -16.67%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 100040 100038 -0.00%
BenchmarkPostingsForMatchers/Block/i=~""-4 100045 100043 -0.00%
BenchmarkPostingsForMatchers/Block/i!=""-4 100041 100039 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 17 15 -11.76%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 23 21 -8.70%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 100046 100044 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 100050 100048 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 100049 100047 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 11150 11148 -0.02%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 100055 100053 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 111238 111234 -0.00%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Head/n="1"-4 10887816 10887817 +0.00%
BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 5456648 5456648 +0.00%
BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 5456648 5456648 +0.00%
BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 5456792 5456712 -0.00%
BenchmarkPostingsForMatchers/Head/i=~".*"-4 258254408 258254328 -0.00%
BenchmarkPostingsForMatchers/Head/i=~".+"-4 273912888 273912904 +0.00%
BenchmarkPostingsForMatchers/Head/i=~""-4 17266680 17266600 -0.00%
BenchmarkPostingsForMatchers/Head/i!=""-4 273912416 273912336 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 7062578 7062498 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 7062770 7062690 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 28152346 28152266 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 22721178 22721098 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 22721336 22721224 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 3623804 3623733 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 22721480 22721400 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 24816652 24816444 -0.00%
BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 1544 1464 -5.18%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1606114 1606045 -0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 17264709 17264629 -0.00%
BenchmarkPostingsForMatchers/Block/i=~""-4 17264780 17264696 -0.00%
BenchmarkPostingsForMatchers/Block/i!=""-4 17264680 17264600 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1606253 1606165 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1606445 1606348 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17264808 17264728 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17264936 17264856 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17264965 17264885 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3148262 3148182 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17265141 17265061 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20416944 20416784 -0.00%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2020-01-01 03:38:01 -08:00
|
|
|
return nil, nil
|
2020-01-06 06:06:11 -08:00
|
|
|
}
|
|
|
|
values := make([]string, 0, len(e))
|
|
|
|
for k := range e {
|
|
|
|
values = append(values, k)
|
|
|
|
}
|
2020-01-01 03:38:01 -08:00
|
|
|
return values, nil
|
2020-01-06 06:06:11 -08:00
|
|
|
|
|
|
|
}
|
2020-01-01 03:21:42 -08:00
|
|
|
e, ok := r.postings[name]
|
2017-03-07 03:47:49 -08:00
|
|
|
if !ok {
return nil, nil
}
Load only some offsets of the symbol table into memory.
Rather than keeping the entire symbol table in memory, keep every nth
offset and walk from there to the entry we need. This ends up slightly
slower, ~360ms per 1M series returned from PostingsForMatchers, which is
not much considering the rest of the CPU such a query would go on to
use. (A minimal sketch of this sampled-offset walk follows the benchmark
figures below.)
Make LabelValues use the postings tables, rather than having
to do symbol lookups. Use yoloString, as PostingsForMatchers
doesn't need the strings to stick around, and adjust the API
call so the Querier is kept open until everything is marshalled.
Remove the allocatedSymbols memory optimisation, as we no longer keep all the
symbol strings in heap memory. Remove LabelValuesFor and LabelIndices,
which are dead code. Ensure we still have tests for label indices,
and add a missing test that we can work with old V1 format index files.
PostingsForMatchers performance is slightly better, with a big drop in
allocation counts due to using yoloString for LabelValues:
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 36698 36681 -0.05%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 522786 560887 +7.29%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 511652 537680 +5.09%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 522102 564239 +8.07%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113689911 111795919 -1.67%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 135825572 132871085 -2.18%
BenchmarkPostingsForMatchers/Block/i=~""-4 40782628 38038181 -6.73%
BenchmarkPostingsForMatchers/Block/i!=""-4 31267869 29194327 -6.63%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112733329 111568823 -1.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112868153 111232029 -1.45%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 31338257 29349446 -6.35%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 32054482 29972436 -6.50%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 136504654 133968442 -1.86%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 27960350 27264997 -2.49%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 136765564 133860724 -2.12%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 163714583 159453668 -2.60%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 17 15 -11.76%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100012 12 -99.99%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200040 100040 -49.99%
BenchmarkPostingsForMatchers/Block/i=~""-4 200045 100045 -49.99%
BenchmarkPostingsForMatchers/Block/i!=""-4 200041 100041 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100017 17 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100023 23 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200046 100046 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200050 100050 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200049 100049 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111150 11150 -89.97%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200055 100055 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311238 111238 -64.26%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 552 1544 +179.71%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600482 1606125 +0.35%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 17259065 17264709 +0.03%
BenchmarkPostingsForMatchers/Block/i=~""-4 17259150 17264780 +0.03%
BenchmarkPostingsForMatchers/Block/i!=""-4 17259048 17264680 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600610 1606242 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600813 1606434 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17259176 17264808 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17259304 17264936 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17259333 17264965 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3142628 3148262 +0.18%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17259509 17265141 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20405680 20416944 +0.06%
However, overall Select performance is down and involves more allocations, because
resolving a symbol now takes more than a simple map lookup and all of the strings
returned have to be allocated:
benchmark old ns/op new ns/op delta
BenchmarkQuerierSelect/Block/1of1000000-4 506092636 862678244 +70.46%
BenchmarkQuerierSelect/Block/10of1000000-4 505638968 860917636 +70.26%
BenchmarkQuerierSelect/Block/100of1000000-4 505229450 882150048 +74.60%
BenchmarkQuerierSelect/Block/1000of1000000-4 515905414 862241115 +67.13%
BenchmarkQuerierSelect/Block/10000of1000000-4 516785354 874841110 +69.29%
BenchmarkQuerierSelect/Block/100000of1000000-4 540742808 907030187 +67.74%
BenchmarkQuerierSelect/Block/1000000of1000000-4 815224288 1181236903 +44.90%
benchmark old allocs new allocs delta
BenchmarkQuerierSelect/Block/1of1000000-4 4000020 6000020 +50.00%
BenchmarkQuerierSelect/Block/10of1000000-4 4000038 6000038 +50.00%
BenchmarkQuerierSelect/Block/100of1000000-4 4000218 6000218 +50.00%
BenchmarkQuerierSelect/Block/1000of1000000-4 4002018 6002018 +49.97%
BenchmarkQuerierSelect/Block/10000of1000000-4 4020018 6020018 +49.75%
BenchmarkQuerierSelect/Block/100000of1000000-4 4200018 6200018 +47.62%
BenchmarkQuerierSelect/Block/1000000of1000000-4 6000018 8000019 +33.33%
benchmark old bytes new bytes delta
BenchmarkQuerierSelect/Block/1of1000000-4 176001468 227201476 +29.09%
BenchmarkQuerierSelect/Block/10of1000000-4 176002620 227202628 +29.09%
BenchmarkQuerierSelect/Block/100of1000000-4 176014140 227214148 +29.09%
BenchmarkQuerierSelect/Block/1000of1000000-4 176129340 227329348 +29.07%
BenchmarkQuerierSelect/Block/10000of1000000-4 177281340 228481348 +28.88%
BenchmarkQuerierSelect/Block/100000of1000000-4 188801340 240001348 +27.12%
BenchmarkQuerierSelect/Block/1000000of1000000-4 304001340 355201616 +16.84%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
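The sampled-offset walk described in this message can be modelled in a few lines. The standalone sketch below is illustrative only: sampledSymbols, its uvarint-prefixed encoding, and factor = 32 (chosen to mirror the symbolFactor constant used further down) are assumptions, not the index.Reader implementation:

package main

import (
	"encoding/binary"
	"fmt"
)

const factor = 32 // sampling rate; mirrors symbolFactor below, the value is an assumption

// sampledSymbols models a serialized symbol table of which only every
// factor-th offset is kept in memory.
type sampledSymbols struct {
	data    []byte // uvarint-length-prefixed strings, e.g. an mmapped region
	offsets []int  // byte offset of every factor-th symbol
}

func build(symbols []string) *sampledSymbols {
	s := &sampledSymbols{}
	for i, sym := range symbols {
		if i%factor == 0 {
			s.offsets = append(s.offsets, len(s.data))
		}
		s.data = binary.AppendUvarint(s.data, uint64(len(sym)))
		s.data = append(s.data, sym...)
	}
	return s
}

// Lookup jumps to the nearest retained offset at or before symbol i and
// walks forward at most factor-1 entries to reach it.
func (s *sampledSymbols) Lookup(i int) string {
	off := s.offsets[i/factor]
	for j := i - i%factor; ; j++ {
		l, n := binary.Uvarint(s.data[off:])
		off += n
		if j == i {
			return string(s.data[off : off+int(l)])
		}
		off += int(l)
	}
}

func main() {
	syms := make([]string, 100)
	for i := range syms {
		syms[i] = fmt.Sprintf("symbol-%03d", i)
	}
	t := build(syms)
	fmt.Println(t.Lookup(0), t.Lookup(37), t.Lookup(99)) // symbol-000 symbol-037 symbol-099
}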
if len(e) == 0 {
return nil, nil
}
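// Capacity estimate: per the commit message above, only every nth offset per name is
// kept in memory, so len(e)*symbolFactor roughly bounds the number of values to read.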
values := make([]string, 0, len(e)*symbolFactor)
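// Read the values for this name sequentially from the postings offset table on disk,
// starting at the first retained entry, instead of doing per-value symbol lookups
// (see the commit message above).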
d := encoding.NewDecbufAt(r.b, int(r.toc.PostingsTable), nil)
d.Skip(e[0].off)
lastVal := e[len(e)-1].value
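// e only holds the sampled entries, but the last value for the name is assumed to
// always be retained, so lastVal can terminate the sequential walk below.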
skip := 0
for d.Err() == nil {
if skip == 0 {
// These are always the same number of bytes,
// and it's faster to skip than parse.
skip = d.Len()
d.Uvarint() // Keycount.
d.UvarintBytes() // Label name.
skip -= d.Len()
} else {
d.Skip(skip)
}
s := yoloString(d.UvarintBytes()) // Label value.
values = append(values, s)
if s == lastVal {
break
}
d.Uvarint64() // Offset.
}
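For reference, the reads inside the loop imply a simple per-entry layout: a uvarint key count, a length-prefixed name and value, then the postings offset. The standalone sketch below round-trips one such entry with encoding/binary; the layout is inferred from the calls above rather than restated from the index format documentation:

package main

import (
	"encoding/binary"
	"fmt"
)

// uvarintBytes reads a uvarint length followed by that many bytes.
func uvarintBytes(b []byte) (val, rest []byte) {
	l, n := binary.Uvarint(b)
	return b[n : n+int(l)], b[n+int(l):]
}

// decodeEntry mirrors the reads in the loop above: keycount, name, value, offset.
func decodeEntry(b []byte) (name, value string, off uint64, rest []byte) {
	_, n := binary.Uvarint(b) // keycount; 2 for a name/value pair
	b = b[n:]
	nameB, b := uvarintBytes(b)
	valueB, b := uvarintBytes(b)
	off, n = binary.Uvarint(b)
	return string(nameB), string(valueB), off, b[n:]
}

func main() {
	// Encode one entry the same way it is read back.
	var buf []byte
	buf = binary.AppendUvarint(buf, 2)
	for _, s := range []string{"job", "prometheus"} {
		buf = binary.AppendUvarint(buf, uint64(len(s)))
		buf = append(buf, s...)
	}
	buf = binary.AppendUvarint(buf, 1234) // postings offset
	name, value, off, _ := decodeEntry(buf)
	fmt.Println(name, value, off) // job prometheus 1234
}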
if d.Err() != nil {
return nil, errors.Wrap(d.Err(), "get postings offset entry")
}
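yoloString above is assumed to be the usual zero-copy []byte-to-string conversion; the sketch below shows that one-liner together with the caveat the commit message leans on: the resulting string aliases the underlying buffer, which is why the API is adjusted to keep the Querier open until everything is marshalled:

package main

import (
	"fmt"
	"unsafe"
)

// yoloString reinterprets a byte slice as a string without copying.
// The string shares the slice's backing array, so it must not outlive it.
func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}

func main() {
	buf := []byte("up")
	s := yoloString(buf) // no allocation, s aliases buf
	fmt.Println(s)
	buf[0] = 'U' // mutating the buffer is visible through s: handle with care
	fmt.Println(s)
}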
BenchmarkPostingsForMatchers/Block/i!=""-4 17264680 17264600 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1606253 1606165 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1606445 1606348 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17264808 17264728 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17264936 17264856 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17264965 17264885 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3148262 3148182 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17265141 17265061 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20416944 20416784 -0.00%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2020-01-01 03:38:01 -08:00
|
|
|
return values, nil
|
2017-03-07 03:47:49 -08:00
|
|
|
}
|
|
|
|
|
2021-07-20 05:38:08 -07:00
|
|
|
// LabelNamesFor returns all the label names for the series referred to by IDs.
|
|
|
|
// The names returned are sorted.
|
2021-11-06 03:10:04 -07:00
|
|
|
func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) {
|
2021-07-20 05:38:08 -07:00
|
|
|
// Gather the offsets of the name symbols in the symbol table first.
|
|
|
|
offsetsMap := make(map[uint32]struct{})
|
|
|
|
for _, id := range ids {
|
|
|
|
offset := id
|
|
|
|
// In version 2 series IDs are no longer exact references but series are 16-byte padded
|
|
|
|
// and the ID is the actual position divided by 16.
|
|
|
|
if r.version == FormatV2 {
|
|
|
|
offset = id * 16
|
|
|
|
}
|
|
|
|
|
|
|
|
d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)
|
|
|
|
buf := d.Get()
|
|
|
|
if d.Err() != nil {
|
|
|
|
return nil, errors.Wrap(d.Err(), "get buffer for series")
|
|
|
|
}
|
|
|
|
|
|
|
|
offsets, err := r.dec.LabelNamesOffsetsFor(buf)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "get label name offsets")
|
|
|
|
}
|
|
|
|
for _, off := range offsets {
|
|
|
|
offsetsMap[off] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the unique symbols.
|
|
|
|
names := make([]string, 0, len(offsetsMap))
|
|
|
|
for off := range offsetsMap {
|
|
|
|
name, err := r.lookupSymbol(off)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "lookup symbol in LabelNamesFor")
|
|
|
|
}
|
|
|
|
names = append(names, name)
|
|
|
|
}
|
|
|
|
|
2022-09-30 07:33:56 -07:00
|
|
|
slices.Sort(names)
|
2021-07-20 05:38:08 -07:00
|
|
|
|
|
|
|
return names, nil
|
|
|
|
}
|
|
|
|
|
2021-02-09 09:38:35 -08:00
|
|
|
// LabelValueFor returns label value for the given label name in the series referred to by ID.
|
2021-11-06 03:10:04 -07:00
|
|
|
func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, error) {
|
2021-02-09 09:38:35 -08:00
|
|
|
offset := id
|
|
|
|
// In version 2 series IDs are no longer exact references but series are 16-byte padded
|
|
|
|
// and the ID is the actual position divided by 16.
|
|
|
|
if r.version == FormatV2 {
|
|
|
|
offset = id * 16
|
|
|
|
}
|
|
|
|
d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)
|
|
|
|
buf := d.Get()
|
|
|
|
if d.Err() != nil {
|
|
|
|
return "", errors.Wrap(d.Err(), "label values for")
|
|
|
|
}
|
|
|
|
|
|
|
|
value, err := r.dec.LabelValueFor(buf, label)
|
|
|
|
if err != nil {
|
|
|
|
return "", storage.ErrNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
if value == "" {
|
|
|
|
return "", storage.ErrNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
return value, nil
|
|
|
|
}
|
|
|
|
|
2018-01-10 23:50:42 -08:00
|
|
|
// Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
|
2021-11-06 03:10:04 -07:00
|
|
|
func (r *Reader) Series(id storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta) error {
|
2018-01-10 11:19:16 -08:00
|
|
|
offset := id
|
2018-02-09 04:11:03 -08:00
|
|
|
// In version 2 series IDs are no longer exact references but series are 16-byte padded
|
|
|
|
// and the ID is the actual position divided by 16.
|
2019-01-11 09:31:26 -08:00
|
|
|
if r.version == FormatV2 {
|
2018-02-09 04:11:03 -08:00
|
|
|
offset = id * 16
|
2018-01-10 11:19:16 -08:00
|
|
|
}
|
2019-02-22 09:11:11 -08:00
|
|
|
d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)
|
|
|
|
if d.Err() != nil {
|
|
|
|
return d.Err()
|
2017-03-07 03:47:49 -08:00
|
|
|
}
|
2019-02-22 09:11:11 -08:00
|
|
|
return errors.Wrap(r.dec.Series(d.Get(), lbls, chks), "read series")
|
2017-03-07 03:47:49 -08:00
|
|
|
}
|
|
|
|
|
Reduce memory used by postings offset table.
Rather than keeping the offset of each postings list, instead
keep only every nth offset from the postings offset table. As postings
list offsets have always been sorted, we can then get to the closest
entry before the one we want and iterate forwards.
I haven't done much tuning on the sampling factor of 32; it was chosen to try
not to read through more than a 4k page of data.
Switch to a bulk interface for fetching postings. Use it to avoid having
to re-read parts of the postings offset table when querying lots of it.
For an index like the one BenchmarkHeadPostingForMatchers uses, RAM
for r.postings drops from 3.79MB to 80.19kB, or about 48x.
Bytes allocated go down by 30%, and surprisingly CPU usage drops by
4-6% for typical queries too.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 35231 36673 +4.09%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 563380 540627 -4.04%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 536782 534186 -0.48%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 533990 541550 +1.42%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113374598 117969608 +4.05%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 146329884 139651442 -4.56%
BenchmarkPostingsForMatchers/Block/i=~""-4 50346510 44961127 -10.70%
BenchmarkPostingsForMatchers/Block/i!=""-4 41261550 35356165 -14.31%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112544418 116904010 +3.87%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112487086 116864918 +3.89%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 41094758 35457904 -13.72%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 41906372 36151473 -13.73%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 147262414 140424800 -4.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 28615629 27872072 -2.60%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 147117177 140462403 -4.52%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 175096826 167902298 -4.11%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 4 6 +50.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 7 11 +57.14%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 17 +13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100010 100012 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200069 200040 -0.01%
BenchmarkPostingsForMatchers/Block/i=~""-4 200072 200045 -0.01%
BenchmarkPostingsForMatchers/Block/i!=""-4 200070 200041 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100013 100017 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100017 100023 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200073 200046 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200075 200050 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200074 200049 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111165 111150 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200078 200055 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311282 311238 -0.01%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 264 296 +12.12%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 360 424 +17.78%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 520 552 +6.15%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600461 1600482 +0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 24900801 17259077 -30.69%
BenchmarkPostingsForMatchers/Block/i=~""-4 24900836 17259151 -30.69%
BenchmarkPostingsForMatchers/Block/i!=""-4 24900760 17259048 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600557 1600621 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600717 1600813 +0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 24900856 17259176 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 24900952 17259304 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 24900993 17259333 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3788311 3142630 -17.04%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 24901137 17259509 -30.69%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 28693086 20405680 -28.88%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-05 10:27:40 -08:00
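A minimal, self-contained sketch of the sampling scheme described above, using
hypothetical names (sampledEntry, findStart, sampleRate) rather than this file's
actual data structures: keep only every 32nd (value, offset) entry of the
postings offset table in memory, binary-search to the closest sampled entry at
or before the wanted value, then scan the on-disk table forwards from that offset.

// sampledEntry is one in-memory sample: a label value and the byte offset of
// its entry inside the on-disk postings offset table (illustrative only).
type sampledEntry struct {
	value string
	off   int
}

// findStart returns the offset to begin a forward scan from: the last sampled
// entry whose value is <= want. As the full table is sorted by value, the
// wanted entry is reached after skipping at most sampleRate-1 entries.
func findStart(samples []sampledEntry, want string) (int, bool) {
	i := sort.Search(len(samples), func(i int) bool { return samples[i].value > want })
	if i == 0 {
		return 0, false // want sorts before every sampled value
	}
	return samples[i-1].off, true
}

The trade-off matches the commit message: in-memory size drops by roughly the
sampling factor, while each lookup pays for decoding a handful of extra entries,
which is intended to stay within a single 4k page.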
|
|
|
func (r *Reader) Postings(name string, values ...string) (Postings, error) {
|
2020-01-06 06:06:11 -08:00
|
|
|
if r.version == FormatV1 {
|
|
|
|
e, ok := r.postingsV1[name]
|
|
|
|
if !ok {
|
|
|
|
return EmptyPostings(), nil
|
|
|
|
}
|
|
|
|
res := make([]Postings, 0, len(values))
|
|
|
|
for _, v := range values {
|
|
|
|
postingsOff, ok := e[v]
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// Read from the postings table.
|
|
|
|
d := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)
|
|
|
|
_, p, err := r.dec.Postings(d.Get())
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "decode postings")
|
|
|
|
}
|
|
|
|
res = append(res, p)
|
|
|
|
}
|
|
|
|
return Merge(res...), nil
|
|
|
|
}
|
|
|
|
|
2018-11-02 03:45:09 -07:00
|
|
|
e, ok := r.postings[name]
|
|
|
|
if !ok {
|
|
|
|
return EmptyPostings(), nil
|
|
|
|
}
|
2019-12-05 10:27:40 -08:00
|
|
|
|
|
|
|
if len(values) == 0 {
|
2017-12-01 03:06:37 -08:00
|
|
|
return EmptyPostings(), nil
|
2017-03-07 03:47:49 -08:00
|
|
|
}
|
2019-12-05 10:27:40 -08:00
|
|
|
|
|
|
|
res := make([]Postings, 0, len(values))
|
|
|
|
skip := 0
|
|
|
|
valueIndex := 0
|
|
|
|
for valueIndex < len(values) && values[valueIndex] < e[0].value {
|
|
|
|
// Discard values before the start.
|
|
|
|
valueIndex++
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
2019-12-05 10:27:40 -08:00
|
|
|
for valueIndex < len(values) {
|
|
|
|
value := values[valueIndex]
|
|
|
|
|
|
|
|
i := sort.Search(len(e), func(i int) bool { return e[i].value >= value })
|
|
|
|
if i == len(e) {
|
|
|
|
// We're past the end.
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if i > 0 && e[i].value != value {
|
|
|
|
// Need to look from previous entry.
|
|
|
|
i--
|
|
|
|
}
|
|
|
|
// Don't CRC32 the entire postings offset table, as this is very slow,
|
|
|
|
// so we hope any issues were caught at startup.
|
|
|
|
d := encoding.NewDecbufAt(r.b, int(r.toc.PostingsTable), nil)
|
|
|
|
d.Skip(e[i].off)
|
|
|
|
|
|
|
|
// Iterate on the offset table.
|
|
|
|
var postingsOff uint64 // The offset into the postings table.
|
|
|
|
for d.Err() == nil {
|
|
|
|
if skip == 0 {
|
|
|
|
// These are always the same number of bytes,
|
|
|
|
// and it's faster to skip than parse.
|
|
|
|
skip = d.Len()
|
|
|
|
d.Uvarint() // Keycount.
|
|
|
|
d.UvarintBytes() // Label name.
|
|
|
|
skip -= d.Len()
|
|
|
|
} else {
|
|
|
|
d.Skip(skip)
|
|
|
|
}
|
|
|
|
v := d.UvarintBytes() // Label value.
|
|
|
|
postingsOff = d.Uvarint64() // Offset.
|
|
|
|
for string(v) >= value {
|
|
|
|
if string(v) == value {
|
|
|
|
// Read from the postings table.
|
|
|
|
d2 := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)
|
|
|
|
_, p, err := r.dec.Postings(d2.Get())
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "decode postings")
|
|
|
|
}
|
|
|
|
res = append(res, p)
|
|
|
|
}
|
|
|
|
valueIndex++
|
|
|
|
if valueIndex == len(values) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
value = values[valueIndex]
|
|
|
|
}
|
|
|
|
if i+1 == len(e) || value >= e[i+1].value || valueIndex == len(values) {
|
|
|
|
// Need to go to a later postings offset entry, if there is one.
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if d.Err() != nil {
|
|
|
|
return nil, errors.Wrap(d.Err(), "get postings offset entry")
|
|
|
|
}
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
2019-12-05 10:27:40 -08:00
|
|
|
|
|
|
|
return Merge(res...), nil
|
2017-03-07 03:47:49 -08:00
|
|
|
}
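A hedged usage sketch of the bulk interface above: given an already opened
Reader (how it is opened is assumed here), request several values of one label
name in a single call and iterate the merged postings. As in the loop above,
the values are expected in sorted order. printSeriesRefs is illustrative only.

// printSeriesRefs lists the series references matching any of the given
// values for one label name, using a single bulk Postings call.
func printSeriesRefs(r *Reader, name string, values ...string) error {
	p, err := r.Postings(name, values...) // one bulk call instead of one per value
	if err != nil {
		return err
	}
	for p.Next() {
		fmt.Println(p.At()) // storage.SeriesRef of a matching series
	}
	return p.Err()
}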
|
|
|
|
|
2017-12-01 03:06:37 -08:00
|
|
|
// SortedPostings returns the given postings list reordered so that the backing series
|
|
|
|
// are sorted.
|
2017-11-30 06:34:49 -08:00
|
|
|
func (r *Reader) SortedPostings(p Postings) Postings {
|
2017-08-05 04:31:48 -07:00
|
|
|
return p
|
|
|
|
}
|
|
|
|
|
2019-01-16 02:03:52 -08:00
|
|
|
// Size returns the size of an index file.
|
|
|
|
func (r *Reader) Size() int64 {
|
|
|
|
return int64(r.b.Len())
|
|
|
|
}
|
|
|
|
|
2018-11-07 07:52:41 -08:00
|
|
|
// LabelNames returns all the unique label names present in the index.
|
2021-07-20 05:38:08 -07:00
|
|
|
// TODO(twilkie) implement support for matchers
|
|
|
|
func (r *Reader) LabelNames(matchers ...*labels.Matcher) ([]string, error) {
|
|
|
|
if len(matchers) > 0 {
|
|
|
|
return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers)
|
|
|
|
}
|
|
|
|
|
Load only some of the symbol table offsets into memory.
Rather than keeping the entire symbol table in memory, keep every nth
offset and walk from there to the entry we need. This ends up slightly
slower, ~360ms per 1M series returned from PostingsForMatchers, which is
not much considering the rest of the CPU such a query would go on to
use.
Make LabelValues use the postings tables, rather than having
to do symbol lookups. Use yoloString, as PostingsForMatchers
doesn't need the strings to stick around, and adjust the API
call to keep the Querier open until it's all marshalled.
Remove the allocatedSymbols memory optimisation, as we no longer keep all the
symbol strings in heap memory. Remove LabelValuesFor and LabelIndices,
which are dead code. Ensure we still have tests for label indices,
and add a missing test that we can work with old V1 format index files.
PostingsForMatchers performance is slightly better, with a big drop in
allocation counts due to using yoloString for LabelValues:
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Block/n="1"-4 36698 36681 -0.05%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 522786 560887 +7.29%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 511652 537680 +5.09%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 522102 564239 +8.07%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113689911 111795919 -1.67%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 135825572 132871085 -2.18%
BenchmarkPostingsForMatchers/Block/i=~""-4 40782628 38038181 -6.73%
BenchmarkPostingsForMatchers/Block/i!=""-4 31267869 29194327 -6.63%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112733329 111568823 -1.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 112868153 111232029 -1.45%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 31338257 29349446 -6.35%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 32054482 29972436 -6.50%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 136504654 133968442 -1.86%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 27960350 27264997 -2.49%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 136765564 133860724 -2.12%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 163714583 159453668 -2.60%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 17 15 -11.76%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 100012 12 -99.99%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 200040 100040 -49.99%
BenchmarkPostingsForMatchers/Block/i=~""-4 200045 100045 -49.99%
BenchmarkPostingsForMatchers/Block/i!=""-4 200041 100041 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 100017 17 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 100023 23 -99.98%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 200046 100046 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 200050 100050 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 200049 100049 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 111150 11150 -89.97%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 200055 100055 -49.99%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 311238 111238 -64.26%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 552 1544 +179.71%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1600482 1606125 +0.35%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 17259065 17264709 +0.03%
BenchmarkPostingsForMatchers/Block/i=~""-4 17259150 17264780 +0.03%
BenchmarkPostingsForMatchers/Block/i!=""-4 17259048 17264680 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1600610 1606242 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1600813 1606434 +0.35%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17259176 17264808 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17259304 17264936 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17259333 17264965 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3142628 3148262 +0.18%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17259509 17265141 +0.03%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20405680 20416944 +0.06%
However, overall Select performance is down and involves more allocs, due to
needing more than a simple map lookup to resolve a symbol and because all the
returned strings are allocated:
benchmark old ns/op new ns/op delta
BenchmarkQuerierSelect/Block/1of1000000-4 506092636 862678244 +70.46%
BenchmarkQuerierSelect/Block/10of1000000-4 505638968 860917636 +70.26%
BenchmarkQuerierSelect/Block/100of1000000-4 505229450 882150048 +74.60%
BenchmarkQuerierSelect/Block/1000of1000000-4 515905414 862241115 +67.13%
BenchmarkQuerierSelect/Block/10000of1000000-4 516785354 874841110 +69.29%
BenchmarkQuerierSelect/Block/100000of1000000-4 540742808 907030187 +67.74%
BenchmarkQuerierSelect/Block/1000000of1000000-4 815224288 1181236903 +44.90%
benchmark old allocs new allocs delta
BenchmarkQuerierSelect/Block/1of1000000-4 4000020 6000020 +50.00%
BenchmarkQuerierSelect/Block/10of1000000-4 4000038 6000038 +50.00%
BenchmarkQuerierSelect/Block/100of1000000-4 4000218 6000218 +50.00%
BenchmarkQuerierSelect/Block/1000of1000000-4 4002018 6002018 +49.97%
BenchmarkQuerierSelect/Block/10000of1000000-4 4020018 6020018 +49.75%
BenchmarkQuerierSelect/Block/100000of1000000-4 4200018 6200018 +47.62%
BenchmarkQuerierSelect/Block/1000000of1000000-4 6000018 8000019 +33.33%
benchmark old bytes new bytes delta
BenchmarkQuerierSelect/Block/1of1000000-4 176001468 227201476 +29.09%
BenchmarkQuerierSelect/Block/10of1000000-4 176002620 227202628 +29.09%
BenchmarkQuerierSelect/Block/100of1000000-4 176014140 227214148 +29.09%
BenchmarkQuerierSelect/Block/1000of1000000-4 176129340 227329348 +29.07%
BenchmarkQuerierSelect/Block/10000of1000000-4 177281340 228481348 +28.88%
BenchmarkQuerierSelect/Block/100000of1000000-4 188801340 240001348 +27.12%
BenchmarkQuerierSelect/Block/1000000of1000000-4 304001340 355201616 +16.84%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-12 08:55:32 -08:00
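A rough, self-contained sketch of the symbol-offset sampling described in the
commit message above, with illustrative names and a deliberately simplified
layout (a flat buffer of uvarint-length-prefixed strings via encoding/binary,
not the real symbols section): keep the byte offset of every nth symbol and
walk forwards from the nearest kept offset to the wanted entry.

// appendSymbol encodes one symbol as a uvarint length followed by its bytes
// (a simplified stand-in for the on-disk symbols layout).
func appendSymbol(buf []byte, s string) []byte {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(len(s)))
	buf = append(buf, tmp[:n]...)
	return append(buf, s...)
}

// symbolAt returns symbol number want by jumping to the nearest sampled
// offset (sampled[i] is the offset of symbol i*every) and decoding forwards.
func symbolAt(table []byte, sampled []int, every, want int) string {
	off := sampled[want/every]
	for skip := want % every; ; skip-- {
		l, n := binary.Uvarint(table[off:])
		if skip == 0 {
			return string(table[off+n : off+n+int(l)])
		}
		off += n + int(l)
	}
}

Keeping only every 32nd offset holds a small fraction of the table in memory;
the cost is the forward walk, which the commit message above puts at roughly
~360ms per 1M series returned.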
|
|
|
labelNames := make([]string, 0, len(r.postings))
|
|
|
|
for name := range r.postings {
|
|
|
|
if name == allPostingsKey.Name {
|
|
|
|
// This is not from any metric.
|
|
|
|
continue
|
2018-11-07 07:52:41 -08:00
|
|
|
}
|
|
|
|
labelNames = append(labelNames, name)
|
|
|
|
}
|
2022-09-30 07:33:56 -07:00
|
|
|
slices.Sort(labelNames)
|
2018-11-07 07:52:41 -08:00
|
|
|
return labelNames, nil
|
|
|
|
}
|
|
|
|
|
2020-08-25 01:17:41 -07:00
|
|
|
// NewStringListIter returns a StringIter for the given sorted list of strings.
|
Stream symbols during compaction. (#6468)
Rather than buffering up symbols in RAM, process them one by one
during compaction. Then use the reader's symbol handling
for symbol lookups during the rest of the index write.
There is some slowdown in compaction, due to having to look through a file
rather than doing a hash lookup. This is noise compared to the overall cost of
compacting series with thousands of samples, though.
benchmark old ns/op new ns/op delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 539917175 675341565 +25.08%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 2441815993 2477453524 +1.46%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3978543559 3922909687 -1.40%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 8430219716 8586610007 +1.86%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 1786424591 1909552782 +6.89%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 5328998202 6020839950 +12.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 10085059958 11085278690 +9.92%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 25497010155 27018079806 +5.97%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 2427391406 2817217987 +16.06%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 2592965497 2538805050 -2.09%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 2437388343 2668012858 +9.46%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 2317095324 2787423966 +20.30%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 2600239857 2096973860 -19.35%
benchmark old allocs new allocs delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 500851 470794 -6.00%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 821527 791451 -3.66%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 1141562 1111508 -2.63%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 2141576 2111504 -1.40%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 871466 841424 -3.45%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 1941428 1911415 -1.55%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 3071573 3041510 -0.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 6771648 6741509 -0.45%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 731493 824888 +12.77%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 793918 887311 +11.76%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 811842 905204 +11.50%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 832244 925081 +11.16%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 921553 1019162 +10.59%
benchmark old bytes new bytes delta
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 40532648 35698276 -11.93%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 60340216 53409568 -11.49%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 81087336 72065552 -11.13%
BenchmarkCompaction/type=normal,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 142485576 120878544 -15.16%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=101-4 208661368 203831136 -2.31%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=1001-4 347345904 340484696 -1.98%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=2001-4 585185856 576244648 -1.53%
BenchmarkCompaction/type=vertical,blocks=4,series=10000,samplesPerSeriesPerBlock=5001-4 1357641792 1358966528 +0.10%
BenchmarkCompactionFromHead/labelnames=1,labelvalues=100000-4 126486664 119666744 -5.39%
BenchmarkCompactionFromHead/labelnames=10,labelvalues=10000-4 122323192 115117224 -5.89%
BenchmarkCompactionFromHead/labelnames=100,labelvalues=1000-4 126404504 119469864 -5.49%
BenchmarkCompactionFromHead/labelnames=1000,labelvalues=100-4 119047832 112230408 -5.73%
BenchmarkCompactionFromHead/labelnames=10000,labelvalues=10-4 136576016 116634800 -14.60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-12-17 11:49:54 -08:00
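A small usage sketch of the StringIter contract the streaming approach relies
on: a consumer pulls symbols one at a time via Next/At/Err instead of receiving
a fully materialized slice. drainStrings is an illustrative helper; the
list-backed implementation, NewStringListIter, follows just below.

// drainStrings consumes any StringIter. Real compaction code would write each
// symbol out as it is produced; collecting them here is only for illustration.
func drainStrings(it StringIter) ([]string, error) {
	var out []string
	for it.Next() {
		out = append(out, it.At())
	}
	return out, it.Err()
}

// Example: syms, err := drainStrings(NewStringListIter([]string{"a", "b", "c"}))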
|
|
|
func NewStringListIter(s []string) StringIter {
|
|
|
|
return &stringListIter{l: s}
|
|
|
|
}
|
|
|
|
|
|
|
|
// stringListIter implements StringIter.
|
|
|
|
type stringListIter struct {
|
|
|
|
l []string
|
|
|
|
cur string
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *stringListIter) Next() bool {
|
|
|
|
if len(s.l) == 0 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
s.cur = s.l[0]
|
|
|
|
s.l = s.l[1:]
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
func (s stringListIter) At() string { return s.cur }
|
|
|
|
func (s stringListIter) Err() error { return nil }
|
|
|
|
|
2018-01-08 09:33:35 -08:00
|
|
|
// Decoder provides decoding methods for the v1 and v2 index file format.
|
2017-12-01 03:06:37 -08:00
|
|
|
//
|
|
|
|
// It currently does not contain decoding methods for all entry types but can be extended
|
|
|
|
// with them if there's demand.
|
2018-01-10 11:19:16 -08:00
|
|
|
type Decoder struct {
|
2019-01-11 09:31:26 -08:00
|
|
|
LookupSymbol func(uint32) (string, error)
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Postings returns a postings list for b and its number of elements.
|
2018-01-10 11:19:16 -08:00
|
|
|
func (dec *Decoder) Postings(b []byte) (int, Postings, error) {
|
2019-02-22 09:11:11 -08:00
|
|
|
d := encoding.Decbuf{B: b}
|
|
|
|
n := d.Be32int()
|
|
|
|
l := d.Get()
|
2021-11-24 01:56:37 -08:00
|
|
|
if d.Err() != nil {
|
|
|
|
return 0, nil, d.Err()
|
|
|
|
}
|
|
|
|
if len(l) != 4*n {
|
|
|
|
return 0, nil, fmt.Errorf("unexpected postings length, should be %d bytes for %d postings, got %d bytes", 4*n, n, len(l))
|
|
|
|
}
|
|
|
|
return n, newBigEndianPostings(l), nil
|
2017-12-01 03:06:37 -08:00
|
|
|
}
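For reference, a hedged sketch of the layout Postings above decodes: a 4-byte
big-endian count followed by that many 4-byte big-endian series references.
encodePostings is illustrative and not part of this package's writer.

// encodePostings builds a buffer in the layout Decoder.Postings expects.
func encodePostings(refs []uint32) []byte {
	buf := make([]byte, 4+4*len(refs))
	binary.BigEndian.PutUint32(buf[0:4], uint32(len(refs)))
	for i, r := range refs {
		binary.BigEndian.PutUint32(buf[4+4*i:], r)
	}
	return buf
}

// Usage (illustrative): n, p, err := new(Decoder).Postings(encodePostings([]uint32{4, 8, 15}))
// yields n == 3 and p iterating the refs 4, 8 and 15 in order.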
|
|
|
|
|
2021-07-20 05:38:08 -07:00
|
|
|
// LabelNamesOffsetsFor decodes the offsets of the name symbols for a given series.
|
|
|
|
// They are returned in the same order they're stored, which should be sorted lexicographically.
|
|
|
|
func (dec *Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) {
|
|
|
|
d := encoding.Decbuf{B: b}
|
|
|
|
k := d.Uvarint()
|
|
|
|
|
|
|
|
offsets := make([]uint32, k)
|
|
|
|
for i := 0; i < k; i++ {
|
|
|
|
offsets[i] = uint32(d.Uvarint())
|
|
|
|
_ = d.Uvarint() // skip the label value
|
|
|
|
|
|
|
|
if d.Err() != nil {
|
|
|
|
return nil, errors.Wrap(d.Err(), "read series label offsets")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return offsets, d.Err()
|
|
|
|
}
|
|
|
|
|
2021-02-09 09:38:35 -08:00
|
|
|
// LabelValueFor decodes a label for a given series.
|
|
|
|
func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) {
|
|
|
|
d := encoding.Decbuf{B: b}
|
|
|
|
k := d.Uvarint()
|
|
|
|
|
|
|
|
for i := 0; i < k; i++ {
|
|
|
|
lno := uint32(d.Uvarint())
|
|
|
|
lvo := uint32(d.Uvarint())
|
|
|
|
|
|
|
|
if d.Err() != nil {
|
|
|
|
return "", errors.Wrap(d.Err(), "read series label offsets")
|
|
|
|
}
|
|
|
|
|
|
|
|
ln, err := dec.LookupSymbol(lno)
|
|
|
|
if err != nil {
|
|
|
|
return "", errors.Wrap(err, "lookup label name")
|
|
|
|
}
|
|
|
|
|
|
|
|
if ln == label {
|
|
|
|
lv, err := dec.LookupSymbol(lvo)
|
|
|
|
if err != nil {
|
|
|
|
return "", errors.Wrap(err, "lookup label value")
|
|
|
|
}
|
|
|
|
|
|
|
|
return lv, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return "", d.Err()
|
|
|
|
}
|
|
|
|
|
2017-12-01 03:06:37 -08:00
|
|
|
// Series decodes a series entry from the given byte slice into lset and chks.
|
2018-01-10 11:19:16 -08:00
|
|
|
func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error {
|
2017-12-01 03:06:37 -08:00
|
|
|
*lbls = (*lbls)[:0]
|
|
|
|
*chks = (*chks)[:0]
|
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
d := encoding.Decbuf{B: b}
|
2017-12-01 03:06:37 -08:00
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
k := d.Uvarint()
|
2017-12-01 03:06:37 -08:00
|
|
|
|
|
|
|
for i := 0; i < k; i++ {
|
2019-02-22 09:11:11 -08:00
|
|
|
lno := uint32(d.Uvarint())
|
|
|
|
lvo := uint32(d.Uvarint())
|
2017-12-01 03:06:37 -08:00
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
if d.Err() != nil {
|
|
|
|
return errors.Wrap(d.Err(), "read series label offsets")
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
|
|
|
|
2019-01-11 09:31:26 -08:00
|
|
|
ln, err := dec.LookupSymbol(lno)
|
2017-12-01 03:06:37 -08:00
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "lookup label name")
|
|
|
|
}
|
2019-01-11 09:31:26 -08:00
|
|
|
lv, err := dec.LookupSymbol(lvo)
|
2017-12-01 03:06:37 -08:00
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "lookup label value")
|
|
|
|
}
|
|
|
|
|
|
|
|
*lbls = append(*lbls, labels.Label{Name: ln, Value: lv})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read the chunks' metadata.
|
2019-02-22 09:11:11 -08:00
|
|
|
k = d.Uvarint()
|
2017-12-01 03:06:37 -08:00
|
|
|
|
|
|
|
if k == 0 {
|
2021-08-13 03:41:41 -07:00
|
|
|
return d.Err()
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
t0 := d.Varint64()
|
|
|
|
maxt := int64(d.Uvarint64()) + t0
|
|
|
|
ref0 := int64(d.Uvarint64())
|
2017-12-01 03:06:37 -08:00
|
|
|
|
|
|
|
*chks = append(*chks, chunks.Meta{
|
2021-11-06 03:10:04 -07:00
|
|
|
Ref: chunks.ChunkRef(ref0),
|
2017-12-01 03:06:37 -08:00
|
|
|
MinTime: t0,
|
|
|
|
MaxTime: maxt,
|
|
|
|
})
|
|
|
|
t0 = maxt
|
|
|
|
|
|
|
|
for i := 1; i < k; i++ {
|
2019-02-22 09:11:11 -08:00
|
|
|
mint := int64(d.Uvarint64()) + t0
|
|
|
|
maxt := int64(d.Uvarint64()) + mint
|
2017-12-01 03:06:37 -08:00
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
ref0 += d.Varint64()
|
2017-12-01 03:06:37 -08:00
|
|
|
t0 = maxt
|
|
|
|
|
2019-02-22 09:11:11 -08:00
|
|
|
if d.Err() != nil {
|
|
|
|
return errors.Wrapf(d.Err(), "read meta for chunk %d", i)
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
*chks = append(*chks, chunks.Meta{
|
2021-11-06 03:10:04 -07:00
|
|
|
Ref: chunks.ChunkRef(ref0),
|
2017-12-01 03:06:37 -08:00
|
|
|
MinTime: mint,
|
|
|
|
MaxTime: maxt,
|
|
|
|
})
|
|
|
|
}
|
2019-02-22 09:11:11 -08:00
|
|
|
return d.Err()
|
2017-12-01 03:06:37 -08:00
|
|
|
}
|
2019-12-12 08:55:32 -08:00
|
|
|
|
|
|
|
func yoloString(b []byte) string {
|
|
|
|
return *((*string)(unsafe.Pointer(&b)))
|
|
|
|
}
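A short, hedged note on yoloString: the returned string aliases b's memory with
no copy, so it is only valid while the underlying bytes are neither mutated nor
recycled; it is intended for transient use within a single decode.
labelValueEquals below is an illustrative example of that pattern.

// labelValueEquals compares a raw, decoded byte slice against want without
// allocating; the zero-copy view never escapes this function.
func labelValueEquals(raw []byte, want string) bool {
	return yoloString(raw) == want
}

// If the value must outlive raw, make an owning copy instead: owned := string(raw).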
|