Cleanup after merging tsdb into prometheus

Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>

commit 5ecef3542d (parent 750e438ebb)
@@ -1,8 +1,6 @@
 <!--
 Don't forget!

 - Most PRs would require a CHANGELOG entry.

 - If the PR adds or changes a behaviour or fixes a bug of an exported API it would need a unit/e2e test.

 - Where possible use only exported APIs for tests to simplify the review and make it as close as possible to an actual library usage.
@@ -12,6 +12,7 @@ go_import_path: github.com/prometheus/prometheus
 # random issues on Travis.
 before_install:
 - travis_retry make deps
+- if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install make; fi

 script:
 - make check_license style unused test lint check_assets
Makefile (19 changes)
@@ -14,6 +14,13 @@
 # Needs to be defined before including Makefile.common to auto-generate targets
 DOCKER_ARCHS ?= amd64 armv7 arm64

+TSDB_PROJECT_DIR = "./tsdb"
+TSDB_CLI_DIR="$(TSDB_PROJECT_DIR)/cmd/tsdb"
+TSDB_BIN = "$(TSDB_CLI_DIR)/tsdb"
+TSDB_BENCHMARK_NUM_METRICS ?= 1000
+TSDB_BENCHMARK_DATASET ?= "$(TSDB_PROJECT_DIR)/testdata/20kseries.json"
+TSDB_BENCHMARK_OUTPUT_DIR ?= "$(TSDB_CLI_DIR)/benchout"
+
 include Makefile.common

 DOCKER_IMAGE_NAME ?= prometheus
@@ -31,3 +38,15 @@ check_assets: assets
 	echo "Run 'make assets' and commit the changes to fix the error."; \
 	exit 1; \
 	fi
+
+build_tsdb:
+	GO111MODULE=$(GO111MODULE) $(GO) build -o $(TSDB_BIN) $(TSDB_CLI_DIR)
+
+bench_tsdb: build_tsdb
+	@echo ">> running benchmark, writing result to $(TSDB_BENCHMARK_OUTPUT_DIR)"
+	@$(TSDB_BIN) bench write --metrics=$(TSDB_BENCHMARK_NUM_METRICS) --out=$(TSDB_BENCHMARK_OUTPUT_DIR) $(TSDB_BENCHMARK_DATASET)
+	@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/cpu.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/cpuprof.svg
+	@$(GO) tool pprof --inuse_space -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.inuse.svg
+	@$(GO) tool pprof --alloc_space -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.alloc.svg
+	@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/block.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/blockprof.svg
+	@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mutex.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/mutexprof.svg
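Note: the new bench_tsdb target expects the bench command to drop pprof profiles (cpu.prof, mem.prof, block.prof, mutex.prof) into the output directory, which `go tool pprof` then renders as SVG. As a rough illustration of where such a cpu.prof comes from, here is a minimal standard-library sketch; the file name and workload are placeholders, not the benchmark's actual code:

    package main

    import (
        "math/rand"
        "os"
        "runtime/pprof"
    )

    func main() {
        // Create the profile file that `go tool pprof -svg <binary> cpu.prof`
        // will later read; `tsdb bench write` does the equivalent in benchout/.
        f, err := os.Create("cpu.prof")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        if err := pprof.StartCPUProfile(f); err != nil {
            panic(err)
        }
        defer pprof.StopCPUProfile()

        // Placeholder workload standing in for the ingestion benchmark.
        sum := 0.0
        for i := 0; i < 10000000; i++ {
            sum += rand.Float64()
        }
        _ = sum
    }

Because the variables use `?=`, an invocation like `make bench_tsdb TSDB_BENCHMARK_NUM_METRICS=100` works without editing the Makefile.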
@@ -79,7 +79,7 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 	}

 	cases := []testcase{
-		testcase{
+		{
 			conf: &SDConfig{
 				Services:    []string{"configuredServiceName"},
 				ServiceTags: []string{"http", "v1"},
@@ -88,7 +88,7 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 			serviceTags: []string{""},
 			shouldWatch: false,
 		},
-		testcase{
+		{
 			conf: &SDConfig{
 				Services:    []string{"configuredServiceName"},
 				ServiceTags: []string{"http", "v1"},
@@ -97,7 +97,7 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 			serviceTags: []string{"http", "v1"},
 			shouldWatch: true,
 		},
-		testcase{
+		{
 			conf: &SDConfig{
 				Services:    []string{"configuredServiceName"},
 				ServiceTags: []string{"http", "v1"},
@@ -106,7 +106,7 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 			serviceTags: []string{""},
 			shouldWatch: false,
 		},
-		testcase{
+		{
 			conf: &SDConfig{
 				Services:    []string{"configuredServiceName"},
 				ServiceTags: []string{"http", "v1"},
@@ -115,7 +115,7 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 			serviceTags: []string{"http, v1"},
 			shouldWatch: false,
 		},
-		testcase{
+		{
 			conf: &SDConfig{
 				Services:    []string{"configuredServiceName"},
 				ServiceTags: []string{"http", "v1"},
@@ -124,7 +124,7 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 			serviceTags: []string{"http", "v1", "foo"},
 			shouldWatch: true,
 		},
-		testcase{
+		{
 			conf: &SDConfig{
 				Services:    []string{"configuredServiceName"},
 				ServiceTags: []string{"http", "v1", "foo"},
@@ -133,7 +133,7 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 			serviceTags: []string{"http", "v1", "foo"},
 			shouldWatch: true,
 		},
-		testcase{
+		{
 			conf: &SDConfig{
 				Services:    []string{"configuredServiceName"},
 				ServiceTags: []string{"http", "v1"},
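Note: every hunk in this test, and in the DNS test below, applies the same mechanical simplification: inside a slice literal the element type is inferred, so naming it again on each element is redundant, and `gofmt -s` suggests exactly this rewrite. A self-contained sketch of the pattern (the testcase type here is a stand-in, not the one from the Consul test):

    package main

    import "fmt"

    type testcase struct {
        service     string
        shouldWatch bool
    }

    func main() {
        // Old style: the element type is spelled out for every entry.
        verbose := []testcase{
            testcase{service: "configuredServiceName", shouldWatch: true},
        }
        // New style: the type is inferred from the slice's element type.
        concise := []testcase{
            {service: "configuredServiceName", shouldWatch: true},
        }
        fmt.Println(verbose[0] == concise[0]) // true
    }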
@@ -66,7 +66,7 @@ func TestDNS(t *testing.T) {
 				nil
 			},
 			expected: []*targetgroup.Group{
-				&targetgroup.Group{
+				{
 					Source: "web.example.com.",
 					Targets: []model.LabelSet{
 						{"__address__": "192.0.2.2:80", "__meta_dns_name": "web.example.com."},
@@ -91,7 +91,7 @@ func TestDNS(t *testing.T) {
 				nil
 			},
 			expected: []*targetgroup.Group{
-				&targetgroup.Group{
+				{
 					Source: "web.example.com.",
 					Targets: []model.LabelSet{
 						{"__address__": "[::1]:80", "__meta_dns_name": "web.example.com."},
@@ -115,7 +115,7 @@ func TestDNS(t *testing.T) {
 				nil
 			},
 			expected: []*targetgroup.Group{
-				&targetgroup.Group{
+				{
 					Source: "_mysql._tcp.db.example.com.",
 					Targets: []model.LabelSet{
 						{"__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
@@ -140,7 +140,7 @@ func TestDNS(t *testing.T) {
 				nil
 			},
 			expected: []*targetgroup.Group{
-				&targetgroup.Group{
+				{
 					Source: "_mysql._tcp.db.example.com.",
 					Targets: []model.LabelSet{
 						{"__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
@@ -158,7 +158,7 @@ func TestDNS(t *testing.T) {
 				return &dns.Msg{}, nil
 			},
 			expected: []*targetgroup.Group{
-				&targetgroup.Group{
+				{
 					Source: "_mysql._tcp.db.example.com.",
 				},
 			},
@@ -45,7 +45,7 @@ The directory structure of a Prometheus server's data directory will look someth

 Note that a limitation of the local storage is that it is not clustered or replicated. Thus, it is not arbitrarily scalable or durable in the face of disk or node outages and should be treated as more of an ephemeral sliding window of recent data. However, if your durability requirements are not strict, you may still succeed in storing up to years of data in the local storage.

-For further details on file format, see [TSDB format](https://github.com/prometheus/tsdb/blob/master/docs/format/README.md).
+For further details on file format, see [TSDB format](https://github.com/prometheus/prometheus/blob/master/tsdb/docs/format/README.md).

 ## Compaction

@@ -93,7 +93,7 @@ Currently rules still read and write directly from/to the fanout storage, but th

 ### Local storage

-Prometheus's local on-disk time series database is a [light-weight wrapper](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/tsdb/tsdb.go#L102-L106) around [`github.com/prometheus/tsdb.DB`](https://github.com/prometheus/tsdb/blob/master/db.go#L92-L117). The wrapper makes only minor interface adjustments for use of the TSDB in the context of the Prometheus server and implements the [`storage.Storage` interface](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/interface.go#L31-L44). You can find more details about the TSDB's on-disk layout in the [local storage documentation](https://prometheus.io/docs/prometheus/latest/storage/).
+Prometheus's local on-disk time series database is a [light-weight wrapper](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/tsdb/tsdb.go#L102-L106) around [`github.com/prometheus/prometheus/tsdb.DB`](https://github.com/prometheus/prometheus/blob/master/tsdb/db.go#L92-L117). The wrapper makes only minor interface adjustments for use of the TSDB in the context of the Prometheus server and implements the [`storage.Storage` interface](https://github.com/prometheus/prometheus/blob/v2.3.1/storage/interface.go#L31-L44). You can find more details about the TSDB's on-disk layout in the [local storage documentation](https://prometheus.io/docs/prometheus/latest/storage/).

 ### Remote storage

go.mod (6 changes)
@@ -9,6 +9,7 @@ require (
 	github.com/aws/aws-sdk-go v1.15.24
 	github.com/cespare/xxhash v1.1.0
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+	github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954
 	github.com/edsrzf/mmap-go v1.0.0
 	github.com/evanphx/json-patch v4.1.0+incompatible // indirect
 	github.com/go-kit/kit v0.8.0
@@ -32,6 +33,7 @@ require (
 	github.com/miekg/dns v1.1.10
 	github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223
 	github.com/oklog/run v1.0.0
+	github.com/oklog/ulid v1.3.1
 	github.com/opentracing-contrib/go-stdlib v0.0.0-20170113013457-1de4cc2120e7
 	github.com/opentracing/opentracing-go v1.0.2
 	github.com/pkg/errors v0.8.1
@@ -39,7 +41,6 @@ require (
 	github.com/prometheus/client_golang v1.0.0
 	github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
 	github.com/prometheus/common v0.4.1
-	github.com/prometheus/tsdb v0.10.0
 	github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13
 	github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371
 	github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b
@@ -50,7 +51,8 @@ require (
 	golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c // indirect
 	golang.org/x/net v0.0.0-20190403144856-b630fd6fe46b
 	golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a
-	golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e // indirect
+	golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6
+	golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e
 	golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
 	golang.org/x/tools v0.0.0-20190312170243-e65039ee4138
 	google.golang.org/api v0.3.2
go.sum (3 changes)
@@ -48,6 +48,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
@@ -288,8 +289,6 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
 github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
-github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
-github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
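Note: with github.com/prometheus/tsdb dropped from go.mod and go.sum, every file below switches to the github.com/prometheus/prometheus/tsdb import path. A minimal sketch of using the package under its new path, assuming the Open signature of this era (directory, logger, registerer, options), where a nil logger and registerer are accepted and DefaultOptions is still a package-level *Options value:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/prometheus/tsdb"
    )

    func main() {
        // Same API as the old github.com/prometheus/tsdb module,
        // now living as a package of the main repository.
        db, err := tsdb.Open("data", nil, nil, tsdb.DefaultOptions)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        fmt.Println("opened TSDB via the merged import path")
    }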
@@ -32,8 +32,8 @@ import (
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/relabel"
 	"github.com/prometheus/prometheus/prompb"
-	"github.com/prometheus/tsdb"
-	tsdbLabels "github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb"
+	tsdbLabels "github.com/prometheus/prometheus/tsdb/labels"
 )

 // String constants for instrumentation.
@@ -253,7 +253,7 @@ outer:
 	ts := prompb.TimeSeries{
 		Labels: lbls,
 		Samples: []prompb.Sample{
-			prompb.Sample{
+			{
 				Value:     float64(sample.V),
 				Timestamp: sample.T,
 			},
@@ -36,9 +36,9 @@ import (
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/prompb"
+	"github.com/prometheus/prometheus/tsdb"
+	tsdbLabels "github.com/prometheus/prometheus/tsdb/labels"
 	"github.com/prometheus/prometheus/util/testutil"
-	"github.com/prometheus/tsdb"
-	tsdbLabels "github.com/prometheus/tsdb/labels"
 )

 const defaultFlushDeadline = 1 * time.Minute
@@ -264,7 +264,7 @@ func TestReleaseNoninternedString(t *testing.T) {

 	for i := 1; i < 1000; i++ {
 		m.StoreSeries([]tsdb.RefSeries{
-			tsdb.RefSeries{
+			{
 				Ref: uint64(i),
 				Labels: tsdbLabels.Labels{
 					tsdbLabels.Label{
@@ -28,9 +28,9 @@ import (
 	"github.com/go-kit/kit/log/level"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/tsdb"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/wal"

 	"github.com/prometheus/prometheus/pkg/timestamp"
 )
@@ -22,10 +22,10 @@ import (
 	"testing"
 	"time"

+	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/wal"
 	"github.com/prometheus/prometheus/util/testutil"
-	"github.com/prometheus/tsdb"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/wal"
 )

 var defaultRetryInterval = 100 * time.Millisecond
@@ -112,7 +112,7 @@ func TestTailSamples(t *testing.T) {
 	for i := 0; i < seriesCount; i++ {
 		ref := i + 100
 		series := enc.Series([]tsdb.RefSeries{
-			tsdb.RefSeries{
+			{
 				Ref:    uint64(ref),
 				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
 			},
@@ -122,7 +122,7 @@ func TestTailSamples(t *testing.T) {
 		for j := 0; j < samplesCount; j++ {
 			inner := rand.Intn(ref + 1)
 			sample := enc.Samples([]tsdb.RefSample{
-				tsdb.RefSample{
+				{
 					Ref: uint64(inner),
 					T:   int64(now.UnixNano()) + 1,
 					V:   float64(i),
@@ -186,7 +186,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {

 	for i := 0; i < seriesCount; i++ {
 		series := enc.Series([]tsdb.RefSeries{
-			tsdb.RefSeries{
+			{
 				Ref:    uint64(i),
 				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
 			},
@@ -194,7 +194,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
 		recs = append(recs, series)
 		for j := 0; j < samplesCount; j++ {
 			sample := enc.Samples([]tsdb.RefSample{
-				tsdb.RefSample{
+				{
 					Ref: uint64(j),
 					T:   int64(i),
 					V:   float64(i),
@@ -254,7 +254,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
 	for i := 0; i < seriesCount; i++ {
 		ref := i + 100
 		series := enc.Series([]tsdb.RefSeries{
-			tsdb.RefSeries{
+			{
 				Ref:    uint64(ref),
 				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
 			},
@@ -264,7 +264,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
 		for j := 0; j < samplesCount; j++ {
 			inner := rand.Intn(ref + 1)
 			sample := enc.Samples([]tsdb.RefSample{
-				tsdb.RefSample{
+				{
 					Ref: uint64(inner),
 					T:   int64(i),
 					V:   float64(i),
@@ -280,7 +280,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
 	// Write more records after checkpointing.
 	for i := 0; i < seriesCount; i++ {
 		series := enc.Series([]tsdb.RefSeries{
-			tsdb.RefSeries{
+			{
 				Ref:    uint64(i),
 				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
 			},
@@ -289,7 +289,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {

 		for j := 0; j < samplesCount; j++ {
 			sample := enc.Samples([]tsdb.RefSample{
-				tsdb.RefSample{
+				{
 					Ref: uint64(j),
 					T:   int64(i),
 					V:   float64(i),
@@ -340,7 +340,7 @@ func TestReadCheckpoint(t *testing.T) {
 	for i := 0; i < seriesCount; i++ {
 		ref := i + 100
 		series := enc.Series([]tsdb.RefSeries{
-			tsdb.RefSeries{
+			{
 				Ref:    uint64(ref),
 				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
 			},
@@ -350,7 +350,7 @@ func TestReadCheckpoint(t *testing.T) {
 		for j := 0; j < samplesCount; j++ {
 			inner := rand.Intn(ref + 1)
 			sample := enc.Samples([]tsdb.RefSample{
-				tsdb.RefSample{
+				{
 					Ref: uint64(inner),
 					T:   int64(i),
 					V:   float64(i),
@@ -407,7 +407,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
 		for j := 0; j < seriesCount; j++ {
 			ref := j + (i * 100)
 			series := enc.Series([]tsdb.RefSeries{
-				tsdb.RefSeries{
+				{
 					Ref:    uint64(ref),
 					Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", j)}},
 				},
@@ -417,7 +417,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
 			for k := 0; k < samplesCount; k++ {
 				inner := rand.Intn(ref + 1)
 				sample := enc.Samples([]tsdb.RefSample{
-					tsdb.RefSample{
+					{
 						Ref: uint64(inner),
 						T:   int64(i),
 						V:   float64(i),
@@ -485,7 +485,7 @@ func TestCheckpointSeriesReset(t *testing.T) {
 	for i := 0; i < seriesCount; i++ {
 		ref := i + 100
 		series := enc.Series([]tsdb.RefSeries{
-			tsdb.RefSeries{
+			{
 				Ref:    uint64(ref),
 				Labels: labels.Labels{labels.Label{Name: "__name__", Value: fmt.Sprintf("metric_%d", i)}},
 			},
@@ -495,7 +495,7 @@ func TestCheckpointSeriesReset(t *testing.T) {
 		for j := 0; j < samplesCount; j++ {
 			inner := rand.Intn(ref + 1)
 			sample := enc.Samples([]tsdb.RefSample{
-				tsdb.RefSample{
+				{
 					Ref: uint64(inner),
 					T:   int64(i),
 					V:   float64(i),
@@ -26,8 +26,8 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/storage"
-	"github.com/prometheus/tsdb"
-	tsdbLabels "github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb"
+	tsdbLabels "github.com/prometheus/prometheus/tsdb/labels"
 )

 // ErrNotReady is returned if the underlying storage is not ready yet.
@@ -1,5 +0,0 @@
-# Run only staticcheck for now. Additional linters will be enabled one-by-one.
-linters:
-  enable:
-  - staticcheck
-  disable-all: true
@@ -1,20 +0,0 @@
-dist: trusty
-language: go
-os:
-- windows
-- linux
-- osx
-
-go:
-- 1.12.x
-
-go_import_path: github.com/prometheus/tsdb
-
-before_install:
-- if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install make; fi
-
-install:
-- make deps
-
-script:
-- if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then make test; else make all; fi
tsdb/LICENSE (201 changes)
@@ -1,201 +0,0 @@
[Entire file deleted: the standard Apache License, Version 2.0 text carried by the standalone tsdb repository. The repository's top-level LICENSE is the same license, so the 201 removed lines are not reproduced here.]
@@ -1,33 +0,0 @@
-# Copyright 2018 The Prometheus Authors
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-TSDB_PROJECT_DIR = "."
-TSDB_CLI_DIR="$(TSDB_PROJECT_DIR)/cmd/tsdb"
-TSDB_BIN = "$(TSDB_CLI_DIR)/tsdb"
-TSDB_BENCHMARK_NUM_METRICS ?= 1000
-TSDB_BENCHMARK_DATASET ?= "$(TSDB_PROJECT_DIR)/testdata/20kseries.json"
-TSDB_BENCHMARK_OUTPUT_DIR ?= "$(TSDB_CLI_DIR)/benchout"
-
-include Makefile.common
-
-build:
-	GO111MODULE=$(GO111MODULE) $(GO) build -o $(TSDB_BIN) $(TSDB_CLI_DIR)
-
-bench: build
-	@echo ">> running benchmark, writing result to $(TSDB_BENCHMARK_OUTPUT_DIR)"
-	@$(TSDB_BIN) bench write --metrics=$(TSDB_BENCHMARK_NUM_METRICS) --out=$(TSDB_BENCHMARK_OUTPUT_DIR) $(TSDB_BENCHMARK_DATASET)
-	@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/cpu.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/cpuprof.svg
-	@$(GO) tool pprof --inuse_space -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.inuse.svg
-	@$(GO) tool pprof --alloc_space -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.alloc.svg
-	@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/block.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/blockprof.svg
-	@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mutex.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/mutexprof.svg
@@ -1,277 +0,0 @@
[Entire file deleted: tsdb's copy of the shared Prometheus Makefile.common (the Apache 2.0 header plus the common Go, promu, golangci-lint, govendor, test, and Docker targets). It duplicated the Makefile.common already included from the repository root, so the 277 removed lines are not reproduced here.]
@@ -26,12 +26,12 @@ import (
 	"github.com/go-kit/kit/log/level"
 	"github.com/oklog/ulid"
 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/tsdb/chunks"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/index"
-	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
 )

 // IndexWriter serializes the index for a block of series data.
@@ -26,10 +26,10 @@ import (
 	"testing"

 	"github.com/go-kit/kit/log"
-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
-	"github.com/prometheus/tsdb/tsdbutil"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 )

 // In Prometheus 2.1.0 we had a bug where the meta.json version was falsely bumped
@@ -25,9 +25,9 @@ import (
 	"strings"

 	"github.com/pkg/errors"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/wal"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/wal"
 )

 // CheckpointStats returns stats about a created checkpoint.
@@ -23,10 +23,10 @@ import (
 	"testing"

 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
-	"github.com/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/wal"
 )

 func TestLastCheckpoint(t *testing.T) {
@@ -20,7 +20,7 @@ import (
 	"reflect"
 	"testing"

-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 type pair struct {
@@ -26,9 +26,9 @@ import (
 	"strconv"

 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/chunkenc"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
 )

 const (
@@ -16,7 +16,7 @@ package chunks
 import (
 	"testing"

-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func TestReaderWithInvalidBuffer(t *testing.T) {
@@ -32,10 +32,10 @@ import (

 	"github.com/go-kit/kit/log"
 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb"
-	"github.com/prometheus/tsdb/chunks"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/labels"
 	"gopkg.in/alecthomas/kingpin.v2"
 )

@@ -206,7 +206,9 @@ func (b *writeBenchmark) run() error {
 	var total uint64

 	dur, err := measureTime("ingestScrapes", func() error {
-		b.startProfiling()
+		if err := b.startProfiling(); err != nil {
+			return err
+		}
 		total, err = b.ingestScrapes(labels, 3000)
 		if err != nil {
 			return err
@@ -559,7 +561,7 @@ func analyzeBlock(b tsdb.BlockReader, limit int) error {
 	var cumulativeLength uint64

 	for i := 0; i < values.Len(); i++ {
-		value, _ := values.At(i)
+		value, err := values.At(i)
 		if err != nil {
 			return err
 		}
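Note: both hunks in this file fix silently dropped errors. startProfiling()'s return value was ignored, and values.At(i) blanked its error with `_` even though an `if err != nil` check followed, so a stale err was being tested. A reduced sketch of the pattern; the function name mirrors the diff, but the body is invented for illustration:

    package main

    import (
        "errors"
        "fmt"
    )

    // Stand-in for (*writeBenchmark).startProfiling, which can fail
    // when a profile file cannot be created.
    func startProfiling() error {
        return errors.New("create cpu profile: permission denied")
    }

    func run() error {
        // Before: startProfiling() was called bare and the error vanished.
        // After: the error is checked and propagated to the caller.
        if err := startProfiling(); err != nil {
            return err
        }
        return nil
    }

    func main() {
        fmt.Println(run()) // create cpu profile: permission denied
    }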
@@ -29,12 +29,12 @@ import (
 	"github.com/oklog/ulid"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/tsdb/chunks"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/index"
-	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
 )

 // ExponentialBlockRanges returns the time ranges based on the stepSize.
@@ -27,10 +27,10 @@ import (
 	"github.com/go-kit/kit/log"
 	"github.com/pkg/errors"
 	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func TestSplitByRange(t *testing.T) {
@@ -651,7 +651,7 @@ func TestCompaction_populateBlock(t *testing.T) {
 			expErr: errors.New("found chunk with minTime: 10 maxTime: 30 outside of compacted minTime: 0 maxTime: 20"),
 		},
 		{
-			// Introduced by https://github.com/prometheus/tsdb/issues/347.
+			// Introduced by https://github.com/prometheus/prometheus/tsdb/issues/347.
 			title: "Populate from single block containing extra chunk",
 			inputSeriesSamples: [][]seriesSamples{
 				{
@@ -691,7 +691,7 @@ func TestCompaction_populateBlock(t *testing.T) {
 			},
 		},
 		{
-			// Introduced by https://github.com/prometheus/tsdb/pull/539.
+			// Introduced by https://github.com/prometheus/prometheus/tsdb/pull/539.
 			title: "Populate from three blocks that the last two are overlapping.",
 			inputSeriesSamples: [][]seriesSamples{
 				{
tsdb/db.go (12 changes)
@@ -34,12 +34,12 @@ import (
 	"github.com/oklog/ulid"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/tsdb/chunkenc"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/fileutil"
-	_ "github.com/prometheus/tsdb/goversion"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	_ "github.com/prometheus/prometheus/tsdb/goversion"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/wal"
 	"golang.org/x/sync/errgroup"
 )

tsdb/db_test.go (144 changes)
@@ -30,12 +30,12 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/index"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
-	"github.com/prometheus/tsdb/tsdbutil"
-	"github.com/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
+	"github.com/prometheus/prometheus/tsdb/wal"
 )

 func openTestDB(t testing.TB, opts *Options) (db *DB, close func()) {
@@ -229,7 +229,7 @@ func TestAppendEmptyLabelsIgnored(t *testing.T) {
 	testutil.Ok(t, err)

 	// Construct labels manually so there is an empty label.
-	ref2, err := app1.Add(labels.Labels{labels.Label{"a", "b"}, labels.Label{"c", ""}}, 124, 0)
+	ref2, err := app1.Add(labels.Labels{labels.Label{Name: "a", Value: "b"}, labels.Label{Name: "c", Value: ""}}, 124, 0)
 	testutil.Ok(t, err)

 	// Should be the same series.
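Note: the remaining db_test.go hunks all replace positional struct literals such as labels.Label{"a", "b"} with keyed ones. Keyed literals do not break when fields are reordered, read unambiguously, and satisfy `go vet`'s composites check for types from other packages. A sketch with a local stand-in for the labels.Label type:

    package main

    import "fmt"

    // Stand-in for the tsdb labels.Label type.
    type Label struct {
        Name, Value string
    }

    func main() {
        // Positional: relies on field order and trips vet's composites
        // check when the struct comes from another package.
        positional := Label{"a", "b"}
        // Keyed: explicit, order-independent, and vet-clean.
        keyed := Label{Name: "a", Value: "b"}
        fmt.Println(positional == keyed) // true
    }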
@@ -281,7 +281,7 @@ Outer:
 	smpls := make([]float64, numSamples)
 	for i := int64(0); i < numSamples; i++ {
 		smpls[i] = rand.Float64()
-		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
+		app.Add(labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
 	}

 	testutil.Ok(t, app.Commit())
@@ -405,9 +405,9 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {

 	// Append AmendedValue.
 	app := db.Appender()
-	_, err := app.Add(labels.Labels{{"a", "b"}}, 0, 1)
+	_, err := app.Add(labels.Labels{{Name: "a", Value: "b"}}, 0, 1)
 	testutil.Ok(t, err)
-	_, err = app.Add(labels.Labels{{"a", "b"}}, 0, 2)
+	_, err = app.Add(labels.Labels{{Name: "a", Value: "b"}}, 0, 2)
 	testutil.Ok(t, err)
 	testutil.Ok(t, app.Commit())

@@ -418,14 +418,14 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
 	ssMap := query(t, q, labels.NewEqualMatcher("a", "b"))

 	testutil.Equals(t, map[string][]tsdbutil.Sample{
-		labels.New(labels.Label{"a", "b"}).String(): {sample{0, 1}},
+		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1}},
 	}, ssMap)

 	// Append Out of Order Value.
 	app = db.Appender()
-	_, err = app.Add(labels.Labels{{"a", "b"}}, 10, 3)
+	_, err = app.Add(labels.Labels{{Name: "a", Value: "b"}}, 10, 3)
 	testutil.Ok(t, err)
-	_, err = app.Add(labels.Labels{{"a", "b"}}, 7, 5)
+	_, err = app.Add(labels.Labels{{Name: "a", Value: "b"}}, 7, 5)
 	testutil.Ok(t, err)
 	testutil.Ok(t, app.Commit())

@@ -435,7 +435,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
 	ssMap = query(t, q, labels.NewEqualMatcher("a", "b"))

 	testutil.Equals(t, map[string][]tsdbutil.Sample{
-		labels.New(labels.Label{"a", "b"}).String(): {sample{0, 1}, sample{10, 3}},
+		labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1}, sample{10, 3}},
 	}, ssMap)
 }

@@ -556,7 +556,7 @@ func TestDB_SnapshotWithDelete(t *testing.T) {
 	smpls := make([]float64, numSamples)
 	for i := int64(0); i < numSamples; i++ {
 		smpls[i] = rand.Float64()
-		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
+		app.Add(labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
 	}

 	testutil.Ok(t, app.Commit())
@@ -645,44 +645,44 @@ func TestDB_e2e(t *testing.T) {
 	// Create 8 series with 1000 data-points of different ranges and run queries.
 	lbls := [][]labels.Label{
 		{
-			{"a", "b"},
-			{"instance", "localhost:9090"},
-			{"job", "prometheus"},
+			{Name: "a", Value: "b"},
+			{Name: "instance", Value: "localhost:9090"},
+			{Name: "job", Value: "prometheus"},
 		},
 		{
-			{"a", "b"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prometheus"},
+			{Name: "a", Value: "b"},
+			{Name: "instance", Value: "127.0.0.1:9090"},
+			{Name: "job", Value: "prometheus"},
 		},
 		{
-			{"a", "b"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prom-k8s"},
+			{Name: "a", Value: "b"},
+			{Name: "instance", Value: "127.0.0.1:9090"},
+			{Name: "job", Value: "prom-k8s"},
 		},
 		{
-			{"a", "b"},
-			{"instance", "localhost:9090"},
-			{"job", "prom-k8s"},
+			{Name: "a", Value: "b"},
+			{Name: "instance", Value: "localhost:9090"},
+			{Name: "job", Value: "prom-k8s"},
 		},
 		{
-			{"a", "c"},
-			{"instance", "localhost:9090"},
-			{"job", "prometheus"},
+			{Name: "a", Value: "c"},
+			{Name: "instance", Value: "localhost:9090"},
+			{Name: "job", Value: "prometheus"},
 		},
 		{
-			{"a", "c"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prometheus"},
+			{Name: "a", Value: "c"},
+			{Name: "instance", Value: "127.0.0.1:9090"},
+			{Name: "job", Value: "prometheus"},
 		},
 		{
-			{"a", "c"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prom-k8s"},
+			{Name: "a", Value: "c"},
+			{Name: "instance", Value: "127.0.0.1:9090"},
+			{Name: "job", Value: "prom-k8s"},
 		},
 		{
-			{"a", "c"},
-			{"instance", "localhost:9090"},
-			{"job", "prom-k8s"},
+			{Name: "a", Value: "c"},
+			{Name: "instance", Value: "localhost:9090"},
+			{Name: "job", Value: "prom-k8s"},
 		},
 	}

@@ -883,7 +883,7 @@ func TestTombstoneClean(t *testing.T) {
 	smpls := make([]float64, numSamples)
 	for i := int64(0); i < numSamples; i++ {
 		smpls[i] = rand.Float64()
-		app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
+		app.Add(labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
 	}

 	testutil.Ok(t, app.Commit())
@@ -1339,7 +1339,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
 	}, OverlappingBlocks(nc1))
 }

-// Regression test for https://github.com/prometheus/tsdb/issues/347
+// Regression test for https://github.com/prometheus/prometheus/tsdb/issues/347
 func TestChunkAtBlockBoundary(t *testing.T) {
 	db, delete := openTestDB(t, nil)
 	defer func() {
@@ -1666,26 +1666,26 @@ func TestDB_LabelNames(t *testing.T) {
 	}{
 		{
 			sampleLabels1: [][2]string{
-				[2]string{"name1", "1"},
-				[2]string{"name3", "3"},
-				[2]string{"name2", "2"},
+				{"name1", "1"},
+				{"name3", "3"},
+				{"name2", "2"},
 			},
 			sampleLabels2: [][2]string{
-				[2]string{"name4", "4"},
-				[2]string{"name1", "1"},
+				{"name4", "4"},
+				{"name1", "1"},
 			},
 			exp1: []string{"name1", "name2", "name3"},
 			exp2: []string{"name1", "name2", "name3", "name4"},
 		},
 		{
 			sampleLabels1: [][2]string{
-				[2]string{"name2", "2"},
-				[2]string{"name1", "1"},
-				[2]string{"name2", "2"},
+				{"name2", "2"},
+				{"name1", "1"},
+				{"name2", "2"},
 			},
 			sampleLabels2: [][2]string{
-				[2]string{"name6", "6"},
-				[2]string{"name0", "0"},
+				{"name6", "6"},
+				{"name0", "0"},
 			},
 			exp1: []string{"name1", "name2"},
|
||||
exp2: []string{"name0", "name1", "name2", "name6"},
|
||||
|
@ -1802,13 +1802,13 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
// |----------------|
|
||||
{
|
||||
blockSeries: [][]Series{
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
|
||||
sample{5, 0}, sample{7, 0}, sample{8, 0}, sample{9, 0},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{7, 99},
|
||||
sample{8, 99}, sample{9, 99}, sample{10, 99}, sample{11, 99},
|
||||
|
@ -1830,14 +1830,14 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
// |----------------|
|
||||
{
|
||||
blockSeries: [][]Series{
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
|
||||
sample{5, 0}, sample{7, 0}, sample{8, 0}, sample{9, 0},
|
||||
sample{11, 0}, sample{13, 0}, sample{17, 0},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{7, 99},
|
||||
sample{8, 99}, sample{9, 99}, sample{10, 99},
|
||||
|
@ -1859,20 +1859,20 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
// |--------------------|
|
||||
{
|
||||
blockSeries: [][]Series{
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
|
||||
sample{5, 0}, sample{7, 0}, sample{8, 0}, sample{9, 0},
|
||||
sample{11, 0}, sample{13, 0}, sample{17, 0},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{7, 99},
|
||||
sample{8, 99}, sample{9, 99},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{14, 59}, sample{15, 59}, sample{17, 59}, sample{20, 59},
|
||||
sample{21, 59}, sample{22, 59},
|
||||
|
@ -1895,19 +1895,19 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
// |----------------|
|
||||
{
|
||||
blockSeries: [][]Series{
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
|
||||
sample{5, 0}, sample{8, 0}, sample{9, 0},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{14, 59}, sample{15, 59}, sample{17, 59}, sample{20, 59},
|
||||
sample{21, 59}, sample{22, 59},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{5, 99}, sample{6, 99}, sample{7, 99}, sample{8, 99},
|
||||
sample{9, 99}, sample{10, 99}, sample{13, 99}, sample{15, 99},
|
||||
|
@ -1931,7 +1931,7 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
// |-------------------------|
|
||||
{
|
||||
blockSeries: [][]Series{
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
|
||||
sample{5, 0}, sample{8, 0}, sample{9, 0}, sample{10, 0},
|
||||
|
@ -1939,13 +1939,13 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
sample{20, 0}, sample{22, 0},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{7, 59}, sample{8, 59}, sample{9, 59}, sample{10, 59},
|
||||
sample{11, 59},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{8, 99},
|
||||
sample{9, 99}, sample{10, 99}, sample{13, 99}, sample{15, 99},
|
||||
|
@ -1969,7 +1969,7 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
// |-------------------------|
|
||||
{
|
||||
blockSeries: [][]Series{
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
|
||||
sample{5, 0}, sample{8, 0}, sample{9, 0}, sample{10, 0},
|
||||
|
@ -1989,7 +1989,7 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
sample{20, 0}, sample{22, 0},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"__name__": "a"}, []tsdbutil.Sample{
|
||||
sample{7, 59}, sample{8, 59}, sample{9, 59}, sample{10, 59},
|
||||
sample{11, 59},
|
||||
|
@ -2007,7 +2007,7 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
sample{11, 59},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{8, 99},
|
||||
sample{9, 99}, sample{10, 99}, sample{13, 99}, sample{15, 99},
|
||||
|
@ -2066,26 +2066,26 @@ func TestVerticalCompaction(t *testing.T) {
|
|||
// |----------------|
|
||||
{
|
||||
blockSeries: [][]Series{
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
|
||||
sample{5, 0}, sample{7, 0}, sample{8, 0}, sample{9, 0},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{3, 99}, sample{5, 99}, sample{6, 99}, sample{7, 99},
|
||||
sample{8, 99}, sample{9, 99}, sample{10, 99}, sample{11, 99},
|
||||
sample{12, 99}, sample{13, 99}, sample{14, 99},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{20, 0}, sample{21, 0}, sample{22, 0}, sample{24, 0},
|
||||
sample{25, 0}, sample{27, 0}, sample{28, 0}, sample{29, 0},
|
||||
}),
|
||||
},
|
||||
[]Series{
|
||||
{
|
||||
newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
|
||||
sample{23, 99}, sample{25, 99}, sample{26, 99}, sample{27, 99},
|
||||
sample{28, 99}, sample{29, 99}, sample{30, 99}, sample{31, 99},
|
||||
|
@ -2186,7 +2186,7 @@ func TestBlockRanges(t *testing.T) {
|
|||
os.RemoveAll(dir)
|
||||
}()
|
||||
app := db.Appender()
|
||||
lbl := labels.Labels{{"a", "b"}}
|
||||
lbl := labels.Labels{{Name: "a", Value: "b"}}
|
||||
_, err = app.Add(lbl, firstBlockMaxT-1, rand.Float64())
|
||||
if err == nil {
|
||||
t.Fatalf("appending a sample with a timestamp covered by a previous block shouldn't be possible")
|
||||
|
|
|
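All of the `labels.Label{"a", "b"}` to `labels.Label{Name: "a", Value: "b"}` rewrites above are one mechanical change: positional struct literals for a type defined in another package trip `go vet`'s composite-literal check, while keyed literals stay correct even if the struct later gains or reorders fields. A minimal self-contained sketch of the distinction (the `Label` type here mirrors `tsdb/labels.Label`; the `main` wrapper is only for illustration):

    package main

    import "fmt"

    // Label mirrors tsdb/labels.Label: a name/value pair.
    type Label struct {
    	Name, Value string
    }

    func main() {
    	// Positional: silently changes meaning if fields are reordered,
    	// and vet flags it for structs imported from other packages.
    	a := Label{"a", "b"}
    	// Keyed: self-documenting and robust to field reordering.
    	b := Label{Name: "a", Value: "b"}
    	fmt.Println(a == b) // true
    }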
@@ -18,7 +18,7 @@ import (
 	"path/filepath"
 	"testing"

-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func TestLocking(t *testing.T) {
tsdb/go.mod (deleted)
@@ -1,14 +0,0 @@
-module github.com/prometheus/tsdb
-
-require (
-	github.com/cespare/xxhash v1.1.0
-	github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954
-	github.com/go-kit/kit v0.8.0
-	github.com/golang/snappy v0.0.1
-	github.com/oklog/ulid v1.3.1
-	github.com/pkg/errors v0.8.0
-	github.com/prometheus/client_golang v1.0.0
-	golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
-	golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5
-	gopkg.in/alecthomas/kingpin.v2 v2.2.6
-)
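With tsdb folded into the main repository, a nested `go.mod` would keep `./tsdb` a separate module, so the file is deleted outright and its requirements have to live in the top-level `go.mod` instead. Roughly like this (a sketch, not the literal root file of this commit; the versions are taken from the deleted file above):

    // go.mod at the repository root (sketch)
    module github.com/prometheus/prometheus

    require (
    	github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 // formerly required by tsdb
    	github.com/oklog/ulid v1.3.1                                   // formerly required by tsdb
    	// ... the rest of the former tsdb requirements merge in here.
    )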
tsdb/go.sum (deleted)
@@ -1,83 +0,0 @@
-github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXavqjmgO17k/2puhcFR94=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
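For orientation: each module in a `go.sum` contributes up to two lines, an `h1:` checksum of the module's extracted file tree and a `/go.mod h1:` checksum of just its `go.mod` file, both SHA-256-based hashes that `go mod verify` and ordinary builds check downloads against. After the merge these entries need to be present in the root module's `go.sum` instead, which is why the nested file can simply be deleted.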
@@ -16,7 +16,7 @@ package goversion_test
 import (
 	"testing"

-	_ "github.com/prometheus/tsdb/goversion"
+	_ "github.com/prometheus/prometheus/tsdb/goversion"
 )

 // This test is intentionally blank and exists only so `go test` believes
tsdb/head.go
@@ -28,12 +28,12 @@ import (
 	"github.com/oklog/ulid"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/encoding"
-	"github.com/prometheus/tsdb/index"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/wal"
 )

 var (
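Every import hunk in this commit is the same mechanical substitution: `github.com/prometheus/tsdb/<pkg>` becomes `github.com/prometheus/prometheus/tsdb/<pkg>`. The rewrite appears to have been purely textual — presumably something like a tree-wide `sed 's|github.com/prometheus/tsdb|github.com/prometheus/prometheus/tsdb|g'` (an assumption; the commit does not say how it was produced) — which would also explain why URLs inside comments were rewritten, yielding links such as `https://github.com/prometheus/prometheus/tsdb/issues/347` that do not resolve on GitHub.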
@@ -18,8 +18,8 @@ import (
 	"sync/atomic"
 	"testing"

-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
@@ -26,13 +26,13 @@ import (

 	"github.com/pkg/errors"
 	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
-	"github.com/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/index"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
-	"github.com/prometheus/tsdb/tsdbutil"
-	"github.com/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
+	"github.com/prometheus/prometheus/tsdb/wal"
 )

 func BenchmarkCreateSeries(b *testing.B) {
@@ -368,7 +368,7 @@ func TestHeadDeleteSimple(t *testing.T) {
 		return ss
 	}
 	smplsAll := buildSmpls([]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
-	lblDefault := labels.Label{"a", "b"}
+	lblDefault := labels.Label{Name: "a", Value: "b"}

 	cases := []struct {
 		dranges Intervals
@@ -536,7 +536,7 @@ func TestDeleteUntilCurMax(t *testing.T) {
 	smpls := make([]float64, numSamples)
 	for i := int64(0); i < numSamples; i++ {
 		smpls[i] = rand.Float64()
-		_, err := app.Add(labels.Labels{{"a", "b"}}, i, smpls[i])
+		_, err := app.Add(labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i])
 		testutil.Ok(t, err)
 	}
 	testutil.Ok(t, app.Commit())
@@ -551,7 +551,7 @@ func TestDeleteUntilCurMax(t *testing.T) {

 	// Add again and test for presence.
 	app = hb.Appender()
-	_, err = app.Add(labels.Labels{{"a", "b"}}, 11, 1)
+	_, err = app.Add(labels.Labels{{Name: "a", Value: "b"}}, 11, 1)
 	testutil.Ok(t, err)
 	testutil.Ok(t, app.Commit())
 	q, err = NewBlockQuerier(hb, 0, 100000)
@@ -582,7 +582,7 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
 	defer hb.Close()
 	for i := 0; i < numSamples; i++ {
 		app := hb.Appender()
-		_, err := app.Add(labels.Labels{{"a", "b"}}, int64(i), 0)
+		_, err := app.Add(labels.Labels{{Name: "a", Value: "b"}}, int64(i), 0)
 		testutil.Ok(t, err)
 		testutil.Ok(t, app.Commit())
 	}
@@ -623,44 +623,44 @@ func TestDelete_e2e(t *testing.T) {
 	// Create 8 series with 1000 data-points of different ranges, delete and run queries.
 	lbls := [][]labels.Label{
 		{
-			{"a", "b"},
-			{"instance", "localhost:9090"},
-			{"job", "prometheus"},
+			{Name: "a", Value: "b"},
+			{Name: "instance", Value: "localhost:9090"},
+			{Name: "job", Value: "prometheus"},
 		},
 		{
-			{"a", "b"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prometheus"},
+			{Name: "a", Value: "b"},
+			{Name: "instance", Value: "127.0.0.1:9090"},
+			{Name: "job", Value: "prometheus"},
 		},
 		{
-			{"a", "b"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prom-k8s"},
+			{Name: "a", Value: "b"},
+			{Name: "instance", Value: "127.0.0.1:9090"},
+			{Name: "job", Value: "prom-k8s"},
 		},
 		{
-			{"a", "b"},
-			{"instance", "localhost:9090"},
-			{"job", "prom-k8s"},
+			{Name: "a", Value: "b"},
+			{Name: "instance", Value: "localhost:9090"},
+			{Name: "job", Value: "prom-k8s"},
 		},
 		{
-			{"a", "c"},
-			{"instance", "localhost:9090"},
-			{"job", "prometheus"},
+			{Name: "a", Value: "c"},
+			{Name: "instance", Value: "localhost:9090"},
+			{Name: "job", Value: "prometheus"},
 		},
 		{
-			{"a", "c"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prometheus"},
+			{Name: "a", Value: "c"},
+			{Name: "instance", Value: "127.0.0.1:9090"},
+			{Name: "job", Value: "prometheus"},
 		},
 		{
-			{"a", "c"},
-			{"instance", "127.0.0.1:9090"},
-			{"job", "prom-k8s"},
+			{Name: "a", Value: "c"},
+			{Name: "instance", Value: "127.0.0.1:9090"},
+			{Name: "job", Value: "prom-k8s"},
 		},
 		{
-			{"a", "c"},
-			{"instance", "localhost:9090"},
-			{"job", "prom-k8s"},
+			{Name: "a", Value: "c"},
+			{Name: "instance", Value: "localhost:9090"},
+			{Name: "job", Value: "prom-k8s"},
 		},
 	}
 	seriesMap := map[string][]tsdbutil.Sample{}
@@ -1185,7 +1185,7 @@ func TestNewWalSegmentOnTruncate(t *testing.T) {
 	defer h.Close()
 	add := func(ts int64) {
 		app := h.Appender()
-		_, err := app.Add(labels.Labels{{"a", "b"}}, ts, 0)
+		_, err := app.Add(labels.Labels{{Name: "a", Value: "b"}}, ts, 0)
 		testutil.Ok(t, err)
 		testutil.Ok(t, app.Commit())
 	}
@@ -27,11 +27,11 @@ import (
 	"strings"

 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/encoding"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/labels"
 )

 const (
@@ -22,11 +22,11 @@ import (
 	"testing"

 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/encoding"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 type series struct {
@@ -21,7 +21,7 @@ import (
 	"strings"
 	"sync"

-	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/labels"
 )

 var allPostingsKey = labels.Label{}
@@ -20,7 +20,7 @@ import (
 	"sort"
 	"testing"

-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func TestMemPostings_addFor(t *testing.T) {
@@ -20,7 +20,7 @@ import (
 	"sort"
 	"testing"

-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func TestCompareAndEquals(t *testing.T) {
@@ -14,10 +14,10 @@
 package tsdb

 import (
-	"github.com/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/index"
-	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
 )

 type mockIndexWriter struct {
@@ -20,11 +20,11 @@ import (
 	"unicode/utf8"

 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/tsdb/chunks"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/index"
-	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
 )

 // Querier provides querying access over time series data of a fixed
@@ -287,7 +287,7 @@ func findSetMatches(pattern string) []string {
 		return nil
 	}
 	escaped := false
-	sets := []*strings.Builder{&strings.Builder{}}
+	sets := []*strings.Builder{{}}
 	for i := 4; i < len(pattern)-2; i++ {
 		if escaped {
 			switch {
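The `&strings.Builder{}` → `{}`, `[]Series{` → `{`, and `Intervals{...}` → `{...}` hunks in this commit are all the simplification that `gofmt -s` suggests: inside a composite literal, the element type is implied by the outer slice or map type and can be elided. A small self-contained sketch (the types here are invented for illustration, not taken from the tree):

    package main

    import "fmt"

    type Interval struct{ Mint, Maxt int64 }
    type Intervals []Interval

    func main() {
    	// Fully spelled out: the element type is repeated for every entry.
    	verbose := map[uint64]Intervals{
    		1: Intervals{Interval{1, 3}},
    	}
    	// gofmt -s form: the element types are elided because the map's
    	// value type already determines them.
    	concise := map[uint64]Intervals{
    		1: {{1, 3}},
    	}
    	fmt.Println(verbose[1][0] == concise[1][0]) // true
    }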
@@ -25,12 +25,12 @@ import (
 	"testing"

 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/index"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
-	"github.com/prometheus/tsdb/tsdbutil"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 )

 type mockSeriesSet struct {
@@ -461,9 +461,9 @@ func TestBlockQuerierDelete(t *testing.T) {
 			},
 		},
 		tombstones: &memTombstones{intvlGroups: map[uint64]Intervals{
-			1: Intervals{{1, 3}},
-			2: Intervals{{1, 3}, {6, 10}},
-			3: Intervals{{6, 10}},
+			1: {{1, 3}},
+			2: {{1, 3}, {6, 10}},
+			3: {{6, 10}},
 		}},
 		queries: []query{
 			{
@@ -578,7 +578,7 @@ func TestBaseChunkSeries(t *testing.T) {
 		{
 			series: []refdSeries{
 				{
-					lset: labels.New([]labels.Label{{"a", "a"}}...),
+					lset: labels.New([]labels.Label{{Name: "a", Value: "a"}}...),
 					chunks: []chunks.Meta{
 						{Ref: 29}, {Ref: 45}, {Ref: 245}, {Ref: 123}, {Ref: 4232}, {Ref: 5344},
 						{Ref: 121},
@@ -586,19 +586,19 @@ func TestBaseChunkSeries(t *testing.T) {
 					ref: 12,
 				},
 				{
-					lset: labels.New([]labels.Label{{"a", "a"}, {"b", "b"}}...),
+					lset: labels.New([]labels.Label{{Name: "a", Value: "a"}, {Name: "b", Value: "b"}}...),
 					chunks: []chunks.Meta{
 						{Ref: 82}, {Ref: 23}, {Ref: 234}, {Ref: 65}, {Ref: 26},
 					},
 					ref: 10,
 				},
 				{
-					lset: labels.New([]labels.Label{{"b", "c"}}...),
+					lset: labels.New([]labels.Label{{Name: "b", Value: "c"}}...),
 					chunks: []chunks.Meta{{Ref: 8282}},
 					ref: 1,
 				},
 				{
-					lset: labels.New([]labels.Label{{"b", "b"}}...),
+					lset: labels.New([]labels.Label{{Name: "b", Value: "b"}}...),
 					chunks: []chunks.Meta{
 						{Ref: 829}, {Ref: 239}, {Ref: 2349}, {Ref: 659}, {Ref: 269},
 					},
@@ -611,14 +611,14 @@ func TestBaseChunkSeries(t *testing.T) {
 		{
 			series: []refdSeries{
 				{
-					lset: labels.New([]labels.Label{{"a", "a"}, {"b", "b"}}...),
+					lset: labels.New([]labels.Label{{Name: "a", Value: "a"}, {Name: "b", Value: "b"}}...),
 					chunks: []chunks.Meta{
 						{Ref: 82}, {Ref: 23}, {Ref: 234}, {Ref: 65}, {Ref: 26},
 					},
 					ref: 10,
 				},
 				{
-					lset: labels.New([]labels.Label{{"b", "c"}}...),
+					lset: labels.New([]labels.Label{{Name: "b", Value: "c"}}...),
 					chunks: []chunks.Meta{{Ref: 8282}},
 					ref: 3,
 				},
@@ -1044,7 +1044,7 @@ func TestSeriesIterator(t *testing.T) {
 	})
 }

-// Regression for: https://github.com/prometheus/tsdb/pull/97
+// Regression for: https://github.com/prometheus/prometheus/tsdb/pull/97
 func TestChunkSeriesIterator_DoubleSeek(t *testing.T) {
 	chkMetas := []chunks.Meta{
 		tsdbutil.ChunkFromSamples([]tsdbutil.Sample{}),
@@ -1094,7 +1094,7 @@ func TestChunkSeriesIterator_NextWithMinTime(t *testing.T) {
 }

 func TestPopulatedCSReturnsValidChunkSlice(t *testing.T) {
-	lbls := []labels.Labels{labels.New(labels.Label{"a", "b"})}
+	lbls := []labels.Labels{labels.New(labels.Label{Name: "a", Value: "b"})}
 	chunkMetas := [][]chunks.Meta{
 		{
 			{MinTime: 1, MaxTime: 2, Ref: 1},
@@ -2109,27 +2109,27 @@ func TestClose(t *testing.T) {

 func BenchmarkQueries(b *testing.B) {
 	cases := map[string]labels.Selector{
-		"Eq Matcher: Expansion - 1": labels.Selector{
+		"Eq Matcher: Expansion - 1": {
 			labels.NewEqualMatcher("la", "va"),
 		},
-		"Eq Matcher: Expansion - 2": labels.Selector{
+		"Eq Matcher: Expansion - 2": {
 			labels.NewEqualMatcher("la", "va"),
 			labels.NewEqualMatcher("lb", "vb"),
 		},

-		"Eq Matcher: Expansion - 3": labels.Selector{
+		"Eq Matcher: Expansion - 3": {
 			labels.NewEqualMatcher("la", "va"),
 			labels.NewEqualMatcher("lb", "vb"),
 			labels.NewEqualMatcher("lc", "vc"),
 		},
-		"Regex Matcher: Expansion - 1": labels.Selector{
+		"Regex Matcher: Expansion - 1": {
 			labels.NewMustRegexpMatcher("la", ".*va"),
 		},
-		"Regex Matcher: Expansion - 2": labels.Selector{
+		"Regex Matcher: Expansion - 2": {
 			labels.NewMustRegexpMatcher("la", ".*va"),
 			labels.NewMustRegexpMatcher("lb", ".*vb"),
 		},
-		"Regex Matcher: Expansion - 3": labels.Selector{
+		"Regex Matcher: Expansion - 3": {
 			labels.NewMustRegexpMatcher("la", ".*va"),
 			labels.NewMustRegexpMatcher("lb", ".*vb"),
 			labels.NewMustRegexpMatcher("lc", ".*vc"),
@@ -19,8 +19,8 @@ import (
 	"sort"

 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/encoding"
-	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	"github.com/prometheus/prometheus/tsdb/labels"
 )

 // RecordType represents the data type of a record.
@@ -18,9 +18,9 @@ import (
 	"testing"

 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/encoding"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func TestRecord_EncodeDecode(t *testing.T) {
@@ -23,8 +23,8 @@ import (
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	"github.com/pkg/errors"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/fileutil"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
 )

 // repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in
@@ -18,11 +18,11 @@ import (
 	"path/filepath"
 	"testing"

-	"github.com/prometheus/tsdb/chunks"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/index"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func TestRepairBadIndexVersion(t *testing.T) {
@@ -117,8 +117,8 @@ func TestRepairBadIndexVersion(t *testing.T) {

 	testutil.Ok(t, p.Err())
 	testutil.Equals(t, []labels.Labels{
-		{{"a", "1"}, {"b", "1"}},
-		{{"a", "2"}, {"b", "1"}},
+		{{Name: "a", Value: "1"}, {Name: "b", Value: "1"}},
+		{{Name: "a", Value: "2"}, {Name: "b", Value: "1"}},
 	}, res)

 	meta, _, err := readMetaFile(tmpDbDir)
@@ -18,7 +18,7 @@ import (
 	"crypto/rand"
 	"testing"

-	"github.com/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/labels"
 )

 func BenchmarkMapClone(b *testing.B) {
@@ -25,9 +25,9 @@ import (
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	"github.com/pkg/errors"
-	"github.com/prometheus/tsdb/encoding"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
 )

 const tombstoneFilename = "tombstones"
@@ -22,7 +22,7 @@ import (
 	"time"

 	"github.com/go-kit/kit/log"
-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func TestWriteAndReadbackTombStones(t *testing.T) {
@@ -18,7 +18,7 @@ import (
 	"sort"
 	"testing"

-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 func TestSampleRing(t *testing.T) {
@@ -14,8 +14,8 @@
 package tsdbutil

 import (
-	"github.com/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
 )

 type Sample interface {
tsdb/wal.go
@@ -31,10 +31,10 @@ import (
 	"github.com/go-kit/kit/log/level"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/tsdb/encoding"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb/encoding"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/wal"
 )

 // WALEntryType indicates what data a WAL entry contains.
@@ -392,10 +392,14 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error {
 	if err := csf.Truncate(off); err != nil {
 		return err
 	}
-	csf.Sync()
-	csf.Close()
+	if err := csf.Sync(); err != nil {
+		return err
+	}
+	if err := csf.Close(); err != nil {
+		return err
+	}

-	candidates[0].Close() // need close before remove on platform windows
+	_ = candidates[0].Close() // need close before remove on platform windows
 	if err := fileutil.Replace(csf.Name(), candidates[0].Name()); err != nil {
 		return errors.Wrap(err, "rename compaction segment")
 	}
@@ -416,7 +420,9 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error {
 		return err
 	}
 	// We don't need it to be open.
-	csf.Close()
+	if err := csf.Close(); err != nil {
+		return err
+	}

 	w.mtx.Lock()
 	w.files = append([]*segmentFile{csf}, w.files[len(candidates):]...)
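The Truncate hunk replaces fire-and-forget `csf.Sync()` / `csf.Close()` calls with checked errors: for a write-ahead log, a failed fsync or close means the truncated segment may not be durably on disk, so the failure must surface to the caller. A generic sketch of the same pattern for any `*os.File` (the helper name is mine, not from the tree):

    package main

    import (
    	"log"
    	"os"
    )

    // syncAndClose flushes f to stable storage and closes it,
    // reporting the first failure instead of discarding it.
    func syncAndClose(f *os.File) error {
    	if err := f.Sync(); err != nil {
    		f.Close() // best effort; the Sync error is the one that matters
    		return err
    	}
    	return f.Close()
    }

    func main() {
    	f, err := os.Create("segment.tmp")
    	if err != nil {
    		log.Fatal(err)
    	}
    	if _, err := f.WriteString("wal data"); err != nil {
    		log.Fatal(err)
    	}
    	if err := syncAndClose(f); err != nil {
    		log.Fatal(err)
    	}
    }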
@@ -32,7 +32,7 @@ type liveReaderMetrics struct {
 	readerCorruptionErrors *prometheus.CounterVec
 }

-// LiveReaderMetrics instantiates, registers and returns metrics to be injected
+// NewLiveReaderMetrics instantiates, registers and returns metrics to be injected
 // at LiveReader instantiation.
 func NewLiveReaderMetrics(reg prometheus.Registerer) *liveReaderMetrics {
 	m := &liveReaderMetrics{
@@ -43,7 +43,8 @@ func NewLiveReaderMetrics(reg prometheus.Registerer) *liveReaderMetrics {
 	}

 	if reg != nil {
-		reg.Register(m.readerCorruptionErrors)
+		// TODO(codesome): log error.
+		_ = reg.Register(m.readerCorruptionErrors)
 	}

 	return m
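`prometheus.Registerer.Register` returns an error (most commonly a `prometheus.AlreadyRegisteredError`), so discarding it with `_ =` only makes the omission explicit, as the TODO acknowledges. A common alternative, sketched here against the client_golang API rather than taken from this commit, reuses the collector that is already registered:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    )

    func registerOrReuse(reg prometheus.Registerer, c prometheus.Counter) prometheus.Counter {
    	if err := reg.Register(c); err != nil {
    		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
    			// A collector with the same descriptor exists; use it.
    			return are.ExistingCollector.(prometheus.Counter)
    		}
    		panic(err) // unexpected registration failure
    	}
    	return c
    }

    func main() {
    	reg := prometheus.NewRegistry()
    	c1 := registerOrReuse(reg, prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo"}))
    	c2 := registerOrReuse(reg, prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo"}))
    	fmt.Println(c1 == c2) // true: the second registration reused the first
    }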
@@ -30,8 +30,8 @@ import (
 	"time"

 	"github.com/go-kit/kit/log"
-	tsdb_errors "github.com/prometheus/tsdb/errors"
-	"github.com/prometheus/tsdb/testutil"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 type reader interface {
@@ -32,7 +32,7 @@ import (
 	"github.com/golang/snappy"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
 )

 const (
@@ -387,7 +387,9 @@ func (w *WAL) Repair(origErr error) error {
 	// We expect an error here from r.Err(), so nothing to handle.

 	// We need to pad to the end of the last page in the repaired segment
-	w.flushPage(true)
+	if err := w.flushPage(true); err != nil {
+		return errors.Wrap(err, "flush page in repair")
+	}

 	// We explicitly close even when there is a defer for Windows to be
 	// able to delete it. The defer is in place to close it in case there
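The Repair hunk wraps the new failure path with `errors.Wrap` from github.com/pkg/errors, the error-context convention used throughout this tree: the wrap annotates where the failure happened while preserving the root cause. A tiny sketch of that behavior:

    package main

    import (
    	"fmt"

    	"github.com/pkg/errors"
    )

    func flushPage() error {
    	return errors.New("disk full")
    }

    func repair() error {
    	if err := flushPage(); err != nil {
    		// Adds context for the caller; errors.Cause can still
    		// recover the underlying error.
    		return errors.Wrap(err, "flush page in repair")
    	}
    	return nil
    }

    func main() {
    	err := repair()
    	fmt.Println(err)               // flush page in repair: disk full
    	fmt.Println(errors.Cause(err)) // disk full
    }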
@@ -24,7 +24,7 @@ import (
 	"testing"

 	client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
-	"github.com/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/testutil"
 )

 // TestWALRepair_ReadingError ensures that a repair is run for an error
@@ -47,7 +47,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
 		},
 		// Ensures that the page buffer is big enough to fit
 		// an entire page size without panicking.
-		// https://github.com/prometheus/tsdb/pull/414
+		// https://github.com/prometheus/prometheus/tsdb/pull/414
 		"bad_header": {
 			1,
 			func(f *os.File) {
@@ -27,10 +27,10 @@ import (
 	"time"

 	"github.com/go-kit/kit/log"
-	"github.com/prometheus/tsdb/fileutil"
-	"github.com/prometheus/tsdb/labels"
-	"github.com/prometheus/tsdb/testutil"
-	"github.com/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/prometheus/prometheus/tsdb/testutil"
+	"github.com/prometheus/prometheus/tsdb/wal"
 )

 func TestSegmentWAL_cut(t *testing.T) {
vendor/github.com/dgryski/go-sip13/LICENSE (new, vendored)
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016,2017 Damian Gryski <damian@gryski.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
vendor/github.com/dgryski/go-sip13/sip13.go (new, vendored)
@@ -0,0 +1,88 @@
+// +build !amd64 noasm
+
+package sip13
+
+import (
+	"encoding/binary"
+	"math/bits"
+)
+
+type sip struct {
+	v0, v1, v2, v3 uint64
+}
+
+func (s *sip) round() {
+	s.v0 += s.v1
+	s.v2 += s.v3
+	s.v1 = bits.RotateLeft64(s.v1, 13)
+	s.v3 = bits.RotateLeft64(s.v3, 16)
+	s.v1 ^= s.v0
+	s.v3 ^= s.v2
+	s.v0 = bits.RotateLeft64(s.v0, 32)
+	s.v2 += s.v1
+	s.v0 += s.v3
+	s.v1 = bits.RotateLeft64(s.v1, 17)
+	s.v3 = bits.RotateLeft64(s.v3, 21)
+	s.v1 ^= s.v2
+	s.v3 ^= s.v0
+	s.v2 = bits.RotateLeft64(s.v2, 32)
+}
+
+func Sum64Str(k0, k1 uint64, p string) uint64 {
+	return Sum64(k0, k1, []byte(p))
+}
+
+func Sum64(k0, k1 uint64, p []byte) uint64 {
+
+	s := sip{
+		v0: k0 ^ 0x736f6d6570736575,
+		v1: k1 ^ 0x646f72616e646f6d,
+		v2: k0 ^ 0x6c7967656e657261,
+		v3: k1 ^ 0x7465646279746573,
+	}
+	b := uint64(len(p)) << 56
+
+	for len(p) >= 8 {
+		m := binary.LittleEndian.Uint64(p[:8])
+		s.v3 ^= m
+		s.round()
+		s.v0 ^= m
+		p = p[8:]
+	}
+
+	switch len(p) {
+	case 7:
+		b |= uint64(p[6]) << 48
+		fallthrough
+	case 6:
+		b |= uint64(p[5]) << 40
+		fallthrough
+	case 5:
+		b |= uint64(p[4]) << 32
+		fallthrough
+	case 4:
+		b |= uint64(p[3]) << 24
+		fallthrough
+	case 3:
+		b |= uint64(p[2]) << 16
+		fallthrough
+	case 2:
+		b |= uint64(p[1]) << 8
+		fallthrough
+	case 1:
+		b |= uint64(p[0])
+	}
+
+	// last block
+	s.v3 ^= b
+	s.round()
+	s.v0 ^= b
+
+	// finalization
+	s.v2 ^= 0xff
+	s.round()
+	s.round()
+	s.round()
+
+	return s.v0 ^ s.v1 ^ s.v2 ^ s.v3
+}
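The vendored package exposes exactly the two functions above; per the cROUND/dROUND values in the generator below, this is SipHash-1-3 (one compression round, three finalization rounds). A minimal usage sketch against the stated signature `Sum64(k0, k1 uint64, p []byte) uint64` (the key constants are arbitrary example values):

    package main

    import (
    	"fmt"

    	sip13 "github.com/dgryski/go-sip13"
    )

    func main() {
    	// The two uint64 arguments are the 128-bit SipHash key split in half.
    	const k0, k1 = 0x0706050403020100, 0x0f0e0d0c0b0a0908
    	fmt.Printf("%016x\n", sip13.Sum64(k0, k1, []byte("hello")))
    	fmt.Printf("%016x\n", sip13.Sum64Str(k0, k1, "hello")) // same digest, no []byte conversion on the asm path
    }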
vendor/github.com/dgryski/go-sip13/sip13.py (new, vendored)
@@ -0,0 +1,119 @@
+import peachpy.x86_64
+
+k0 = Argument(uint64_t)
+k1 = Argument(uint64_t)
+p_base = Argument(ptr())
+p_len = Argument(int64_t)
+p_cap = Argument(int64_t)
+
+# siphash 1-3
+cROUND = 1
+dROUND = 3
+
+
+def sipround(v0, v1, v2, v3):
+    ADD(v0, v1)
+    ADD(v2, v3)
+    ROL(v1, 13)
+    ROL(v3, 16)
+    XOR(v1, v0)
+    XOR(v3, v2)
+
+    ROL(v0, 32)
+
+    ADD(v2, v1)
+    ADD(v0, v3)
+    ROL(v1, 17)
+    ROL(v3, 21)
+    XOR(v1, v2)
+    XOR(v3, v0)
+
+    ROL(v2, 32)
+
+
+def makeSip(name, args):
+
+    with Function(name, args, uint64_t, target=uarch.default) as function:
+
+        reg_v0 = GeneralPurposeRegister64()
+        reg_v1 = GeneralPurposeRegister64()
+        reg_v2 = GeneralPurposeRegister64()
+        reg_v3 = GeneralPurposeRegister64()
+
+        LOAD.ARGUMENT(reg_v0, k0)
+        MOV(reg_v2, reg_v0)
+        LOAD.ARGUMENT(reg_v1, k1)
+        MOV(reg_v3, reg_v1)
+
+        reg_magic = GeneralPurposeRegister64()
+        MOV(reg_magic, 0x736f6d6570736575)
+        XOR(reg_v0, reg_magic)
+        MOV(reg_magic, 0x646f72616e646f6d)
+        XOR(reg_v1, reg_magic)
+        MOV(reg_magic, 0x6c7967656e657261)
+        XOR(reg_v2, reg_magic)
+        MOV(reg_magic, 0x7465646279746573)
+        XOR(reg_v3, reg_magic)
+
+        reg_p = GeneralPurposeRegister64()
+        reg_p_len = GeneralPurposeRegister64()
+        LOAD.ARGUMENT(reg_p, p_base)
+        LOAD.ARGUMENT(reg_p_len, p_len)
+
+        reg_b = GeneralPurposeRegister64()
+        MOV(reg_b, reg_p_len)
+        SHL(reg_b, 56)
+
+        reg_m = GeneralPurposeRegister64()
+
+        loop = Loop()
+
+        CMP(reg_p_len, 8)
+        JL(loop.end)
+        with loop:
+            MOV(reg_m, [reg_p])
+
+            XOR(reg_v3, reg_m)
+            for _ in range(0, cROUND):
+                sipround(reg_v0, reg_v1, reg_v2, reg_v3)
+            XOR(reg_v0, reg_m)
+
+            ADD(reg_p, 8)
+            SUB(reg_p_len, 8)
+            CMP(reg_p_len, 8)
+            JGE(loop.begin)
+
+        # no support for jump tables
+        labels = [Label("sw%d" % i) for i in range(0, 8)]
+
+        for i in range(0, 7):
+            CMP(reg_p_len, i)
+            JE(labels[i])
+
+        char = GeneralPurposeRegister64()
+        for i in range(7, 0, -1):
+            LABEL(labels[i])
+            MOVZX(char, byte[reg_p + i - 1])
+            SHL(char, (i - 1) * 8)
+            OR(reg_b, char)
+
+        LABEL(labels[0])
+
+        XOR(reg_v3, reg_b)
+        for _ in range(0, cROUND):
+            sipround(reg_v0, reg_v1, reg_v2, reg_v3)
+        XOR(reg_v0, reg_b)
+
+        XOR(reg_v2, 0xff)
+        for _ in range(0, dROUND):
+            sipround(reg_v0, reg_v1, reg_v2, reg_v3)
+
+        XOR(reg_v0, reg_v1)
+        XOR(reg_v2, reg_v3)
+        XOR(reg_v0, reg_v2)
+
+        RETURN(reg_v0)
+
+
+makeSip("Sum64", (k0, k1, p_base, p_len, p_cap))
+makeSip("Sum64Str", (k0, k1, p_base, p_len))
vendor/github.com/dgryski/go-sip13/sip13_amd64.s (new, vendored)
@@ -0,0 +1,304 @@
+//+build !noasm
+
+// Generated by PeachPy 0.2.0 from sip13.py
+
+
+// func Sum64(k0 uint64, k1 uint64, p_base uintptr, p_len int64, p_cap int64) uint64
+TEXT ·Sum64(SB),4,$0-48
+	MOVQ k0+0(FP), AX
+	MOVQ AX, CX
+	MOVQ k1+8(FP), BX
+	MOVQ BX, DX
+	MOVQ $8317987319222330741, DI
+	XORQ DI, AX
+	MOVQ $7237128888997146477, DI
+	XORQ DI, BX
+	MOVQ $7816392313619706465, DI
+	XORQ DI, CX
+	MOVQ $8387220255154660723, DI
+	XORQ DI, DX
+	MOVQ p_base+16(FP), DI
+	MOVQ p_len+24(FP), SI
+	MOVQ SI, BP
+	SHLQ $56, BP
+	CMPQ SI, $8
+	JLT loop_end
+loop_begin:
+	MOVQ 0(DI), R8
+	XORQ R8, DX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	XORQ R8, AX
+	ADDQ $8, DI
+	SUBQ $8, SI
+	CMPQ SI, $8
+	JGE loop_begin
+loop_end:
+	CMPQ SI, $0
+	JEQ sw0
+	CMPQ SI, $1
+	JEQ sw1
+	CMPQ SI, $2
+	JEQ sw2
+	CMPQ SI, $3
+	JEQ sw3
+	CMPQ SI, $4
+	JEQ sw4
+	CMPQ SI, $5
+	JEQ sw5
+	CMPQ SI, $6
+	JEQ sw6
+	MOVBQZX 6(DI), SI
+	SHLQ $48, SI
+	ORQ SI, BP
+sw6:
+	MOVBQZX 5(DI), SI
+	SHLQ $40, SI
+	ORQ SI, BP
+sw5:
+	MOVBQZX 4(DI), SI
+	SHLQ $32, SI
+	ORQ SI, BP
+sw4:
+	MOVBQZX 3(DI), SI
+	SHLQ $24, SI
+	ORQ SI, BP
+sw3:
+	MOVBQZX 2(DI), SI
+	SHLQ $16, SI
+	ORQ SI, BP
+sw2:
+	MOVBQZX 1(DI), SI
+	SHLQ $8, SI
+	ORQ SI, BP
+sw1:
+	MOVBQZX 0(DI), SI
+	SHLQ $0, SI
+	ORQ SI, BP
+sw0:
+	XORQ BP, DX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	XORQ BP, AX
+	XORQ $255, CX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	XORQ BX, AX
+	XORQ DX, CX
+	XORQ CX, AX
+	MOVQ AX, ret+40(FP)
+	RET
+
+// func Sum64Str(k0 uint64, k1 uint64, p_base uintptr, p_len int64) uint64
+TEXT ·Sum64Str(SB),4,$0-40
+	MOVQ k0+0(FP), AX
+	MOVQ AX, CX
+	MOVQ k1+8(FP), BX
+	MOVQ BX, DX
+	MOVQ $8317987319222330741, DI
+	XORQ DI, AX
+	MOVQ $7237128888997146477, DI
+	XORQ DI, BX
+	MOVQ $7816392313619706465, DI
+	XORQ DI, CX
+	MOVQ $8387220255154660723, DI
+	XORQ DI, DX
+	MOVQ p_base+16(FP), DI
+	MOVQ p_len+24(FP), SI
+	MOVQ SI, BP
+	SHLQ $56, BP
+	CMPQ SI, $8
+	JLT loop_end
+loop_begin:
+	MOVQ 0(DI), R8
+	XORQ R8, DX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	XORQ R8, AX
+	ADDQ $8, DI
+	SUBQ $8, SI
+	CMPQ SI, $8
+	JGE loop_begin
+loop_end:
+	CMPQ SI, $0
+	JEQ sw0
+	CMPQ SI, $1
+	JEQ sw1
+	CMPQ SI, $2
+	JEQ sw2
+	CMPQ SI, $3
+	JEQ sw3
+	CMPQ SI, $4
+	JEQ sw4
+	CMPQ SI, $5
+	JEQ sw5
+	CMPQ SI, $6
+	JEQ sw6
+	MOVBQZX 6(DI), SI
+	SHLQ $48, SI
+	ORQ SI, BP
+sw6:
+	MOVBQZX 5(DI), SI
+	SHLQ $40, SI
+	ORQ SI, BP
+sw5:
+	MOVBQZX 4(DI), SI
+	SHLQ $32, SI
+	ORQ SI, BP
+sw4:
+	MOVBQZX 3(DI), SI
+	SHLQ $24, SI
+	ORQ SI, BP
+sw3:
+	MOVBQZX 2(DI), SI
+	SHLQ $16, SI
+	ORQ SI, BP
+sw2:
+	MOVBQZX 1(DI), SI
+	SHLQ $8, SI
+	ORQ SI, BP
+sw1:
+	MOVBQZX 0(DI), SI
+	SHLQ $0, SI
+	ORQ SI, BP
+sw0:
+	XORQ BP, DX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	XORQ BP, AX
+	XORQ $255, CX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	ADDQ BX, AX
+	ADDQ DX, CX
+	ROLQ $13, BX
+	ROLQ $16, DX
+	XORQ AX, BX
+	XORQ CX, DX
+	ROLQ $32, AX
+	ADDQ BX, CX
+	ADDQ DX, AX
+	ROLQ $17, BX
+	ROLQ $21, DX
+	XORQ CX, BX
+	XORQ AX, DX
+	ROLQ $32, CX
+	XORQ BX, AX
+	XORQ DX, CX
+	XORQ CX, AX
+	MOVQ AX, ret+32(FP)
+	RET
vendor/github.com/dgryski/go-sip13/sip13_stub.go (new, vendored)
@@ -0,0 +1,11 @@
+// +build amd64,!noasm
+
+package sip13
+
+//go:generate python -m peachpy.x86_64 sip13.py -S -o sip13_amd64.s -mabi=goasm
+//go:noescape
+
+func Sum64(k0, k1 uint64, p []byte) uint64
+
+//go:noescape
+func Sum64Str(k0, k1 uint64, p string) uint64
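The stub above and the portable `sip13.go` earlier are the two halves of one build-tag technique: `// +build amd64,!noasm` (comma means AND) selects the declaration-only stub whose bodies come from the generated `sip13_amd64.s`, while `// +build !amd64 noasm` (space means OR) selects the pure-Go fallback, so exactly one implementation is compiled into any given build. Building with `go build -tags noasm` forces the portable path even on amd64, and the `go:generate` line records how the assembly is regenerated from `sip13.py` with PeachPy.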
1
vendor/github.com/prometheus/tsdb/.gitignore
generated
vendored
|
@ -1 +0,0 @@
|
|||
benchout/
|
5
vendor/github.com/prometheus/tsdb/.golangci.yml
generated
vendored
|
@ -1,5 +0,0 @@
|
|||
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
|
||||
linters:
|
||||
enable:
|
||||
- staticcheck
|
||||
disable-all: true
|
20
vendor/github.com/prometheus/tsdb/.travis.yml
generated
vendored
|
@ -1,20 +0,0 @@
|
|||
dist: trusty
|
||||
language: go
|
||||
os:
|
||||
- windows
|
||||
- linux
|
||||
- osx
|
||||
|
||||
go:
|
||||
- 1.12.x
|
||||
|
||||
go_import_path: github.com/prometheus/tsdb
|
||||
|
||||
before_install:
|
||||
- if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install make; fi
|
||||
|
||||
install:
|
||||
- make deps
|
||||
|
||||
script:
|
||||
- if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then make test; else make all; fi
|
108
vendor/github.com/prometheus/tsdb/CHANGELOG.md
generated
vendored
|
@ -1,108 +0,0 @@
|
|||
## master / unreleased
|
||||
|
||||
## 0.10.0
|
||||
|
||||
- [FEATURE] Added `DBReadOnly` to allow opening a database in read-only mode.
|
||||
- `DBReadOnly.Blocks()` exposes a slice of `BlockReader`s.
|
||||
- `BlockReader` interface: the MinTime/MaxTime methods were removed and the full block meta is now exposed via `Meta()`.
|
||||
- [FEATURE] `chunkenc.Chunk.Iterator` method now takes a `chunkenc.Iterator` interface as an argument for reuse.
|
||||
|
||||
## 0.9.1
|
||||
|
||||
- [CHANGE] LiveReader metrics are now injected rather than global.
|
||||
|
||||
## 0.9.0
|
||||
|
||||
- [FEATURE] Provide option to compress WAL records using Snappy. [#609](https://github.com/prometheus/tsdb/pull/609)
|
||||
- [BUGFIX] Re-calculate block size when calling `block.Delete`.
|
||||
- [BUGFIX] Re-encode all head chunks at compaction that are open (being appended to) or outside the Maxt block range. This avoids writing out corrupt data, which could happen when snapshotting with the head included.
|
||||
- [BUGFIX] Improved handling of multiple refs for the same series in WAL reading.
|
||||
- [BUGFIX] `prometheus_tsdb_compactions_failed_total` is now incremented on any compaction failure.
|
||||
- [CHANGE] The meta file `BlockStats` no longer holds size information. This is now dynamically calculated and kept in memory. It also includes the meta file size which was not included before.
|
||||
- [CHANGE] Create a new clean segment when starting the WAL.
|
||||
- [CHANGE] Renamed metric from `prometheus_tsdb_wal_reader_corruption_errors` to `prometheus_tsdb_wal_reader_corruption_errors_total`.
|
||||
- [ENHANCEMENT] Improved atomicity of .tmp block replacement during compaction for the usual case.
|
||||
- [ENHANCEMENT] Improved postings intersection matching.
|
||||
- [ENHANCEMENT] Reduced disk usage for WAL for small setups.
|
||||
- [ENHANCEMENT] Optimize queries using regexp for set lookups.
|
||||
|
||||
|
||||
## 0.8.0
|
||||
|
||||
- [BUGFIX] Calling `Close` more than once on a querier returns an error instead of a panic.
|
||||
- [BUGFIX] Don't panic and recover nicely when running out of disk space.
|
||||
- [BUGFIX] Correctly handle empty labels.
|
||||
- [BUGFIX] Don't crash on an unknown tombstone ref.
|
||||
- [ENHANCEMENT] Re-add FromData function to create a chunk from bytes. It is used by Cortex and Thanos.
|
||||
- [ENHANCEMENT] Simplify mergedPostings.Seek.
|
||||
- [FEATURE] Added `currentSegment` metric for the WAL segment currently being written to.
|
||||
|
||||
## 0.7.1
|
||||
|
||||
- [ENHANCEMENT] Reduce memory usage in mergedPostings.Seek
|
||||
|
||||
## 0.7.0
|
||||
|
||||
- [CHANGE] tsdb now requires golang 1.12 or higher.
|
||||
- [REMOVED] `chunks.NewReader` is removed as it wasn't used anywhere.
|
||||
- [REMOVED] `FromData` is considered unused so was removed.
|
||||
- [FEATURE] Added option WALSegmentSize -1 to disable the WAL.
|
||||
- [BUGFIX] Fixed selectOverlappingDirs to return only the first overlapping blocks.
|
||||
- [BUGFIX] Fsync the meta file to persist it on disk to avoid data loss in case of a host crash.
|
||||
- [BUGFIX] Fix fd and vm_area leak on error path in chunks.NewDirReader.
|
||||
- [BUGFIX] Fix fd and vm_area leak on error path in index.NewFileReader.
|
||||
- [BUGFIX] Force persisting the tombstone file to avoid data loss in case of a host crash.
|
||||
- [BUGFIX] Keep series that are still in WAL in checkpoints.
|
||||
- [ENHANCEMENT] Fast path for EmptyPostings cases in Merge, Intersect and Without.
|
||||
- [ENHANCEMENT] Be smarter in how we look at matchers.
|
||||
- [ENHANCEMENT] PostListings and NotMatcher are now public.
|
||||
|
||||
## 0.6.1
|
||||
|
||||
- [BUGFIX] Update `last` after appending a non-overlapping chunk in `chunks.MergeOverlappingChunks`. [#539](https://github.com/prometheus/tsdb/pull/539)
|
||||
|
||||
## 0.6.0
|
||||
|
||||
- [CHANGE] `AllowOverlappingBlock` is now `AllowOverlappingBlocks`.
|
||||
|
||||
## 0.5.0
|
||||
|
||||
- [FEATURE] Time-overlapping blocks are now allowed. [#370](https://github.com/prometheus/tsdb/pull/370)
|
||||
- Disabled by default and can be enabled via `AllowOverlappingBlock` option.
|
||||
- Added `MergeChunks` function in `chunkenc/xor.go` to merge 2 time-overlapping chunks.
|
||||
- Added `MergeOverlappingChunks` function in `chunks/chunks.go` to merge multiple time-overlapping Chunk Metas.
|
||||
- Added `MinTime` and `MaxTime` method for `BlockReader`.
|
||||
- [FEATURE] New `dump` command to tsdb tool to dump all samples.
|
||||
- [FEATURE] New `encoding` package for common binary encoding/decoding helpers.
|
||||
- Added to remove some code duplication.
|
||||
- [ENHANCEMENT] When closing the db, any running compaction will be cancelled so it doesn't block.
|
||||
- `NewLeveledCompactor` takes a context.
|
||||
- [CHANGE] `prometheus_tsdb_storage_blocks_bytes_total` is now `prometheus_tsdb_storage_blocks_bytes`.
|
||||
- [BUGFIX] Improved Postings Merge performance. Fixes a regression from the previous release.
|
||||
- [BUGFIX] Fixed LiveReader getting into an infinite loop on corrupt WALs.
|
||||
|
||||
## 0.4.0
|
||||
|
||||
- [CHANGE] New `WALSegmentSize` option to override the `DefaultOptions.WALSegmentSize`. Added to allow using smaller WAL files, for example with tmpfs on a Raspberry Pi to minimise SD card wear from the constant WAL writes. As part of this change the `DefaultOptions.WALSegmentSize` constant was also exposed.
|
||||
- [CHANGE] Empty blocks are not written during compaction [#374](https://github.com/prometheus/tsdb/pull/374)
|
||||
- [FEATURE] Size-based retention through `Options.MaxBytes`. As part of this change:
|
||||
- Added new metrics - `prometheus_tsdb_storage_blocks_bytes_total`, `prometheus_tsdb_size_retentions_total`, `prometheus_tsdb_time_retentions_total`
|
||||
- New public interface `SizeReader: Size() int64`
|
||||
- `OpenBlock` signature changed to take a logger.
|
||||
- [REMOVED] `PrefixMatcher` is considered unused so was removed.
|
||||
- [CLEANUP] `Options.WALFlushInterval` is removed as it wasn't used anywhere.
|
||||
- [FEATURE] Add new `LiveReader` to WAL package. Added to allow live tailing of a WAL segment, used by Prometheus Remote Write after the refactor. The main difference between the new reader and the existing `Reader` is that for `LiveReader` a call to `Next()` that returns false does not mean that there will never be more data to read.
|
||||
|
||||
## 0.3.1
|
||||
|
||||
- [BUGFIX] Fixed most Windows tests and some actual bugs with unclosed file readers.
|
||||
|
||||
## 0.3.0
|
||||
|
||||
- [CHANGE] `LastCheckpoint()` used to return just the segment name and now it returns the full relative path.
|
||||
- [CHANGE] `NewSegmentsRangeReader()` can now read over multiple WAL ranges by using the new `SegmentRange{}` struct.
|
||||
- [CHANGE] `CorruptionErr{}` now also exposes the Segment `Dir` which is added when displaying any errors.
|
||||
- [CHANGE] `Head.Init()` is changed to `Head.Init(minValidTime int64)`
|
||||
- [CHANGE] `SymbolTable()` renamed to `SymbolTableSize()` to make the name consistent with the `Block{ symbolTableSize uint64 }` field.
|
||||
- [CHANGE] `wal.Reader{}` now exposes `Segment()` for the current segment being read and `Offset()` for the current offset.
|
||||
- [FEATURE] tsdbutil analyze subcommand to find churn, high cardinality, etc.
|
201
vendor/github.com/prometheus/tsdb/LICENSE
generated
vendored
|
@ -1,201 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
4
vendor/github.com/prometheus/tsdb/MAINTAINERS.md
generated
vendored
|
@ -1,4 +0,0 @@
|
|||
Maintainers of this repository:
|
||||
|
||||
* Krasi Georgiev <kgeorgie@redhat.com> @krasi-georgiev
|
||||
* Goutham Veeramachaneni <gouthamve@gmail.com> @gouthamve
|
33
vendor/github.com/prometheus/tsdb/Makefile
generated
vendored
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2018 The Prometheus Authors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
TSDB_PROJECT_DIR = "."
|
||||
TSDB_CLI_DIR="$(TSDB_PROJECT_DIR)/cmd/tsdb"
|
||||
TSDB_BIN = "$(TSDB_CLI_DIR)/tsdb"
|
||||
TSDB_BENCHMARK_NUM_METRICS ?= 1000
|
||||
TSDB_BENCHMARK_DATASET ?= "$(TSDB_PROJECT_DIR)/testdata/20kseries.json"
|
||||
TSDB_BENCHMARK_OUTPUT_DIR ?= "$(TSDB_CLI_DIR)/benchout"
|
||||
|
||||
include Makefile.common
|
||||
|
||||
build:
|
||||
GO111MODULE=$(GO111MODULE) $(GO) build -o $(TSDB_BIN) $(TSDB_CLI_DIR)
|
||||
|
||||
bench: build
|
||||
@echo ">> running benchmark, writing result to $(TSDB_BENCHMARK_OUTPUT_DIR)"
|
||||
@$(TSDB_BIN) bench write --metrics=$(TSDB_BENCHMARK_NUM_METRICS) --out=$(TSDB_BENCHMARK_OUTPUT_DIR) $(TSDB_BENCHMARK_DATASET)
|
||||
@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/cpu.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/cpuprof.svg
|
||||
@$(GO) tool pprof --inuse_space -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.inuse.svg
|
||||
@$(GO) tool pprof --alloc_space -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.alloc.svg
|
||||
@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/block.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/blockprof.svg
|
||||
@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mutex.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/mutexprof.svg
|
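Because the benchmark knobs above are assigned with `?=`, they can be overridden per invocation, e.g. `make bench TSDB_BENCHMARK_NUM_METRICS=10000`. The `bench` target first builds the tsdb CLI, replays the bundled 20k-series dataset with the requested number of metrics, and then renders the CPU, heap (in-use and allocated), block, and mutex profiles to SVGs in the output directory.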
277
vendor/github.com/prometheus/tsdb/Makefile.common
generated
vendored
|
@ -1,277 +0,0 @@
|
|||
# Copyright 2018 The Prometheus Authors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# A common Makefile that includes rules to be reused in different prometheus projects.
|
||||
# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
|
||||
|
||||
# Example usage :
|
||||
# Create the main Makefile in the root project directory.
|
||||
# include Makefile.common
|
||||
# customTarget:
|
||||
# @echo ">> Running customTarget"
|
||||
#
|
||||
|
||||
# Ensure GOBIN is not set during build so that promu is installed to the correct path
|
||||
unexport GOBIN
|
||||
|
||||
GO ?= go
|
||||
GOFMT ?= $(GO)fmt
|
||||
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
|
||||
GOOPTS ?=
|
||||
GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
|
||||
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
|
||||
|
||||
GO_VERSION ?= $(shell $(GO) version)
|
||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
|
||||
|
||||
GOVENDOR :=
|
||||
GO111MODULE :=
|
||||
ifeq (, $(PRE_GO_111))
|
||||
ifneq (,$(wildcard go.mod))
|
||||
# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
|
||||
GO111MODULE := on
|
||||
|
||||
ifneq (,$(wildcard vendor))
|
||||
# Always use the local vendor/ directory to satisfy the dependencies.
|
||||
GOOPTS := $(GOOPTS) -mod=vendor
|
||||
endif
|
||||
endif
|
||||
else
|
||||
ifneq (,$(wildcard go.mod))
|
||||
ifneq (,$(wildcard vendor))
|
||||
$(warning This repository requires Go >= 1.11 because of Go modules)
|
||||
$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
|
||||
endif
|
||||
else
|
||||
# This repository isn't using Go modules (yet).
|
||||
GOVENDOR := $(FIRST_GOPATH)/bin/govendor
|
||||
endif
|
||||
endif
|
||||
PROMU := $(FIRST_GOPATH)/bin/promu
|
||||
pkgs = ./...
|
||||
|
||||
ifeq (arm, $(GOHOSTARCH))
|
||||
GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
|
||||
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
|
||||
else
|
||||
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
|
||||
endif
|
||||
|
||||
PROMU_VERSION ?= 0.5.0
|
||||
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
|
||||
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.17.1
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
|
||||
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
|
||||
endif
|
||||
endif
|
||||
|
||||
PREFIX ?= $(shell pwd)
|
||||
BIN_DIR ?= $(shell pwd)
|
||||
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
|
||||
DOCKERFILE_PATH ?= ./Dockerfile
|
||||
DOCKERBUILD_CONTEXT ?= ./
|
||||
DOCKER_REPO ?= prom
|
||||
|
||||
DOCKER_ARCHS ?= amd64
|
||||
|
||||
BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
|
||||
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
|
||||
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
|
||||
|
||||
ifeq ($(GOHOSTARCH),amd64)
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
|
||||
# Only supported on amd64
|
||||
test-flags := -race
|
||||
endif
|
||||
endif
|
||||
|
||||
# This rule is used to forward a target like "build" to "common-build". This
|
||||
# allows a new "build" target to be defined in a Makefile which includes this
|
||||
# one and override "common-build" without override warnings.
|
||||
%: common-% ;
|
||||
|
||||
.PHONY: common-all
|
||||
common-all: precheck style check_license lint unused build test
|
||||
|
||||
.PHONY: common-style
|
||||
common-style:
|
||||
@echo ">> checking code style"
|
||||
@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
|
||||
if [ -n "$${fmtRes}" ]; then \
|
||||
echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
|
||||
echo "Please ensure you are using $$($(GO) version) for formatting code."; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
.PHONY: common-check_license
|
||||
common-check_license:
|
||||
@echo ">> checking license header"
|
||||
@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
|
||||
awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
|
||||
done); \
|
||||
if [ -n "$${licRes}" ]; then \
|
||||
echo "license header checking failed:"; echo "$${licRes}"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
.PHONY: common-deps
|
||||
common-deps:
|
||||
@echo ">> getting dependencies"
|
||||
ifdef GO111MODULE
|
||||
GO111MODULE=$(GO111MODULE) $(GO) mod download
|
||||
else
|
||||
$(GO) get $(GOOPTS) -t ./...
|
||||
endif
|
||||
|
||||
.PHONY: common-test-short
|
||||
common-test-short:
|
||||
@echo ">> running short tests"
|
||||
GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)
|
||||
|
||||
.PHONY: common-test
|
||||
common-test:
|
||||
@echo ">> running all tests"
|
||||
GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
|
||||
|
||||
.PHONY: common-format
|
||||
common-format:
|
||||
@echo ">> formatting code"
|
||||
GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
|
||||
|
||||
.PHONY: common-vet
|
||||
common-vet:
|
||||
@echo ">> vetting code"
|
||||
GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
|
||||
|
||||
.PHONY: common-lint
|
||||
common-lint: $(GOLANGCI_LINT)
|
||||
ifdef GOLANGCI_LINT
|
||||
@echo ">> running golangci-lint"
|
||||
ifdef GO111MODULE
|
||||
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
|
||||
# Otherwise staticcheck might fail randomly for some reason not yet explained.
|
||||
GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
|
||||
GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
|
||||
else
|
||||
$(GOLANGCI_LINT) run $(pkgs)
|
||||
endif
|
||||
endif
|
||||
|
||||
# For backward-compatibility.
|
||||
.PHONY: common-staticcheck
|
||||
common-staticcheck: lint
|
||||
|
||||
.PHONY: common-unused
|
||||
common-unused: $(GOVENDOR)
|
||||
ifdef GOVENDOR
|
||||
@echo ">> running check for unused packages"
|
||||
@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
|
||||
else
|
||||
ifdef GO111MODULE
|
||||
@echo ">> running check for unused/missing packages in go.mod"
|
||||
GO111MODULE=$(GO111MODULE) $(GO) mod tidy
|
||||
ifeq (,$(wildcard vendor))
|
||||
@git diff --exit-code -- go.sum go.mod
|
||||
else
|
||||
@echo ">> running check for unused packages in vendor/"
|
||||
GO111MODULE=$(GO111MODULE) $(GO) mod vendor
|
||||
@git diff --exit-code -- go.sum go.mod vendor/
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
.PHONY: common-build
|
||||
common-build: promu
|
||||
@echo ">> building binaries"
|
||||
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)
|
||||
|
||||
.PHONY: common-tarball
|
||||
common-tarball: promu
|
||||
@echo ">> building release tarball"
|
||||
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
|
||||
|
||||
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
|
||||
common-docker: $(BUILD_DOCKER_ARCHS)
|
||||
$(BUILD_DOCKER_ARCHS): common-docker-%:
|
||||
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
|
||||
-f $(DOCKERFILE_PATH) \
|
||||
--build-arg ARCH="$*" \
|
||||
--build-arg OS="linux" \
|
||||
$(DOCKERBUILD_CONTEXT)
|
||||
|
||||
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
|
||||
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
|
||||
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
|
||||
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
|
||||
|
||||
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
|
||||
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
|
||||
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
|
||||
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
|
||||
|
||||
.PHONY: common-docker-manifest
|
||||
common-docker-manifest:
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
|
||||
|
||||
.PHONY: promu
|
||||
promu: $(PROMU)
|
||||
|
||||
$(PROMU):
|
||||
$(eval PROMU_TMP := $(shell mktemp -d))
|
||||
curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
|
||||
mkdir -p $(FIRST_GOPATH)/bin
|
||||
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
|
||||
rm -r $(PROMU_TMP)
|
||||
|
||||
.PHONY: proto
|
||||
proto:
|
||||
@echo ">> generating code from proto files"
|
||||
@./scripts/genproto.sh
|
||||
|
||||
ifdef GOLANGCI_LINT
|
||||
$(GOLANGCI_LINT):
|
||||
mkdir -p $(FIRST_GOPATH)/bin
|
||||
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
|
||||
| sed -e '/install -d/d' \
|
||||
| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
|
||||
endif
|
||||
|
||||
ifdef GOVENDOR
|
||||
.PHONY: $(GOVENDOR)
|
||||
$(GOVENDOR):
|
||||
GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
|
||||
endif
|
||||
|
||||
.PHONY: precheck
|
||||
precheck::
|
||||
|
||||
define PRECHECK_COMMAND_template =
|
||||
precheck:: $(1)_precheck
|
||||
|
||||
PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
|
||||
.PHONY: $(1)_precheck
|
||||
$(1)_precheck:
|
||||
@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
|
||||
echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
|
||||
exit 1; \
|
||||
fi
|
||||
endef
|
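A usage sketch for the `PRECHECK_COMMAND_template` just above (tool name hypothetical): an including Makefile instantiates it per required binary, e.g. `$(eval $(call PRECHECK_COMMAND_template,curl))`, optionally setting `PRECHECK_OPTIONS_curl`. Each instantiation hooks a `curl_precheck` prerequisite onto the double-colon `precheck` target, so the build fails early with a clear message when the probed command cannot run.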
15
vendor/github.com/prometheus/tsdb/README.md
generated
vendored
|
@ -1,15 +0,0 @@
|
|||
# TSDB
|
||||
|
||||
[![Build Status](https://travis-ci.org/prometheus/tsdb.svg?branch=master)](https://travis-ci.org/prometheus/tsdb)
|
||||
[![GoDoc](https://godoc.org/github.com/prometheus/tsdb?status.svg)](https://godoc.org/github.com/prometheus/tsdb)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/tsdb)](https://goreportcard.com/report/github.com/prometheus/tsdb)
|
||||
|
||||
This repository contains the Prometheus storage layer that is used in its 2.x releases.
|
||||
|
||||
A writeup of its design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/).
|
||||
|
||||
Based on the Gorilla TSDB [white paper](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
|
||||
|
||||
Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/).
|
||||
|
||||
See also the [format documentation](docs/format/README.md).
|
656
vendor/github.com/prometheus/tsdb/block.go
generated
vendored
|
@ -1,656 +0,0 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tsdb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/oklog/ulid"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/tsdb/chunkenc"
|
||||
"github.com/prometheus/tsdb/chunks"
|
||||
tsdb_errors "github.com/prometheus/tsdb/errors"
|
||||
"github.com/prometheus/tsdb/fileutil"
|
||||
"github.com/prometheus/tsdb/index"
|
||||
"github.com/prometheus/tsdb/labels"
|
||||
)
|
||||
|
||||
// IndexWriter serializes the index for a block of series data.
|
||||
// The methods must be called in the order they are specified in.
|
||||
type IndexWriter interface {
|
||||
// AddSymbols registers all string symbols that are encountered in series
|
||||
// and other indices.
|
||||
AddSymbols(sym map[string]struct{}) error
|
||||
|
||||
// AddSeries populates the index writer with a series and its offsets
|
||||
// of chunks that the index can reference.
|
||||
// Implementations may require series to be inserted in increasing order by
|
||||
// their labels.
|
||||
// The reference numbers are used to resolve entries in postings lists that
|
||||
// are added later.
|
||||
AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error
|
||||
|
||||
// WriteLabelIndex serializes an index from label names to values.
|
||||
// The passed-in values are chained tuples of strings of the length of names.
|
||||
WriteLabelIndex(names []string, values []string) error
|
||||
|
||||
// WritePostings writes a postings list for a single label pair.
|
||||
// The Postings here contain refs to the series that were added.
|
||||
WritePostings(name, value string, it index.Postings) error
|
||||
|
||||
// Close writes any finalization and closes the resources associated with
|
||||
// the underlying writer.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// IndexReader provides reading access of serialized index data.
|
||||
type IndexReader interface {
|
||||
// Symbols returns a set of string symbols that may occur in series' labels
|
||||
// and indices.
|
||||
Symbols() (map[string]struct{}, error)
|
||||
|
||||
// LabelValues returns the possible label values.
|
||||
LabelValues(names ...string) (index.StringTuples, error)
|
||||
|
||||
// Postings returns the postings list iterator for the label pair.
|
||||
// The Postings here contain the offsets to the series inside the index.
|
||||
// Found IDs are not strictly required to point to a valid Series, e.g. during
|
||||
// background garbage collections.
|
||||
Postings(name, value string) (index.Postings, error)
|
||||
|
||||
// SortedPostings returns a postings list that is reordered to be sorted
|
||||
// by the label set of the underlying series.
|
||||
SortedPostings(index.Postings) index.Postings
|
||||
|
||||
// Series populates the given labels and chunk metas for the series identified
|
||||
// by the reference.
|
||||
// Returns ErrNotFound if the ref does not resolve to a known series.
|
||||
Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error
|
||||
|
||||
// LabelIndices returns a list of string tuples for which a label value index exists.
|
||||
// NOTE: This is deprecated. Use `LabelNames()` instead.
|
||||
LabelIndices() ([][]string, error)
|
||||
|
||||
// LabelNames returns all the unique label names present in the index in sorted order.
|
||||
LabelNames() ([]string, error)
|
||||
|
||||
// Close releases the underlying resources of the reader.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// StringTuples provides access to a sorted list of string tuples.
|
||||
type StringTuples interface {
|
||||
// Total number of tuples in the list.
|
||||
Len() int
|
||||
// At returns the tuple at position i.
|
||||
At(i int) ([]string, error)
|
||||
}
|
||||
|
||||
// ChunkWriter serializes a time block of chunked series data.
|
||||
type ChunkWriter interface {
|
||||
// WriteChunks writes several chunks. The Chunk field of the ChunkMetas
|
||||
// must be populated.
|
||||
// After returning successfully, the Ref fields in the ChunkMetas
|
||||
// are set and can be used to retrieve the chunks from the written data.
|
||||
WriteChunks(chunks ...chunks.Meta) error
|
||||
|
||||
// Close writes any required finalization and closes the resources
|
||||
// associated with the underlying writer.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// ChunkReader provides reading access of serialized time series data.
|
||||
type ChunkReader interface {
|
||||
// Chunk returns the series data chunk with the given reference.
|
||||
Chunk(ref uint64) (chunkenc.Chunk, error)
|
||||
|
||||
// Close releases all underlying resources of the reader.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// BlockReader provides reading access to a data block.
|
||||
type BlockReader interface {
|
||||
// Index returns an IndexReader over the block's data.
|
||||
Index() (IndexReader, error)
|
||||
|
||||
// Chunks returns a ChunkReader over the block's data.
|
||||
Chunks() (ChunkReader, error)
|
||||
|
||||
// Tombstones returns a TombstoneReader over the block's deleted data.
|
||||
Tombstones() (TombstoneReader, error)
|
||||
|
||||
// Meta provides meta information about the block reader.
|
||||
Meta() BlockMeta
|
||||
}
|
||||
|
||||
// Appendable defines an entity to which data can be appended.
|
||||
type Appendable interface {
|
||||
// Appender returns a new Appender against an underlying store.
|
||||
Appender() Appender
|
||||
}
|
||||
|
||||
// BlockMeta provides meta information about a block.
|
||||
type BlockMeta struct {
|
||||
// Unique identifier for the block and its contents. Changes on compaction.
|
||||
ULID ulid.ULID `json:"ulid"`
|
||||
|
||||
// MinTime and MaxTime specify the time range all samples
|
||||
// in the block are in.
|
||||
MinTime int64 `json:"minTime"`
|
||||
MaxTime int64 `json:"maxTime"`
|
||||
|
||||
// Stats about the contents of the block.
|
||||
Stats BlockStats `json:"stats,omitempty"`
|
||||
|
||||
// Information on compactions the block was created from.
|
||||
Compaction BlockMetaCompaction `json:"compaction"`
|
||||
|
||||
// Version of the index format.
|
||||
Version int `json:"version"`
|
||||
}
|
||||
|
||||
// BlockStats contains stats about contents of a block.
|
||||
type BlockStats struct {
|
||||
NumSamples uint64 `json:"numSamples,omitempty"`
|
||||
NumSeries uint64 `json:"numSeries,omitempty"`
|
||||
NumChunks uint64 `json:"numChunks,omitempty"`
|
||||
NumTombstones uint64 `json:"numTombstones,omitempty"`
|
||||
}
|
||||
|
||||
// BlockDesc describes a block by ULID and time range.
|
||||
type BlockDesc struct {
|
||||
ULID ulid.ULID `json:"ulid"`
|
||||
MinTime int64 `json:"minTime"`
|
||||
MaxTime int64 `json:"maxTime"`
|
||||
}
|
||||
|
||||
// BlockMetaCompaction holds information about compactions a block went through.
|
||||
type BlockMetaCompaction struct {
|
||||
// Maximum number of compaction cycles any source block has
|
||||
// gone through.
|
||||
Level int `json:"level"`
|
||||
// ULIDs of all source head blocks that went into the block.
|
||||
Sources []ulid.ULID `json:"sources,omitempty"`
|
||||
// Indicates that during compaction it resulted in a block without any samples
|
||||
// so it should be deleted on the next reload.
|
||||
Deletable bool `json:"deletable,omitempty"`
|
||||
// Short descriptions of the direct blocks that were used to create
|
||||
// this block.
|
||||
Parents []BlockDesc `json:"parents,omitempty"`
|
||||
Failed bool `json:"failed,omitempty"`
|
||||
}
|
||||
|
||||
const indexFilename = "index"
|
||||
const metaFilename = "meta.json"
|
||||
|
||||
func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
|
||||
|
||||
func readMetaFile(dir string) (*BlockMeta, int64, error) {
|
||||
b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
var m BlockMeta
|
||||
|
||||
if err := json.Unmarshal(b, &m); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if m.Version != 1 {
|
||||
return nil, 0, errors.Errorf("unexpected meta file version %d", m.Version)
|
||||
}
|
||||
|
||||
return &m, int64(len(b)), nil
|
||||
}
|
||||
|
||||
func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) {
|
||||
meta.Version = 1
|
||||
|
||||
// Make any changes to the file appear atomic.
|
||||
path := filepath.Join(dir, metaFilename)
|
||||
tmp := path + ".tmp"
|
||||
defer func() {
|
||||
if err := os.RemoveAll(tmp); err != nil {
|
||||
level.Error(logger).Log("msg", "remove tmp file", "err", err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
f, err := os.Create(tmp)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
jsonMeta, err := json.MarshalIndent(meta, "", "\t")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var merr tsdb_errors.MultiError
|
||||
n, err := f.Write(jsonMeta)
|
||||
if err != nil {
|
||||
merr.Add(err)
|
||||
merr.Add(f.Close())
|
||||
return 0, merr.Err()
|
||||
}
|
||||
|
||||
// Force the kernel to persist the file on disk to avoid data loss if the host crashes.
|
||||
if err := f.Sync(); err != nil {
|
||||
merr.Add(err)
|
||||
merr.Add(f.Close())
|
||||
return 0, merr.Err()
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int64(n), fileutil.Replace(tmp, path)
|
||||
}
|
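The sequence above is the usual crash-safe write pattern: the JSON is written to `meta.json.tmp`, fsynced so the kernel persists it, closed, and only then atomically swapped into place via `fileutil.Replace`. A crash at any point therefore leaves either the complete old or the complete new meta file on disk, never a torn one.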
||||
|
||||
// Block represents a directory of time series data covering a continuous time range.
|
||||
type Block struct {
|
||||
mtx sync.RWMutex
|
||||
closing bool
|
||||
pendingReaders sync.WaitGroup
|
||||
|
||||
dir string
|
||||
meta BlockMeta
|
||||
|
||||
// Symbol Table Size in bytes.
|
||||
// We maintain this variable to avoid recalculating it every time.
|
||||
symbolTableSize uint64
|
||||
|
||||
chunkr ChunkReader
|
||||
indexr IndexReader
|
||||
tombstones TombstoneReader
|
||||
|
||||
logger log.Logger
|
||||
|
||||
numBytesChunks int64
|
||||
numBytesIndex int64
|
||||
numBytesTombstone int64
|
||||
numBytesMeta int64
|
||||
}
|
||||
|
||||
// OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
|
||||
// to instantiate chunk structs.
|
||||
func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) {
|
||||
if logger == nil {
|
||||
logger = log.NewNopLogger()
|
||||
}
|
||||
var closers []io.Closer
|
||||
defer func() {
|
||||
if err != nil {
|
||||
var merr tsdb_errors.MultiError
|
||||
merr.Add(err)
|
||||
merr.Add(closeAll(closers))
|
||||
err = merr.Err()
|
||||
}
|
||||
}()
|
||||
meta, sizeMeta, err := readMetaFile(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cr, err := chunks.NewDirReader(chunkDir(dir), pool)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
closers = append(closers, cr)
|
||||
|
||||
ir, err := index.NewFileReader(filepath.Join(dir, indexFilename))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
closers = append(closers, ir)
|
||||
|
||||
tr, sizeTomb, err := readTombstones(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
closers = append(closers, tr)
|
||||
|
||||
pb = &Block{
|
||||
dir: dir,
|
||||
meta: *meta,
|
||||
chunkr: cr,
|
||||
indexr: ir,
|
||||
tombstones: tr,
|
||||
symbolTableSize: ir.SymbolTableSize(),
|
||||
logger: logger,
|
||||
numBytesChunks: cr.Size(),
|
||||
numBytesIndex: ir.Size(),
|
||||
numBytesTombstone: sizeTomb,
|
||||
numBytesMeta: sizeMeta,
|
||||
}
|
||||
return pb, nil
|
||||
}
|
||||
|
||||
// Close closes the on-disk block. It blocks as long as there are readers reading from the block.
|
||||
func (pb *Block) Close() error {
|
||||
pb.mtx.Lock()
|
||||
pb.closing = true
|
||||
pb.mtx.Unlock()
|
||||
|
||||
pb.pendingReaders.Wait()
|
||||
|
||||
var merr tsdb_errors.MultiError
|
||||
|
||||
merr.Add(pb.chunkr.Close())
|
||||
merr.Add(pb.indexr.Close())
|
||||
merr.Add(pb.tombstones.Close())
|
||||
|
||||
return merr.Err()
|
||||
}
|
||||
|
||||
func (pb *Block) String() string {
|
||||
return pb.meta.ULID.String()
|
||||
}
|
||||
|
||||
// Dir returns the directory of the block.
|
||||
func (pb *Block) Dir() string { return pb.dir }
|
||||
|
||||
// Meta returns meta information about the block.
|
||||
func (pb *Block) Meta() BlockMeta { return pb.meta }
|
||||
|
||||
// MinTime returns the min time of the meta.
|
||||
func (pb *Block) MinTime() int64 { return pb.meta.MinTime }
|
||||
|
||||
// MaxTime returns the max time of the meta.
|
||||
func (pb *Block) MaxTime() int64 { return pb.meta.MaxTime }
|
||||
|
||||
// Size returns the number of bytes that the block takes up.
|
||||
func (pb *Block) Size() int64 {
|
||||
return pb.numBytesChunks + pb.numBytesIndex + pb.numBytesTombstone + pb.numBytesMeta
|
||||
}
|
||||
|
||||
// ErrClosing is returned when a block is in the process of being closed.
|
||||
var ErrClosing = errors.New("block is closing")
|
||||
|
||||
func (pb *Block) startRead() error {
|
||||
pb.mtx.RLock()
|
||||
defer pb.mtx.RUnlock()
|
||||
|
||||
if pb.closing {
|
||||
return ErrClosing
|
||||
}
|
||||
pb.pendingReaders.Add(1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Index returns a new IndexReader against the block data.
|
||||
func (pb *Block) Index() (IndexReader, error) {
|
||||
if err := pb.startRead(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blockIndexReader{ir: pb.indexr, b: pb}, nil
|
||||
}
|
||||
|
||||
// Chunks returns a new ChunkReader against the block data.
|
||||
func (pb *Block) Chunks() (ChunkReader, error) {
|
||||
if err := pb.startRead(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blockChunkReader{ChunkReader: pb.chunkr, b: pb}, nil
|
||||
}
|
||||
|
||||
// Tombstones returns a new TombstoneReader against the block data.
|
||||
func (pb *Block) Tombstones() (TombstoneReader, error) {
|
||||
if err := pb.startRead(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blockTombstoneReader{TombstoneReader: pb.tombstones, b: pb}, nil
|
||||
}
|
||||
|
||||
// GetSymbolTableSize returns the Symbol Table Size in the index of this block.
|
||||
func (pb *Block) GetSymbolTableSize() uint64 {
|
||||
return pb.symbolTableSize
|
||||
}
|
||||
|
||||
func (pb *Block) setCompactionFailed() error {
|
||||
pb.meta.Compaction.Failed = true
|
||||
n, err := writeMetaFile(pb.logger, pb.dir, &pb.meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb.numBytesMeta = n
|
||||
return nil
|
||||
}
|
||||
|
||||
type blockIndexReader struct {
|
||||
ir IndexReader
|
||||
b *Block
|
||||
}
|
||||
|
||||
func (r blockIndexReader) Symbols() (map[string]struct{}, error) {
|
||||
s, err := r.ir.Symbols()
|
||||
return s, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
|
||||
}
|
||||
|
||||
func (r blockIndexReader) LabelValues(names ...string) (index.StringTuples, error) {
|
||||
st, err := r.ir.LabelValues(names...)
|
||||
return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
|
||||
}
|
||||
|
||||
func (r blockIndexReader) Postings(name, value string) (index.Postings, error) {
|
||||
p, err := r.ir.Postings(name, value)
|
||||
if err != nil {
|
||||
return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings {
|
||||
return r.ir.SortedPostings(p)
|
||||
}
|
||||
|
||||
func (r blockIndexReader) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error {
|
||||
if err := r.ir.Series(ref, lset, chks); err != nil {
|
||||
return errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r blockIndexReader) LabelIndices() ([][]string, error) {
|
||||
ss, err := r.ir.LabelIndices()
|
||||
return ss, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
|
||||
}
|
||||
|
||||
func (r blockIndexReader) LabelNames() ([]string, error) {
|
||||
return r.b.LabelNames()
|
||||
}
|
||||
|
||||
func (r blockIndexReader) Close() error {
|
||||
r.b.pendingReaders.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
type blockTombstoneReader struct {
|
||||
TombstoneReader
|
||||
b *Block
|
||||
}
|
||||
|
||||
func (r blockTombstoneReader) Close() error {
|
||||
r.b.pendingReaders.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
type blockChunkReader struct {
|
||||
ChunkReader
|
||||
b *Block
|
||||
}
|
||||
|
||||
func (r blockChunkReader) Close() error {
|
||||
r.b.pendingReaders.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete matching series between mint and maxt in the block.
|
||||
func (pb *Block) Delete(mint, maxt int64, ms ...labels.Matcher) error {
|
||||
pb.mtx.Lock()
|
||||
defer pb.mtx.Unlock()
|
||||
|
||||
if pb.closing {
|
||||
return ErrClosing
|
||||
}
|
||||
|
||||
p, err := PostingsForMatchers(pb.indexr, ms...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "select series")
|
||||
}
|
||||
|
||||
ir := pb.indexr
|
||||
|
||||
// Choose only valid postings which have chunks in the time-range.
|
||||
stones := newMemTombstones()
|
||||
|
||||
var lset labels.Labels
|
||||
var chks []chunks.Meta
|
||||
|
||||
Outer:
|
||||
for p.Next() {
|
||||
err := ir.Series(p.At(), &lset, &chks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, chk := range chks {
|
||||
if chk.OverlapsClosedInterval(mint, maxt) {
|
||||
// Delete only until the current values and not beyond.
|
||||
tmin, tmax := clampInterval(mint, maxt, chks[0].MinTime, chks[len(chks)-1].MaxTime)
|
||||
stones.addInterval(p.At(), Interval{tmin, tmax})
|
||||
continue Outer
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if p.Err() != nil {
|
||||
return p.Err()
|
||||
}
|
||||
|
||||
err = pb.tombstones.Iter(func(id uint64, ivs Intervals) error {
|
||||
for _, iv := range ivs {
|
||||
stones.addInterval(id, iv)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb.tombstones = stones
|
||||
pb.meta.Stats.NumTombstones = pb.tombstones.Total()
|
||||
|
||||
n, err := writeTombstoneFile(pb.logger, pb.dir, pb.tombstones)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb.numBytesTombstone = n
|
||||
n, err = writeMetaFile(pb.logger, pb.dir, &pb.meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb.numBytesMeta = n
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones).
|
||||
// If there was a rewrite, then it returns the ULID of the new block written, else nil.
|
||||
func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, error) {
|
||||
numStones := 0
|
||||
|
||||
if err := pb.tombstones.Iter(func(id uint64, ivs Intervals) error {
|
||||
numStones += len(ivs)
|
||||
return nil
|
||||
}); err != nil {
|
||||
// This should never happen, as the iteration function only returns nil.
|
||||
panic(err)
|
||||
}
|
||||
if numStones == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
meta := pb.Meta()
|
||||
uid, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &uid, nil
|
||||
}
|
||||
|
||||
// Snapshot creates a snapshot of the block into dir.
|
||||
func (pb *Block) Snapshot(dir string) error {
|
||||
blockDir := filepath.Join(dir, pb.meta.ULID.String())
|
||||
if err := os.MkdirAll(blockDir, 0777); err != nil {
|
||||
return errors.Wrap(err, "create snapshot block dir")
|
||||
}
|
||||
|
||||
chunksDir := chunkDir(blockDir)
|
||||
if err := os.MkdirAll(chunksDir, 0777); err != nil {
|
||||
return errors.Wrap(err, "create snapshot chunk dir")
|
||||
}
|
||||
|
||||
// Hardlink meta, index and tombstones
|
||||
for _, fname := range []string{
|
||||
metaFilename,
|
||||
indexFilename,
|
||||
tombstoneFilename,
|
||||
} {
|
||||
if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil {
|
||||
return errors.Wrapf(err, "create snapshot %s", fname)
|
||||
}
|
||||
}
|
||||
|
||||
// Hardlink the chunks
|
||||
curChunkDir := chunkDir(pb.dir)
|
||||
files, err := ioutil.ReadDir(curChunkDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "ReadDir the current chunk dir")
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
err := os.Link(filepath.Join(curChunkDir, f.Name()), filepath.Join(chunksDir, f.Name()))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "hardlink a chunk")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// OverlapsClosedInterval returns true if the block overlaps [mint, maxt].
|
||||
func (pb *Block) OverlapsClosedInterval(mint, maxt int64) bool {
|
||||
// The block itself is a half-open interval
|
||||
// [pb.meta.MinTime, pb.meta.MaxTime).
|
||||
return pb.meta.MinTime <= maxt && mint < pb.meta.MaxTime
|
||||
}
|
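Note the deliberate asymmetry in the comparison above: the block's own range is half-open, [MinTime, MaxTime), while the query interval [mint, maxt] is closed, hence `MinTime <= maxt` but strictly `mint < MaxTime`.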
||||
|
||||
// LabelNames returns all the unique label names present in the Block in sorted order.
|
||||
func (pb *Block) LabelNames() ([]string, error) {
|
||||
return pb.indexr.LabelNames()
|
||||
}
|
||||
|
||||
func clampInterval(a, b, mint, maxt int64) (int64, int64) {
|
||||
if a < mint {
|
||||
a = mint
|
||||
}
|
||||
if b > maxt {
|
||||
b = maxt
|
||||
}
|
||||
return a, b
|
||||
}
|
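For illustration, here is a minimal standalone sketch (not part of the diff) of how the half-open block interval [MinTime, MaxTime) interacts with a closed query range [mint, maxt]: the overlap test above, followed by clamping the query to the block. The blockMeta type and the overlaps/clamp helpers are hypothetical stand-ins for the unexported pieces above.

package main

import "fmt"

type blockMeta struct{ MinTime, MaxTime int64 } // half-open: [MinTime, MaxTime)

func overlaps(m blockMeta, mint, maxt int64) bool {
    return m.MinTime <= maxt && mint < m.MaxTime
}

func clamp(a, b, mint, maxt int64) (int64, int64) {
    if a < mint {
        a = mint
    }
    if b > maxt {
        b = maxt
    }
    return a, b
}

func main() {
    m := blockMeta{MinTime: 1000, MaxTime: 2000}
    fmt.Println(overlaps(m, 1500, 2500)) // true: the ranges intersect
    fmt.Println(overlaps(m, 2000, 2500)) // false: 2000 is already outside the block
    fmt.Println(clamp(1500, 2500, m.MinTime, m.MaxTime-1)) // 1500 1999
}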
261 vendor/github.com/prometheus/tsdb/checkpoint.go (generated, vendored)
@ -1,261 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
    "fmt"
    "io"
    "io/ioutil"
    "math"
    "os"
    "path/filepath"
    "strconv"
    "strings"

    "github.com/pkg/errors"
    tsdb_errors "github.com/prometheus/tsdb/errors"
    "github.com/prometheus/tsdb/fileutil"
    "github.com/prometheus/tsdb/wal"
)

// CheckpointStats holds stats about a created checkpoint.
type CheckpointStats struct {
    DroppedSeries     int
    DroppedSamples    int
    DroppedTombstones int
    TotalSeries       int // Processed series including dropped ones.
    TotalSamples      int // Processed samples including dropped ones.
    TotalTombstones   int // Processed tombstones including dropped ones.
}

// LastCheckpoint returns the directory name and index of the most recent checkpoint.
// If dir does not contain any checkpoints, ErrNotFound is returned.
func LastCheckpoint(dir string) (string, int, error) {
    files, err := ioutil.ReadDir(dir)
    if err != nil {
        return "", 0, err
    }
    // Traverse list backwards since there may be multiple checkpoints left.
    for i := len(files) - 1; i >= 0; i-- {
        fi := files[i]

        if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
            continue
        }
        if !fi.IsDir() {
            return "", 0, errors.Errorf("checkpoint %s is not a directory", fi.Name())
        }
        idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
        if err != nil {
            continue
        }
        return filepath.Join(dir, fi.Name()), idx, nil
    }
    return "", 0, ErrNotFound
}

// DeleteCheckpoints deletes all checkpoints in a directory below a given index.
func DeleteCheckpoints(dir string, maxIndex int) error {
    var errs tsdb_errors.MultiError

    files, err := ioutil.ReadDir(dir)
    if err != nil {
        return err
    }
    for _, fi := range files {
        if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
            continue
        }
        index, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
        if err != nil || index >= maxIndex {
            continue
        }
        if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil {
            errs.Add(err)
        }
    }
    return errs.Err()
}

const checkpointPrefix = "checkpoint."

// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL.
// It includes the most recent checkpoint if it exists.
// All series not satisfying keep and samples below mint are dropped.
//
// The checkpoint is stored in a directory named checkpoint.N in the same
// segmented format as the original WAL itself.
// This makes it easy to read it through the WAL package and concatenate
// it with the original WAL.
func Checkpoint(w *wal.WAL, from, to int, keep func(id uint64) bool, mint int64) (*CheckpointStats, error) {
    stats := &CheckpointStats{}
    var sgmReader io.ReadCloser

    {
        var sgmRange []wal.SegmentRange
        dir, idx, err := LastCheckpoint(w.Dir())
        if err != nil && err != ErrNotFound {
            return nil, errors.Wrap(err, "find last checkpoint")
        }
        last := idx + 1
        if err == nil {
            if from > last {
                return nil, fmt.Errorf("unexpected gap to last checkpoint. expected:%v, requested:%v", last, from)
            }
            // Ignore WAL files below the checkpoint. They shouldn't exist to begin with.
            from = last

            sgmRange = append(sgmRange, wal.SegmentRange{Dir: dir, Last: math.MaxInt32})
        }

        sgmRange = append(sgmRange, wal.SegmentRange{Dir: w.Dir(), First: from, Last: to})
        sgmReader, err = wal.NewSegmentsRangeReader(sgmRange...)
        if err != nil {
            return nil, errors.Wrap(err, "create segment reader")
        }
        defer sgmReader.Close()
    }

    cpdir := filepath.Join(w.Dir(), fmt.Sprintf(checkpointPrefix+"%06d", to))
    cpdirtmp := cpdir + ".tmp"

    if err := os.MkdirAll(cpdirtmp, 0777); err != nil {
        return nil, errors.Wrap(err, "create checkpoint dir")
    }
    cp, err := wal.New(nil, nil, cpdirtmp, w.CompressionEnabled())
    if err != nil {
        return nil, errors.Wrap(err, "open checkpoint")
    }

    // Ensures that an early return caused by an error doesn't leave any tmp files.
    defer func() {
        cp.Close()
        os.RemoveAll(cpdirtmp)
    }()

    r := wal.NewReader(sgmReader)

    var (
        series  []RefSeries
        samples []RefSample
        tstones []Stone
        dec     RecordDecoder
        enc     RecordEncoder
        buf     []byte
        recs    [][]byte
    )
    for r.Next() {
        series, samples, tstones = series[:0], samples[:0], tstones[:0]

        // We don't reset the buffer since we batch up multiple records
        // before writing them to the checkpoint.
        // Remember where the record for this iteration starts.
        start := len(buf)
        rec := r.Record()

        switch dec.Type(rec) {
        case RecordSeries:
            series, err = dec.Series(rec, series)
            if err != nil {
                return nil, errors.Wrap(err, "decode series")
            }
            // Drop irrelevant series in place.
            repl := series[:0]
            for _, s := range series {
                if keep(s.Ref) {
                    repl = append(repl, s)
                }
            }
            if len(repl) > 0 {
                buf = enc.Series(repl, buf)
            }
            stats.TotalSeries += len(series)
            stats.DroppedSeries += len(series) - len(repl)

        case RecordSamples:
            samples, err = dec.Samples(rec, samples)
            if err != nil {
                return nil, errors.Wrap(err, "decode samples")
            }
            // Drop irrelevant samples in place.
            repl := samples[:0]
            for _, s := range samples {
                if s.T >= mint {
                    repl = append(repl, s)
                }
            }
            if len(repl) > 0 {
                buf = enc.Samples(repl, buf)
            }
            stats.TotalSamples += len(samples)
            stats.DroppedSamples += len(samples) - len(repl)

        case RecordTombstones:
            tstones, err = dec.Tombstones(rec, tstones)
            if err != nil {
                return nil, errors.Wrap(err, "decode deletes")
            }
            // Drop irrelevant tombstones in place.
            repl := tstones[:0]
            for _, s := range tstones {
                for _, iv := range s.intervals {
                    if iv.Maxt >= mint {
                        repl = append(repl, s)
                        break
                    }
                }
            }
            if len(repl) > 0 {
                buf = enc.Tombstones(repl, buf)
            }
            stats.TotalTombstones += len(tstones)
            stats.DroppedTombstones += len(tstones) - len(repl)

        default:
            return nil, errors.New("invalid record type")
        }
        if len(buf[start:]) == 0 {
            continue // All contents discarded.
        }
        recs = append(recs, buf[start:])

        // Flush records in 1 MB increments.
        if len(buf) > 1*1024*1024 {
            if err := cp.Log(recs...); err != nil {
                return nil, errors.Wrap(err, "flush records")
            }
            buf, recs = buf[:0], recs[:0]
        }
    }
    // If we hit any corruption during checkpointing, repairing is not an option.
    // The head won't know which series records are lost.
    if r.Err() != nil {
        return nil, errors.Wrap(r.Err(), "read segments")
    }

    // Flush remaining records.
    if err := cp.Log(recs...); err != nil {
        return nil, errors.Wrap(err, "flush records")
    }
    if err := cp.Close(); err != nil {
        return nil, errors.Wrap(err, "close checkpoint")
    }
    if err := fileutil.Replace(cpdirtmp, cpdir); err != nil {
        return nil, errors.Wrap(err, "rename checkpoint directory")
    }

    return stats, nil
}
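A hedged usage sketch for the checkpoint flow above, against the old github.com/prometheus/tsdb import path this diff removes. wal.New is called the way the checkpoint code itself calls it (nil logger and registry); the segment bounds, the keep policy, and minValidTime are assumptions for the example, not values the package prescribes.

package main

import (
    "log"

    "github.com/prometheus/tsdb"
    "github.com/prometheus/tsdb/wal"
)

func main() {
    w, err := wal.New(nil, nil, "data/wal", false) // nil logger/metrics, no compression
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    first, last := 0, 10       // assumed segment range to compact
    var minValidTime int64 = 0 // drop samples older than this

    keep := func(id uint64) bool { return true } // example policy: keep every series

    stats, err := tsdb.Checkpoint(w, first, last, keep, minValidTime)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("checkpointed: kept %d/%d series records",
        stats.TotalSeries-stats.DroppedSeries, stats.TotalSeries)

    // Checkpoints below the one just written are no longer needed.
    if _, idx, err := tsdb.LastCheckpoint(w.Dir()); err == nil {
        if err := tsdb.DeleteCheckpoints(w.Dir(), idx); err != nil {
            log.Fatal(err)
        }
    }
}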
200 vendor/github.com/prometheus/tsdb/chunkenc/bstream.go (generated, vendored)
@ -1,200 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It received minor modifications to suit Prometheus's needs.

// Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
// All rights reserved.

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package chunkenc

import "io"

// bstream is a stream of bits.
type bstream struct {
    stream []byte // the data stream
    count  uint8  // how many bits are valid in current byte
}

func newBReader(b []byte) bstream {
    return bstream{stream: b, count: 8}
}

func (b *bstream) bytes() []byte {
    return b.stream
}

type bit bool

const (
    zero bit = false
    one  bit = true
)

func (b *bstream) writeBit(bit bit) {
    if b.count == 0 {
        b.stream = append(b.stream, 0)
        b.count = 8
    }

    i := len(b.stream) - 1

    if bit {
        b.stream[i] |= 1 << (b.count - 1)
    }

    b.count--
}

func (b *bstream) writeByte(byt byte) {
    if b.count == 0 {
        b.stream = append(b.stream, 0)
        b.count = 8
    }

    i := len(b.stream) - 1

    // Fill up the current byte with b.count bits from byt.
    b.stream[i] |= byt >> (8 - b.count)

    b.stream = append(b.stream, 0)
    i++
    b.stream[i] = byt << b.count
}

func (b *bstream) writeBits(u uint64, nbits int) {
    u <<= (64 - uint(nbits))
    for nbits >= 8 {
        byt := byte(u >> 56)
        b.writeByte(byt)
        u <<= 8
        nbits -= 8
    }

    for nbits > 0 {
        b.writeBit((u >> 63) == 1)
        u <<= 1
        nbits--
    }
}

func (b *bstream) readBit() (bit, error) {
    if len(b.stream) == 0 {
        return false, io.EOF
    }

    if b.count == 0 {
        b.stream = b.stream[1:]

        if len(b.stream) == 0 {
            return false, io.EOF
        }
        b.count = 8
    }

    d := (b.stream[0] << (8 - b.count)) & 0x80
    b.count--
    return d != 0, nil
}

func (b *bstream) ReadByte() (byte, error) {
    return b.readByte()
}

func (b *bstream) readByte() (byte, error) {
    if len(b.stream) == 0 {
        return 0, io.EOF
    }

    if b.count == 0 {
        b.stream = b.stream[1:]

        if len(b.stream) == 0 {
            return 0, io.EOF
        }
        return b.stream[0], nil
    }

    if b.count == 8 {
        b.count = 0
        return b.stream[0], nil
    }

    byt := b.stream[0] << (8 - b.count)
    b.stream = b.stream[1:]

    if len(b.stream) == 0 {
        return 0, io.EOF
    }

    // We just advanced the stream and can assume the shift to be 0.
    byt |= b.stream[0] >> b.count

    return byt, nil
}

func (b *bstream) readBits(nbits int) (uint64, error) {
    var u uint64

    for nbits >= 8 {
        byt, err := b.readByte()
        if err != nil {
            return 0, err
        }

        u = (u << 8) | uint64(byt)
        nbits -= 8
    }

    if nbits == 0 {
        return u, nil
    }

    if nbits > int(b.count) {
        u = (u << uint(b.count)) | uint64((b.stream[0]<<(8-b.count))>>(8-b.count))
        nbits -= int(b.count)
        b.stream = b.stream[1:]

        if len(b.stream) == 0 {
            return 0, io.EOF
        }
        b.count = 8
    }

    u = (u << uint(nbits)) | uint64((b.stream[0]<<(8-b.count))>>(8-uint(nbits)))
    b.count -= uint8(nbits)
    return u, nil
}
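A small sketch of the write/read round trip: bits go in MSB-first via writeBits and come back out via readBits. Since bstream is unexported, this would have to live inside package chunkenc (for example in a test file); the function name is hypothetical.

package chunkenc

import "fmt"

// exampleBstream demonstrates that a zero-value bstream is ready for
// writing, while reading starts from a reader created with newBReader.
func exampleBstream() {
    var w bstream
    w.writeBits(0x5, 3)  // the three bits '101'
    w.writeBits(0xAB, 8) // one full byte

    r := newBReader(w.bytes())
    v1, _ := r.readBits(3)
    v2, _ := r.readBits(8)
    fmt.Println(v1 == 0x5, v2 == 0xAB) // true true
}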
138 vendor/github.com/prometheus/tsdb/chunkenc/chunk.go (generated, vendored)
@ -1,138 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunkenc

import (
    "fmt"
    "sync"

    "github.com/pkg/errors"
)

// Encoding is the identifier for a chunk encoding.
type Encoding uint8

func (e Encoding) String() string {
    switch e {
    case EncNone:
        return "none"
    case EncXOR:
        return "XOR"
    }
    return "<unknown>"
}

// The different available chunk encodings.
const (
    EncNone Encoding = iota
    EncXOR
)

// Chunk holds a sequence of sample pairs that can be iterated over and appended to.
type Chunk interface {
    Bytes() []byte
    Encoding() Encoding
    Appender() (Appender, error)
    // The iterator passed as argument is for re-use.
    // Depending on implementation, the iterator can
    // be re-used or a new iterator can be allocated.
    Iterator(Iterator) Iterator
    NumSamples() int
}

// Appender adds sample pairs to a chunk.
type Appender interface {
    Append(int64, float64)
}

// Iterator is a simple iterator that can only get the next value.
type Iterator interface {
    At() (int64, float64)
    Err() error
    Next() bool
}

// NewNopIterator returns a new chunk iterator that does not hold any data.
func NewNopIterator() Iterator {
    return nopIterator{}
}

type nopIterator struct{}

func (nopIterator) At() (int64, float64) { return 0, 0 }
func (nopIterator) Next() bool           { return false }
func (nopIterator) Err() error           { return nil }

// Pool is used to create and reuse chunk references to avoid allocations.
type Pool interface {
    Put(Chunk) error
    Get(e Encoding, b []byte) (Chunk, error)
}

// pool is a memory pool of chunk objects.
type pool struct {
    xor sync.Pool
}

// NewPool returns a new pool.
func NewPool() Pool {
    return &pool{
        xor: sync.Pool{
            New: func() interface{} {
                return &XORChunk{b: bstream{}}
            },
        },
    }
}

func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
    switch e {
    case EncXOR:
        c := p.xor.Get().(*XORChunk)
        c.b.stream = b
        c.b.count = 0
        return c, nil
    }
    return nil, errors.Errorf("invalid encoding %q", e)
}

func (p *pool) Put(c Chunk) error {
    switch c.Encoding() {
    case EncXOR:
        xc, ok := c.(*XORChunk)
        // This may happen often with wrapped chunks. Nothing we can really do about
        // it but returning an error would cause a lot of allocations again. Thus,
        // we just skip it.
        if !ok {
            return nil
        }
        xc.b.stream = nil
        xc.b.count = 0
        p.xor.Put(c)
    default:
        return errors.Errorf("invalid encoding %q", c.Encoding())
    }
    return nil
}

// FromData returns a chunk from a byte slice of chunk data.
// This is there so that users of the library can easily create chunks from
// bytes.
func FromData(e Encoding, d []byte) (Chunk, error) {
    switch e {
    case EncXOR:
        return &XORChunk{b: bstream{count: 0, stream: d}}, nil
    }
    return nil, fmt.Errorf("unknown chunk encoding: %d", e)
}
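A hedged sketch of round-tripping chunk bytes through the Pool above (FromData would work the same way without pooling), using the old github.com/prometheus/tsdb/chunkenc import path this diff removes.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/tsdb/chunkenc"
)

func main() {
    src := chunkenc.NewXORChunk()
    app, err := src.Appender()
    if err != nil {
        log.Fatal(err)
    }
    app.Append(1000, 1.5) // (timestamp in ms, value)

    // Rehydrate the raw bytes through the pool, as a reader would.
    // chunkenc.FromData(chunkenc.EncXOR, src.Bytes()) is the unpooled alternative.
    pool := chunkenc.NewPool()
    chk, err := pool.Get(chunkenc.EncXOR, src.Bytes())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(chk.NumSamples()) // 1

    // Return the chunk so its buffer can be reused.
    if err := pool.Put(chk); err != nil {
        log.Fatal(err)
    }
}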
407 vendor/github.com/prometheus/tsdb/chunkenc/xor.go (generated, vendored)
@ -1,407 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It was modified to accommodate reading from byte slices without modifying
// the underlying bytes, which would panic when reading from mmaped
// read-only byte slices.

// Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
// All rights reserved.

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package chunkenc

import (
    "encoding/binary"
    "math"
    "math/bits"
)

// XORChunk holds XOR encoded sample data.
type XORChunk struct {
    b bstream
}

// NewXORChunk returns a new XOR-encoded chunk.
func NewXORChunk() *XORChunk {
    b := make([]byte, 2, 128)
    return &XORChunk{b: bstream{stream: b, count: 0}}
}

// Encoding returns the encoding type.
func (c *XORChunk) Encoding() Encoding {
    return EncXOR
}

// Bytes returns the underlying byte slice of the chunk.
func (c *XORChunk) Bytes() []byte {
    return c.b.bytes()
}

// NumSamples returns the number of samples in the chunk.
func (c *XORChunk) NumSamples() int {
    return int(binary.BigEndian.Uint16(c.Bytes()))
}

// Appender implements the Chunk interface.
func (c *XORChunk) Appender() (Appender, error) {
    it := c.iterator(nil)

    // To get an appender we must know the state it would have if we had
    // appended all existing data from scratch.
    // We iterate through the end and populate via the iterator's state.
    for it.Next() {
    }
    if err := it.Err(); err != nil {
        return nil, err
    }

    a := &xorAppender{
        b:        &c.b,
        t:        it.t,
        v:        it.val,
        tDelta:   it.tDelta,
        leading:  it.leading,
        trailing: it.trailing,
    }
    if binary.BigEndian.Uint16(a.b.bytes()) == 0 {
        a.leading = 0xff
    }
    return a, nil
}

func (c *XORChunk) iterator(it Iterator) *xorIterator {
    // Should iterators guarantee to act on a copy of the data so it doesn't lock append?
    // When using striped locks to guard access to chunks, probably yes.
    // Could only copy data if the chunk is not completed yet.
    if xorIter, ok := it.(*xorIterator); ok {
        xorIter.Reset(c.b.bytes())
        return xorIter
    }
    return &xorIterator{
        // The first 2 bytes contain chunk headers.
        // We skip that for actual samples.
        br:       newBReader(c.b.bytes()[2:]),
        numTotal: binary.BigEndian.Uint16(c.b.bytes()),
    }
}

// Iterator implements the Chunk interface.
func (c *XORChunk) Iterator(it Iterator) Iterator {
    return c.iterator(it)
}

type xorAppender struct {
    b *bstream

    t      int64
    v      float64
    tDelta uint64

    leading  uint8
    trailing uint8
}

func (a *xorAppender) Append(t int64, v float64) {
    var tDelta uint64
    num := binary.BigEndian.Uint16(a.b.bytes())

    if num == 0 {
        buf := make([]byte, binary.MaxVarintLen64)
        for _, b := range buf[:binary.PutVarint(buf, t)] {
            a.b.writeByte(b)
        }
        a.b.writeBits(math.Float64bits(v), 64)

    } else if num == 1 {
        tDelta = uint64(t - a.t)

        buf := make([]byte, binary.MaxVarintLen64)
        for _, b := range buf[:binary.PutUvarint(buf, tDelta)] {
            a.b.writeByte(b)
        }

        a.writeVDelta(v)

    } else {
        tDelta = uint64(t - a.t)
        dod := int64(tDelta - a.tDelta)

        // Gorilla has a max resolution of seconds, Prometheus milliseconds.
        // Thus we use higher value range steps with larger bit size.
        switch {
        case dod == 0:
            a.b.writeBit(zero)
        case bitRange(dod, 14):
            a.b.writeBits(0x02, 2) // '10'
            a.b.writeBits(uint64(dod), 14)
        case bitRange(dod, 17):
            a.b.writeBits(0x06, 3) // '110'
            a.b.writeBits(uint64(dod), 17)
        case bitRange(dod, 20):
            a.b.writeBits(0x0e, 4) // '1110'
            a.b.writeBits(uint64(dod), 20)
        default:
            a.b.writeBits(0x0f, 4) // '1111'
            a.b.writeBits(uint64(dod), 64)
        }

        a.writeVDelta(v)
    }

    a.t = t
    a.v = v
    binary.BigEndian.PutUint16(a.b.bytes(), num+1)
    a.tDelta = tDelta
}

func bitRange(x int64, nbits uint8) bool {
    return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1)
}

func (a *xorAppender) writeVDelta(v float64) {
    vDelta := math.Float64bits(v) ^ math.Float64bits(a.v)

    if vDelta == 0 {
        a.b.writeBit(zero)
        return
    }
    a.b.writeBit(one)

    leading := uint8(bits.LeadingZeros64(vDelta))
    trailing := uint8(bits.TrailingZeros64(vDelta))

    // Clamp number of leading zeros to avoid overflow when encoding.
    if leading >= 32 {
        leading = 31
    }

    if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing {
        a.b.writeBit(zero)
        a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing))
    } else {
        a.leading, a.trailing = leading, trailing

        a.b.writeBit(one)
        a.b.writeBits(uint64(leading), 5)

        // Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have.
        // Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0).
        // So instead we write out a 0 and adjust it back to 64 on unpacking.
        sigbits := 64 - leading - trailing
        a.b.writeBits(uint64(sigbits), 6)
        a.b.writeBits(vDelta>>trailing, int(sigbits))
    }
}

type xorIterator struct {
    br       bstream
    numTotal uint16
    numRead  uint16

    t   int64
    val float64

    leading  uint8
    trailing uint8

    tDelta uint64
    err    error
}

func (it *xorIterator) At() (int64, float64) {
    return it.t, it.val
}

func (it *xorIterator) Err() error {
    return it.err
}

func (it *xorIterator) Reset(b []byte) {
    // The first 2 bytes contain chunk headers.
    // We skip that for actual samples.
    it.br = newBReader(b[2:])
    it.numTotal = binary.BigEndian.Uint16(b)

    it.numRead = 0
    it.t = 0
    it.val = 0
    it.leading = 0
    it.trailing = 0
    it.tDelta = 0
    it.err = nil
}

func (it *xorIterator) Next() bool {
    if it.err != nil || it.numRead == it.numTotal {
        return false
    }

    if it.numRead == 0 {
        t, err := binary.ReadVarint(&it.br)
        if err != nil {
            it.err = err
            return false
        }
        v, err := it.br.readBits(64)
        if err != nil {
            it.err = err
            return false
        }
        it.t = t
        it.val = math.Float64frombits(v)

        it.numRead++
        return true
    }
    if it.numRead == 1 {
        tDelta, err := binary.ReadUvarint(&it.br)
        if err != nil {
            it.err = err
            return false
        }
        it.tDelta = tDelta
        it.t = it.t + int64(it.tDelta)

        return it.readValue()
    }

    var d byte
    // Read the delta-of-delta prefix code.
    for i := 0; i < 4; i++ {
        d <<= 1
        bit, err := it.br.readBit()
        if err != nil {
            it.err = err
            return false
        }
        if bit == zero {
            break
        }
        d |= 1
    }
    var sz uint8
    var dod int64
    switch d {
    case 0x00:
        // dod == 0
    case 0x02:
        sz = 14
    case 0x06:
        sz = 17
    case 0x0e:
        sz = 20
    case 0x0f:
        bits, err := it.br.readBits(64)
        if err != nil {
            it.err = err
            return false
        }

        dod = int64(bits)
    }

    if sz != 0 {
        bits, err := it.br.readBits(int(sz))
        if err != nil {
            it.err = err
            return false
        }
        if bits > (1 << (sz - 1)) {
            // The value is negative; sign-extend it.
            bits = bits - (1 << sz)
        }
        dod = int64(bits)
    }

    it.tDelta = uint64(int64(it.tDelta) + dod)
    it.t = it.t + int64(it.tDelta)

    return it.readValue()
}

func (it *xorIterator) readValue() bool {
    bit, err := it.br.readBit()
    if err != nil {
        it.err = err
        return false
    }

    if bit == zero {
        // it.val = it.val
    } else {
        bit, err := it.br.readBit()
        if err != nil {
            it.err = err
            return false
        }
        if bit == zero {
            // Reuse leading/trailing zero bits.
            // it.leading, it.trailing = it.leading, it.trailing
        } else {
            bits, err := it.br.readBits(5)
            if err != nil {
                it.err = err
                return false
            }
            it.leading = uint8(bits)

            bits, err = it.br.readBits(6)
            if err != nil {
                it.err = err
                return false
            }
            mbits := uint8(bits)
            // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder.
            if mbits == 0 {
                mbits = 64
            }
            it.trailing = 64 - it.leading - mbits
        }

        mbits := int(64 - it.leading - it.trailing)
        bits, err := it.br.readBits(mbits)
        if err != nil {
            it.err = err
            return false
        }
        vbits := math.Float64bits(it.val)
        vbits ^= (bits << it.trailing)
        it.val = math.Float64frombits(vbits)
    }

    it.numRead++
    return true
}
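A hedged end-to-end sketch of the XOR encoding above: append a few (timestamp, value) pairs, then iterate them back. Timestamps are milliseconds by convention; the sample values here are made up.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/tsdb/chunkenc"
)

func main() {
    c := chunkenc.NewXORChunk()
    app, err := c.Appender()
    if err != nil {
        log.Fatal(err)
    }
    // Regular 15s scrape intervals compress very well: after the first
    // two samples, each delta-of-delta is 0 and costs a single bit.
    for i := int64(0); i < 4; i++ {
        app.Append(1000+i*15000, float64(i)*0.5)
    }

    it := c.Iterator(nil)
    for it.Next() {
        t, v := it.At()
        fmt.Println(t, v)
    }
    if err := it.Err(); err != nil {
        log.Fatal(err)
    }
    fmt.Println("samples:", c.NumSamples(), "bytes:", len(c.Bytes()))
}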
512 vendor/github.com/prometheus/tsdb/chunks/chunks.go (generated, vendored)
@ -1,512 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunks

import (
    "bufio"
    "encoding/binary"
    "fmt"
    "hash"
    "hash/crc32"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strconv"

    "github.com/pkg/errors"
    "github.com/prometheus/tsdb/chunkenc"
    tsdb_errors "github.com/prometheus/tsdb/errors"
    "github.com/prometheus/tsdb/fileutil"
)

const (
    // MagicChunks is 4 bytes at the head of a series file.
    MagicChunks = 0x85BD40DD
    // MagicChunksSize is the size in bytes of MagicChunks.
    MagicChunksSize = 4

    chunksFormatV1          = 1
    ChunksFormatVersionSize = 1

    chunkHeaderSize = MagicChunksSize + ChunksFormatVersionSize
)

// Meta holds information about a chunk of data.
type Meta struct {
    // Ref and Chunk hold either a reference that can be used to retrieve
    // chunk data or the data itself.
    // Generally, only one of them is set.
    Ref   uint64
    Chunk chunkenc.Chunk

    // Time range the data covers.
    // When MaxTime == math.MaxInt64 the chunk is still open and being appended to.
    MinTime, MaxTime int64
}

// writeHash writes the chunk encoding and raw data into the provided hash.
func (cm *Meta) writeHash(h hash.Hash, buf []byte) error {
    buf = append(buf[:0], byte(cm.Chunk.Encoding()))
    if _, err := h.Write(buf[:1]); err != nil {
        return err
    }
    if _, err := h.Write(cm.Chunk.Bytes()); err != nil {
        return err
    }
    return nil
}

// OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt].
func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
    // The chunk itself is a closed interval [cm.MinTime, cm.MaxTime].
    return cm.MinTime <= maxt && mint <= cm.MaxTime
}

var (
    errInvalidSize = fmt.Errorf("invalid size")
)

var castagnoliTable *crc32.Table

func init() {
    castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}

// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
// polynomial may be easily changed in one location at a later time, if necessary.
func newCRC32() hash.Hash32 {
    return crc32.New(castagnoliTable)
}

// Writer implements the ChunkWriter interface for the standard
// serialization format.
type Writer struct {
    dirFile *os.File
    files   []*os.File
    wbuf    *bufio.Writer
    n       int64
    crc32   hash.Hash
    buf     [binary.MaxVarintLen32]byte

    segmentSize int64
}

const (
    defaultChunkSegmentSize = 512 * 1024 * 1024
)

// NewWriter returns a new writer against the given directory.
func NewWriter(dir string) (*Writer, error) {
    if err := os.MkdirAll(dir, 0777); err != nil {
        return nil, err
    }
    dirFile, err := fileutil.OpenDir(dir)
    if err != nil {
        return nil, err
    }
    cw := &Writer{
        dirFile:     dirFile,
        n:           0,
        crc32:       newCRC32(),
        segmentSize: defaultChunkSegmentSize,
    }
    return cw, nil
}

func (w *Writer) tail() *os.File {
    if len(w.files) == 0 {
        return nil
    }
    return w.files[len(w.files)-1]
}

// finalizeTail writes all pending data to the current tail file,
// truncates its size, and closes it.
func (w *Writer) finalizeTail() error {
    tf := w.tail()
    if tf == nil {
        return nil
    }

    if err := w.wbuf.Flush(); err != nil {
        return err
    }
    if err := tf.Sync(); err != nil {
        return err
    }
    // As the file was pre-allocated, we truncate any superfluous zero bytes.
    off, err := tf.Seek(0, io.SeekCurrent)
    if err != nil {
        return err
    }
    if err := tf.Truncate(off); err != nil {
        return err
    }

    return tf.Close()
}

func (w *Writer) cut() error {
    // Sync current tail to disk and close.
    if err := w.finalizeTail(); err != nil {
        return err
    }

    p, _, err := nextSequenceFile(w.dirFile.Name())
    if err != nil {
        return err
    }
    f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE, 0666)
    if err != nil {
        return err
    }
    if err = fileutil.Preallocate(f, w.segmentSize, true); err != nil {
        return err
    }
    if err = w.dirFile.Sync(); err != nil {
        return err
    }

    // Write header metadata for new file.
    metab := make([]byte, 8)
    binary.BigEndian.PutUint32(metab[:MagicChunksSize], MagicChunks)
    metab[4] = chunksFormatV1

    if _, err := f.Write(metab); err != nil {
        return err
    }

    w.files = append(w.files, f)
    if w.wbuf != nil {
        w.wbuf.Reset(f)
    } else {
        w.wbuf = bufio.NewWriterSize(f, 8*1024*1024)
    }
    w.n = 8

    return nil
}

func (w *Writer) write(b []byte) error {
    n, err := w.wbuf.Write(b)
    w.n += int64(n)
    return err
}

// MergeOverlappingChunks removes the samples whose timestamps overlap.
// The last appearing sample is retained when there is an overlap.
// This assumes that `chks []Meta` is sorted w.r.t. MinTime.
func MergeOverlappingChunks(chks []Meta) ([]Meta, error) {
    if len(chks) < 2 {
        return chks, nil
    }
    newChks := make([]Meta, 0, len(chks)) // Will contain the merged chunks.
    newChks = append(newChks, chks[0])
    last := 0
    for _, c := range chks[1:] {
        // We need to check only the last chunk in newChks.
        // Reason: (1) newChks[last-1].MaxTime < newChks[last].MinTime (non overlapping)
        //         (2) As chks are sorted w.r.t. MinTime, newChks[last].MinTime < c.MinTime.
        // So c never overlaps with newChks[last-1] or anything before that.
        if c.MinTime > newChks[last].MaxTime {
            newChks = append(newChks, c)
            last++
            continue
        }
        nc := &newChks[last]
        if c.MaxTime > nc.MaxTime {
            nc.MaxTime = c.MaxTime
        }
        chk, err := MergeChunks(nc.Chunk, c.Chunk)
        if err != nil {
            return nil, err
        }
        nc.Chunk = chk
    }

    return newChks, nil
}

// MergeChunks vertically merges a and b, i.e., if there is any sample
// with same timestamp in both a and b, the sample in a is discarded.
func MergeChunks(a, b chunkenc.Chunk) (*chunkenc.XORChunk, error) {
    newChunk := chunkenc.NewXORChunk()
    app, err := newChunk.Appender()
    if err != nil {
        return nil, err
    }
    ait := a.Iterator(nil)
    bit := b.Iterator(nil)
    aok, bok := ait.Next(), bit.Next()
    for aok && bok {
        at, av := ait.At()
        bt, bv := bit.At()
        if at < bt {
            app.Append(at, av)
            aok = ait.Next()
        } else if bt < at {
            app.Append(bt, bv)
            bok = bit.Next()
        } else {
            app.Append(bt, bv)
            aok = ait.Next()
            bok = bit.Next()
        }
    }
    for aok {
        at, av := ait.At()
        app.Append(at, av)
        aok = ait.Next()
    }
    for bok {
        bt, bv := bit.At()
        app.Append(bt, bv)
        bok = bit.Next()
    }
    if ait.Err() != nil {
        return nil, ait.Err()
    }
    if bit.Err() != nil {
        return nil, bit.Err()
    }
    return newChunk, nil
}

func (w *Writer) WriteChunks(chks ...Meta) error {
    // Calculate maximum space we need and cut a new segment in case
    // we don't fit into the current one.
    maxLen := int64(binary.MaxVarintLen32) // The number of chunks.
    for _, c := range chks {
        maxLen += binary.MaxVarintLen32 + 1 // The number of bytes in the chunk and its encoding.
        maxLen += int64(len(c.Chunk.Bytes()))
        maxLen += 4 // The 4 bytes of crc32.
    }
    newsz := w.n + maxLen

    if w.wbuf == nil || w.n > w.segmentSize || newsz > w.segmentSize && maxLen <= w.segmentSize {
        if err := w.cut(); err != nil {
            return err
        }
    }

    var seq = uint64(w.seq()) << 32
    for i := range chks {
        chk := &chks[i]

        chk.Ref = seq | uint64(w.n)

        n := binary.PutUvarint(w.buf[:], uint64(len(chk.Chunk.Bytes())))

        if err := w.write(w.buf[:n]); err != nil {
            return err
        }
        w.buf[0] = byte(chk.Chunk.Encoding())
        if err := w.write(w.buf[:1]); err != nil {
            return err
        }
        if err := w.write(chk.Chunk.Bytes()); err != nil {
            return err
        }

        w.crc32.Reset()
        if err := chk.writeHash(w.crc32, w.buf[:]); err != nil {
            return err
        }
        if err := w.write(w.crc32.Sum(w.buf[:0])); err != nil {
            return err
        }
    }

    return nil
}

func (w *Writer) seq() int {
    return len(w.files) - 1
}

func (w *Writer) Close() error {
    if err := w.finalizeTail(); err != nil {
        return err
    }

    // Close the dir file; if it stays open, rename fails on Windows.
    return w.dirFile.Close()
}

// ByteSlice abstracts a byte slice.
type ByteSlice interface {
    Len() int
    Range(start, end int) []byte
}

type realByteSlice []byte

func (b realByteSlice) Len() int {
    return len(b)
}

func (b realByteSlice) Range(start, end int) []byte {
    return b[start:end]
}

func (b realByteSlice) Sub(start, end int) ByteSlice {
    return b[start:end]
}

// Reader implements a ChunkReader for a serialized byte stream
// of series data.
type Reader struct {
    bs   []ByteSlice // The underlying bytes holding the encoded series data.
    cs   []io.Closer // Closers for resources behind the byte slices.
    size int64       // The total size of bytes in the reader.
    pool chunkenc.Pool
}

func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, error) {
    cr := Reader{pool: pool, bs: bs, cs: cs}
    var totalSize int64

    for i, b := range cr.bs {
        if b.Len() < chunkHeaderSize {
            return nil, errors.Wrapf(errInvalidSize, "invalid chunk header in segment %d", i)
        }
        // Verify magic number.
        if m := binary.BigEndian.Uint32(b.Range(0, MagicChunksSize)); m != MagicChunks {
            return nil, errors.Errorf("invalid magic number %x", m)
        }

        // Verify chunk format version.
        if v := int(b.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 {
            return nil, errors.Errorf("invalid chunk format version %d", v)
        }
        totalSize += int64(b.Len())
    }
    cr.size = totalSize
    return &cr, nil
}

// NewDirReader returns a new Reader against sequentially numbered files in the
// given directory.
func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
    files, err := sequenceFiles(dir)
    if err != nil {
        return nil, err
    }
    if pool == nil {
        pool = chunkenc.NewPool()
    }

    var (
        bs   []ByteSlice
        cs   []io.Closer
        merr tsdb_errors.MultiError
    )
    for _, fn := range files {
        f, err := fileutil.OpenMmapFile(fn)
        if err != nil {
            merr.Add(errors.Wrap(err, "mmap files"))
            merr.Add(closeAll(cs))
            return nil, merr
        }
        cs = append(cs, f)
        bs = append(bs, realByteSlice(f.Bytes()))
    }

    reader, err := newReader(bs, cs, pool)
    if err != nil {
        merr.Add(err)
        merr.Add(closeAll(cs))
        return nil, merr
    }
    return reader, nil
}

func (s *Reader) Close() error {
    return closeAll(s.cs)
}

// Size returns the size of the chunks.
func (s *Reader) Size() int64 {
    return s.size
}

// Chunk returns a chunk from a given reference.
func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) {
    var (
        sgmSeq    = int(ref >> 32)
        sgmOffset = int((ref << 32) >> 32)
    )
    if sgmSeq >= len(s.bs) {
        return nil, errors.Errorf("reference sequence %d out of range", sgmSeq)
    }
    chkS := s.bs[sgmSeq]

    if sgmOffset >= chkS.Len() {
        return nil, errors.Errorf("offset %d beyond data size %d", sgmOffset, chkS.Len())
    }
    // With the minimum chunk length this should never cause us reading
    // over the end of the slice.
    chk := chkS.Range(sgmOffset, sgmOffset+binary.MaxVarintLen32)

    chkLen, n := binary.Uvarint(chk)
    if n <= 0 {
        return nil, errors.Errorf("reading chunk length failed with %d", n)
    }
    chk = chkS.Range(sgmOffset+n, sgmOffset+n+1+int(chkLen))

    return s.pool.Get(chunkenc.Encoding(chk[0]), chk[1:1+chkLen])
}

func nextSequenceFile(dir string) (string, int, error) {
    names, err := fileutil.ReadDir(dir)
    if err != nil {
        return "", 0, err
    }

    i := uint64(0)
    for _, n := range names {
        j, err := strconv.ParseUint(n, 10, 64)
        if err != nil {
            continue
        }
        i = j
    }
    return filepath.Join(dir, fmt.Sprintf("%0.6d", i+1)), int(i + 1), nil
}

func sequenceFiles(dir string) ([]string, error) {
    files, err := ioutil.ReadDir(dir)
    if err != nil {
        return nil, err
    }
    var res []string

    for _, fi := range files {
        if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
            continue
        }
        res = append(res, filepath.Join(dir, fi.Name()))
    }
    return res, nil
}

func closeAll(cs []io.Closer) error {
    var merr tsdb_errors.MultiError

    for _, c := range cs {
        merr.Add(c.Close())
    }
    return merr.Err()
}
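The Ref assigned in WriteChunks above packs the segment sequence number into the upper 32 bits and the byte offset within that segment into the lower 32, which is exactly how Reader.Chunk unpacks it. A standalone sketch of that packing, with made-up numbers:

package main

import "fmt"

func main() {
    var seq, offset uint64 = 3, 12345 // hypothetical segment 3, offset 12345

    ref := seq<<32 | offset

    // Unpack the same way Reader.Chunk does.
    gotSeq := ref >> 32
    gotOffset := (ref << 32) >> 32
    fmt.Println(gotSeq, gotOffset) // 3 12345
}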
1034 vendor/github.com/prometheus/tsdb/compact.go (generated, vendored)
File diff suppressed because it is too large.

1323 vendor/github.com/prometheus/tsdb/db.go (generated, vendored)
File diff suppressed because it is too large.
244 vendor/github.com/prometheus/tsdb/encoding/encoding.go (generated, vendored)
@ -1,244 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package encoding

import (
    "encoding/binary"
    "hash"
    "hash/crc32"
    "unsafe"

    "github.com/pkg/errors"
)

var (
    ErrInvalidSize     = errors.New("invalid size")
    ErrInvalidChecksum = errors.New("invalid checksum")
)

// Encbuf is a helper type to populate a byte slice with various types.
type Encbuf struct {
    B []byte
    C [binary.MaxVarintLen64]byte
}

func (e *Encbuf) Reset()      { e.B = e.B[:0] }
func (e *Encbuf) Get() []byte { return e.B }
func (e *Encbuf) Len() int    { return len(e.B) }

func (e *Encbuf) PutString(s string) { e.B = append(e.B, s...) }
func (e *Encbuf) PutByte(c byte)     { e.B = append(e.B, c) }

func (e *Encbuf) PutBE32int(x int)      { e.PutBE32(uint32(x)) }
func (e *Encbuf) PutUvarint32(x uint32) { e.PutUvarint64(uint64(x)) }
func (e *Encbuf) PutBE64int64(x int64)  { e.PutBE64(uint64(x)) }
func (e *Encbuf) PutUvarint(x int)      { e.PutUvarint64(uint64(x)) }

func (e *Encbuf) PutBE32(x uint32) {
    binary.BigEndian.PutUint32(e.C[:], x)
    e.B = append(e.B, e.C[:4]...)
}

func (e *Encbuf) PutBE64(x uint64) {
    binary.BigEndian.PutUint64(e.C[:], x)
    e.B = append(e.B, e.C[:8]...)
}

func (e *Encbuf) PutUvarint64(x uint64) {
    n := binary.PutUvarint(e.C[:], x)
    e.B = append(e.B, e.C[:n]...)
}

func (e *Encbuf) PutVarint64(x int64) {
    n := binary.PutVarint(e.C[:], x)
    e.B = append(e.B, e.C[:n]...)
}

// PutUvarintStr writes a string to the buffer prefixed by its varint length (in bytes!).
func (e *Encbuf) PutUvarintStr(s string) {
    b := *(*[]byte)(unsafe.Pointer(&s))
    e.PutUvarint(len(b))
    e.PutString(s)
}

// PutHash appends a hash over the buffer's current contents to the buffer.
func (e *Encbuf) PutHash(h hash.Hash) {
    h.Reset()
    _, err := h.Write(e.B)
    if err != nil {
        panic(err) // The CRC32 implementation does not error.
    }
    e.B = h.Sum(e.B)
}

// Decbuf provides safe methods to extract data from a byte slice. It does all
// necessary bounds checking and advancing of the byte slice.
// Several datums can be extracted without checking for errors. However, before using
// any datum, the Err() method must be checked.
type Decbuf struct {
    B []byte
    E error
}

// NewDecbufAt returns a new decoding buffer. It expects the first 4 bytes
// after offset to hold the big endian encoded content length, followed by the contents and the expected
// checksum.
func NewDecbufAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf {
    if bs.Len() < off+4 {
        return Decbuf{E: ErrInvalidSize}
    }
    b := bs.Range(off, off+4)
    l := int(binary.BigEndian.Uint32(b))

    if bs.Len() < off+4+l+4 {
        return Decbuf{E: ErrInvalidSize}
    }

    // Load bytes holding the contents plus a CRC32 checksum.
    b = bs.Range(off+4, off+4+l+4)
    dec := Decbuf{B: b[:len(b)-4]}

    if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.Crc32(castagnoliTable) != exp {
        return Decbuf{E: ErrInvalidChecksum}
    }
    return dec
}

// NewDecbufUvarintAt returns a new decoding buffer. It expects the first bytes
// after offset to hold the uvarint-encoded buffer's length, followed by the contents and the expected
// checksum.
func NewDecbufUvarintAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf {
    // We never have to access this method at the far end of the byte slice. Thus just checking
    // against the MaxVarintLen32 is sufficient.
    if bs.Len() < off+binary.MaxVarintLen32 {
        return Decbuf{E: ErrInvalidSize}
    }
    b := bs.Range(off, off+binary.MaxVarintLen32)

    l, n := binary.Uvarint(b)
    if n <= 0 || n > binary.MaxVarintLen32 {
        return Decbuf{E: errors.Errorf("invalid uvarint %d", n)}
    }

    if bs.Len() < off+n+int(l)+4 {
        return Decbuf{E: ErrInvalidSize}
    }

    // Load bytes holding the contents plus a CRC32 checksum.
    b = bs.Range(off+n, off+n+int(l)+4)
    dec := Decbuf{B: b[:len(b)-4]}

    if dec.Crc32(castagnoliTable) != binary.BigEndian.Uint32(b[len(b)-4:]) {
        return Decbuf{E: ErrInvalidChecksum}
    }
    return dec
}

func (d *Decbuf) Uvarint() int     { return int(d.Uvarint64()) }
func (d *Decbuf) Be32int() int     { return int(d.Be32()) }
func (d *Decbuf) Be64int64() int64 { return int64(d.Be64()) }

// Crc32 returns a CRC32 checksum over the remaining bytes.
func (d *Decbuf) Crc32(castagnoliTable *crc32.Table) uint32 {
    return crc32.Checksum(d.B, castagnoliTable)
}

func (d *Decbuf) UvarintStr() string {
    l := d.Uvarint64()
    if d.E != nil {
        return ""
    }
    if len(d.B) < int(l) {
        d.E = ErrInvalidSize
        return ""
    }
    s := string(d.B[:l])
    d.B = d.B[l:]
    return s
}

func (d *Decbuf) Varint64() int64 {
    if d.E != nil {
        return 0
    }
    x, n := binary.Varint(d.B)
    if n < 1 {
        d.E = ErrInvalidSize
        return 0
    }
    d.B = d.B[n:]
    return x
}

func (d *Decbuf) Uvarint64() uint64 {
    if d.E != nil {
        return 0
    }
    x, n := binary.Uvarint(d.B)
    if n < 1 {
        d.E = ErrInvalidSize
        return 0
    }
    d.B = d.B[n:]
    return x
}

func (d *Decbuf) Be64() uint64 {
    if d.E != nil {
        return 0
    }
    if len(d.B) < 8 {
        d.E = ErrInvalidSize
        return 0
    }
    x := binary.BigEndian.Uint64(d.B)
    d.B = d.B[8:]
    return x
}

func (d *Decbuf) Be32() uint32 {
    if d.E != nil {
        return 0
    }
    if len(d.B) < 4 {
        d.E = ErrInvalidSize
        return 0
    }
    x := binary.BigEndian.Uint32(d.B)
    d.B = d.B[4:]
    return x
}

func (d *Decbuf) Byte() byte {
    if d.E != nil {
        return 0
    }
    if len(d.B) < 1 {
        d.E = ErrInvalidSize
        return 0
    }
    x := d.B[0]
    d.B = d.B[1:]
    return x
}

func (d *Decbuf) Err() error  { return d.E }
func (d *Decbuf) Len() int    { return len(d.B) }
func (d *Decbuf) Get() []byte { return d.B }

// ByteSlice abstracts a byte slice.
type ByteSlice interface {
    Len() int
    Range(start, end int) []byte
}
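A hedged round-trip sketch for the Encbuf/Decbuf helpers above (old github.com/prometheus/tsdb/encoding path): values encoded with Encbuf decode in the same order with Decbuf, and the E field carries the first error, so intermediate checks can be skipped.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/tsdb/encoding"
)

func main() {
    var enc encoding.Encbuf
    enc.PutBE32(42)
    enc.PutUvarintStr("up")
    enc.PutVarint64(-7)

    dec := encoding.Decbuf{B: enc.Get()}
    a := dec.Be32()
    s := dec.UvarintStr()
    b := dec.Varint64()
    if err := dec.Err(); err != nil { // check once, after all reads
        log.Fatal(err)
    }
    fmt.Println(a, s, b) // 42 up -7
}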
62
vendor/github.com/prometheus/tsdb/errors/errors.go
generated
vendored
62
vendor/github.com/prometheus/tsdb/errors/errors.go
generated
vendored
|
@ -1,62 +0,0 @@
|
|||
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package errors

import (
	"bytes"
	"fmt"
)

// The MultiError type implements the error interface, and contains the
// Errors used to construct it.
type MultiError []error

// Error returns a concatenated string of the contained errors.
func (es MultiError) Error() string {
	var buf bytes.Buffer

	if len(es) > 1 {
		fmt.Fprintf(&buf, "%d errors: ", len(es))
	}

	for i, err := range es {
		if i != 0 {
			buf.WriteString("; ")
		}
		buf.WriteString(err.Error())
	}

	return buf.String()
}

// Add adds the error to the error list if it is not nil.
func (es *MultiError) Add(err error) {
	if err == nil {
		return
	}
	if merr, ok := err.(MultiError); ok {
		*es = append(*es, merr...)
	} else {
		*es = append(*es, err)
	}
}

// Err returns the error list as an error or nil if it is empty.
func (es MultiError) Err() error {
	if len(es) == 0 {
		return nil
	}
	return es
}
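A hypothetical caller, showing the intended pattern: Add collects errors from several cleanup steps (flattening nested MultiErrors and dropping nils), and Err surfaces them as a single error or nil. The closeAll helper is illustrative, not part of the package:

// Example (sketch): collecting close errors with MultiError.
package main

import (
	"fmt"
	"os"

	tsdb_errors "github.com/prometheus/tsdb/errors" // the package deleted above
)

func closeAll(files []*os.File) error {
	var merr tsdb_errors.MultiError
	for _, f := range files {
		merr.Add(f.Close()) // nil errors are ignored by Add
	}
	return merr.Err() // nil when nothing was collected
}

func main() {
	fmt.Println(closeAll(nil)) // <nil>
}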

vendor/github.com/prometheus/tsdb/fileutil/dir_unix.go (generated, vendored)
@@ -1,22 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package fileutil

import "os"

// OpenDir opens a directory for syncing.
func OpenDir(path string) (*os.File, error) { return os.Open(path) }

vendor/github.com/prometheus/tsdb/fileutil/dir_windows.go (generated, vendored)
@@ -1,46 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build windows

package fileutil

import (
	"os"
	"syscall"
)

// OpenDir opens a directory in windows with write access for syncing.
func OpenDir(path string) (*os.File, error) {
	fd, err := openDir(path)
	if err != nil {
		return nil, err
	}
	return os.NewFile(uintptr(fd), path), nil
}

func openDir(path string) (fd syscall.Handle, err error) {
	if len(path) == 0 {
		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
	}
	pathp, err := syscall.UTF16PtrFromString(path)
	if err != nil {
		return syscall.InvalidHandle, err
	}
	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
	createmode := uint32(syscall.OPEN_EXISTING)
	fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
	return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
}

vendor/github.com/prometheus/tsdb/fileutil/fileutil.go (generated, vendored)
@@ -1,159 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package fileutil provides utility methods used when dealing with the filesystem in tsdb.
// It is largely copied from github.com/coreos/etcd/pkg/fileutil to avoid the
// dependency chain it brings with it.
// Please check github.com/coreos/etcd for licensing information.
package fileutil

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

// CopyDirs copies all directories, subdirectories and files recursively including the empty folders.
// Source and destination must be full paths.
func CopyDirs(src, dest string) error {
	if err := os.MkdirAll(dest, 0777); err != nil {
		return err
	}
	files, err := readDirs(src)
	if err != nil {
		return err
	}

	for _, f := range files {
		dp := filepath.Join(dest, f)
		sp := filepath.Join(src, f)

		stat, err := os.Stat(sp)
		if err != nil {
			return err
		}

		// Empty directories are also created.
		if stat.IsDir() {
			if err := os.MkdirAll(dp, 0777); err != nil {
				return err
			}
			continue
		}

		if err := copyFile(sp, dp); err != nil {
			return err
		}
	}
	return nil
}

func copyFile(src, dest string) error {
	data, err := ioutil.ReadFile(src)
	if err != nil {
		return err
	}

	err = ioutil.WriteFile(dest, data, 0644)
	if err != nil {
		return err
	}
	return nil
}

// readDirs reads the source directory recursively and
// returns relative paths to all files and empty directories.
func readDirs(src string) ([]string, error) {
	var files []string

	err := filepath.Walk(src, func(path string, f os.FileInfo, err error) error {
		relativePath := strings.TrimPrefix(path, src)
		if len(relativePath) > 0 {
			files = append(files, relativePath)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return files, nil
}

// ReadDir returns the filenames in the given directory in sorted order.
func ReadDir(dirpath string) ([]string, error) {
	dir, err := os.Open(dirpath)
	if err != nil {
		return nil, err
	}
	defer dir.Close()
	names, err := dir.Readdirnames(-1)
	if err != nil {
		return nil, err
	}
	sort.Strings(names)
	return names, nil
}

// Rename safely renames a file.
func Rename(from, to string) error {
	if err := os.Rename(from, to); err != nil {
		return err
	}

	// Directory was renamed; sync parent dir to persist rename.
	pdir, err := OpenDir(filepath.Dir(to))
	if err != nil {
		return err
	}

	if err = pdir.Sync(); err != nil {
		pdir.Close()
		return err
	}
	return pdir.Close()
}

// Replace moves a file or directory to a new location and deletes any previous data.
// It is not atomic.
func Replace(from, to string) error {
	// Remove destination only if it is a dir otherwise leave it to os.Rename
	// as it replaces the destination file and is atomic.
	{
		f, err := os.Stat(to)
		if !os.IsNotExist(err) {
			if err == nil && f.IsDir() {
				if err := os.RemoveAll(to); err != nil {
					return err
				}
			}
		}
	}

	if err := os.Rename(from, to); err != nil {
		return err
	}

	// Directory was renamed; sync parent dir to persist rename.
	pdir, err := OpenDir(filepath.Dir(to))
	if err != nil {
		return err
	}

	if err = pdir.Sync(); err != nil {
		pdir.Close()
		return err
	}
	return pdir.Close()
}
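The sync-parent-directory step in Rename and Replace above is the crash-safety half of the classic write-temp-then-rename pattern: the rename itself is atomic, but it only survives a crash once the directory entry is fsynced. A self-contained sketch of that pattern under POSIX semantics (writeAtomic and the demo path are illustrative, not part of the package):

// Example (sketch): atomic file replacement with a parent-dir sync.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func writeAtomic(path string, data []byte) error {
	// Write the new content to a sibling temp file first.
	tmp := path + ".tmp"
	if err := ioutil.WriteFile(tmp, data, 0644); err != nil {
		return err
	}
	// Atomically move it into place.
	if err := os.Rename(tmp, path); err != nil {
		return err
	}
	// Sync the parent directory so the rename is durable.
	dir, err := os.Open(filepath.Dir(path))
	if err != nil {
		return err
	}
	defer dir.Close()
	return dir.Sync()
}

func main() {
	fmt.Println(writeAtomic(filepath.Join(os.TempDir(), "demo.txt"), []byte("hi")))
}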

vendor/github.com/prometheus/tsdb/fileutil/flock.go (generated, vendored)
@@ -1,41 +0,0 @@
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fileutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Releaser provides the Release method to release a file lock.
|
||||
type Releaser interface {
|
||||
Release() error
|
||||
}
|
||||
|
||||
// Flock locks the file with the provided name. If the file does not exist, it is
|
||||
// created. The returned Releaser is used to release the lock. existed is true
|
||||
// if the file to lock already existed. A non-nil error is returned if the
|
||||
// locking has failed. Neither this function nor the returned Releaser is
|
||||
// goroutine-safe.
|
||||
func Flock(fileName string) (r Releaser, existed bool, err error) {
|
||||
if err = os.MkdirAll(filepath.Dir(fileName), 0755); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
_, err = os.Stat(fileName)
|
||||
existed = err == nil
|
||||
|
||||
r, err = newLock(fileName)
|
||||
return r, existed, err
|
||||
}
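A hypothetical caller (not part of this diff), taking an exclusive lock on a data directory in the style tsdb uses to guard against two processes opening the same database:

// Example (sketch): guarding a data directory with Flock.
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/prometheus/tsdb/fileutil" // the package deleted above
)

func main() {
	lockPath := filepath.Join(os.TempDir(), "demo-db", "lock")
	r, existed, err := fileutil.Flock(lockPath)
	if err != nil {
		fmt.Println("could not acquire lock:", err) // e.g. held by another process
		return
	}
	defer r.Release()
	// existed hints at an unclean shutdown: the lock file was left behind.
	fmt.Println("locked; stale lock file present:", existed)
}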

vendor/github.com/prometheus/tsdb/fileutil/flock_plan9.go (generated, vendored)
@@ -1,32 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import "os"

type plan9Lock struct {
	f *os.File
}

func (l *plan9Lock) Release() error {
	return l.f.Close()
}

func newLock(fileName string) (Releaser, error) {
	// Note: the parameter is fileName; referencing an undefined "path"
	// here would not compile.
	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
	if err != nil {
		return nil, err
	}
	return &plan9Lock{f}, nil
}

vendor/github.com/prometheus/tsdb/fileutil/flock_solaris.go (generated, vendored)
@@ -1,59 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build solaris

package fileutil

import (
	"os"
	"syscall"
)

type unixLock struct {
	f *os.File
}

func (l *unixLock) Release() error {
	if err := l.set(false); err != nil {
		return err
	}
	return l.f.Close()
}

func (l *unixLock) set(lock bool) error {
	flock := syscall.Flock_t{
		Type:   syscall.F_UNLCK,
		Start:  0,
		Len:    0,
		Whence: 1,
	}
	if lock {
		flock.Type = syscall.F_WRLCK
	}
	return syscall.FcntlFlock(l.f.Fd(), syscall.F_SETLK, &flock)
}

func newLock(fileName string) (Releaser, error) {
	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	l := &unixLock{f}
	err = l.set(true)
	if err != nil {
		f.Close()
		return nil, err
	}
	return l, nil
}

vendor/github.com/prometheus/tsdb/fileutil/flock_unix.go (generated, vendored)
@@ -1,54 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build darwin dragonfly freebsd linux netbsd openbsd

package fileutil

import (
	"os"
	"syscall"
)

type unixLock struct {
	f *os.File
}

func (l *unixLock) Release() error {
	if err := l.set(false); err != nil {
		return err
	}
	return l.f.Close()
}

func (l *unixLock) set(lock bool) error {
	how := syscall.LOCK_UN
	if lock {
		how = syscall.LOCK_EX
	}
	return syscall.Flock(int(l.f.Fd()), how|syscall.LOCK_NB)
}

func newLock(fileName string) (Releaser, error) {
	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	l := &unixLock{f}
	err = l.set(true)
	if err != nil {
		f.Close()
		return nil, err
	}
	return l, nil
}

vendor/github.com/prometheus/tsdb/fileutil/flock_windows.go (generated, vendored)
@@ -1,36 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import "syscall"

type windowsLock struct {
	fd syscall.Handle
}

func (fl *windowsLock) Release() error {
	return syscall.Close(fl.fd)
}

func newLock(fileName string) (Releaser, error) {
	pathp, err := syscall.UTF16PtrFromString(fileName)
	if err != nil {
		return nil, err
	}
	fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
	if err != nil {
		return nil, err
	}
	return &windowsLock{fd}, nil
}

vendor/github.com/prometheus/tsdb/fileutil/mmap.go (generated, vendored)
@@ -1,61 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import (
	"os"

	"github.com/pkg/errors"
)

// MmapFile wraps a file and a read-only memory mapping of its contents.
type MmapFile struct {
	f *os.File
	b []byte
}

func OpenMmapFile(path string) (*MmapFile, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, errors.Wrap(err, "open file")
	}
	info, err := f.Stat()
	if err != nil {
		return nil, errors.Wrap(err, "stat")
	}

	b, err := mmap(f, int(info.Size()))
	if err != nil {
		return nil, errors.Wrap(err, "mmap")
	}

	return &MmapFile{f: f, b: b}, nil
}

func (f *MmapFile) Close() error {
	err0 := munmap(f.b)
	err1 := f.f.Close()

	if err0 != nil {
		return err0
	}
	return err1
}

func (f *MmapFile) File() *os.File {
	return f.f
}

func (f *MmapFile) Bytes() []byte {
	return f.b
}
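A hypothetical caller (not part of this diff): map a file and read its contents without copying, the access pattern that read-only mmap enables for index and chunk readers. The demo path is illustrative:

// Example (sketch): zero-copy reads via OpenMmapFile.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/prometheus/tsdb/fileutil" // the package deleted above
)

func main() {
	path := filepath.Join(os.TempDir(), "mmap-demo")
	if err := ioutil.WriteFile(path, []byte("hello"), 0644); err != nil {
		fmt.Println(err)
		return
	}

	m, err := fileutil.OpenMmapFile(path)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer m.Close() // unmaps and closes the fd

	// m.Bytes() aliases the mapped pages; no extra read syscalls or copies.
	fmt.Println(string(m.Bytes()))
}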

vendor/github.com/prometheus/tsdb/fileutil/mmap_386.go (generated, vendored)
@@ -1,18 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build windows

package fileutil

const maxMapSize = 0x7FFFFFFF // 2GB

vendor/github.com/prometheus/tsdb/fileutil/mmap_amd64.go (generated, vendored)
@@ -1,18 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build windows

package fileutil

const maxMapSize = 0xFFFFFFFFFFFF // 256TB

vendor/github.com/prometheus/tsdb/fileutil/mmap_unix.go (generated, vendored)
@@ -1,30 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows,!plan9

package fileutil

import (
	"os"

	"golang.org/x/sys/unix"
)

func mmap(f *os.File, length int) ([]byte, error) {
	return unix.Mmap(int(f.Fd()), 0, length, unix.PROT_READ, unix.MAP_SHARED)
}

func munmap(b []byte) (err error) {
	return unix.Munmap(b)
}

vendor/github.com/prometheus/tsdb/fileutil/mmap_windows.go (generated, vendored)
@@ -1,46 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import (
	"os"
	"syscall"
	"unsafe"
)

func mmap(f *os.File, size int) ([]byte, error) {
	low, high := uint32(size), uint32(size>>32)
	h, errno := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, high, low, nil)
	if h == 0 {
		return nil, os.NewSyscallError("CreateFileMapping", errno)
	}

	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(size))
	if addr == 0 {
		return nil, os.NewSyscallError("MapViewOfFile", errno)
	}

	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
		return nil, os.NewSyscallError("CloseHandle", err)
	}

	return (*[maxMapSize]byte)(unsafe.Pointer(addr))[:size], nil
}

func munmap(b []byte) error {
	if err := syscall.UnmapViewOfFile((uintptr)(unsafe.Pointer(&b[0]))); err != nil {
		return os.NewSyscallError("UnmapViewOfFile", err)
	}
	return nil
}

vendor/github.com/prometheus/tsdb/fileutil/preallocate.go (generated, vendored)
@@ -1,54 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import (
	"io"
	"os"
)

// Preallocate tries to allocate the space for given
// file. This operation is only supported on linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
	if sizeInBytes == 0 {
		// fallocate will return EINVAL if length is 0; skip
		return nil
	}
	if extendFile {
		return preallocExtend(f, sizeInBytes)
	}
	return preallocFixed(f, sizeInBytes)
}

func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
	curOff, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	size, err := f.Seek(sizeInBytes, io.SeekEnd)
	if err != nil {
		return err
	}
	if _, err = f.Seek(curOff, io.SeekStart); err != nil {
		return err
	}
	if sizeInBytes > size {
		return nil
	}
	return f.Truncate(sizeInBytes)
}
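A hypothetical caller (not part of this diff): reserving segment-sized space up front so later appends are less likely to fragment or fail mid-write, the way a write-ahead log might. The 128 MiB figure and demo path are illustrative:

// Example (sketch): reserving file space with Preallocate.
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/prometheus/tsdb/fileutil" // the package deleted above
)

func main() {
	f, err := os.Create(filepath.Join(os.TempDir(), "prealloc-demo"))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// Reserve 128 MiB and extend the file to that size. Per the doc
	// comment above, filesystems without allocation support return nil.
	if err := fileutil.Preallocate(f, 128<<20, true); err != nil {
		fmt.Println("preallocate:", err)
	}
}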

vendor/github.com/prometheus/tsdb/fileutil/preallocate_darwin.go (generated, vendored)
@@ -1,41 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileutil

import (
	"os"
	"syscall"
	"unsafe"
)

func preallocExtend(f *os.File, sizeInBytes int64) error {
	if err := preallocFixed(f, sizeInBytes); err != nil {
		return err
	}
	return preallocExtendTrunc(f, sizeInBytes)
}

func preallocFixed(f *os.File, sizeInBytes int64) error {
	fstore := &syscall.Fstore_t{
		Flags:   syscall.F_ALLOCATEALL,
		Posmode: syscall.F_PEOFPOSMODE,
		Length:  sizeInBytes}
	p := unsafe.Pointer(fstore)
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p))
	if errno == 0 || errno == syscall.ENOTSUP {
		return nil
	}
	return errno
}

Some files were not shown because too many files have changed in this diff.