From f6dae7a5ba554fa8324e19003c9992fe6ddbda28 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Tue, 7 Feb 2017 03:42:02 -0800 Subject: [PATCH] vendor: add fabxc/tsdb --- vendor/github.com/bradfitz/slice/COPYING | 3 + vendor/github.com/bradfitz/slice/LICENSE | 27 + vendor/github.com/bradfitz/slice/README | 1 + vendor/github.com/bradfitz/slice/slice.go | 44 + vendor/github.com/cespare/xxhash/LICENSE.txt | 22 + vendor/github.com/cespare/xxhash/README.md | 34 + vendor/github.com/cespare/xxhash/xxhash.go | 180 ++++ .../github.com/cespare/xxhash/xxhash_amd64.go | 12 + .../github.com/cespare/xxhash/xxhash_amd64.s | 233 +++++ vendor/github.com/coreos/etcd/LICENSE | 202 ++++ vendor/github.com/coreos/etcd/NOTICE | 5 + .../coreos/etcd/pkg/fileutil/dir_unix.go | 22 + .../coreos/etcd/pkg/fileutil/dir_windows.go | 46 + .../coreos/etcd/pkg/fileutil/fileutil.go | 121 +++ .../coreos/etcd/pkg/fileutil/lock.go | 26 + .../coreos/etcd/pkg/fileutil/lock_flock.go | 49 + .../coreos/etcd/pkg/fileutil/lock_linux.go | 96 ++ .../coreos/etcd/pkg/fileutil/lock_plan9.go | 45 + .../coreos/etcd/pkg/fileutil/lock_solaris.go | 62 ++ .../coreos/etcd/pkg/fileutil/lock_unix.go | 29 + .../coreos/etcd/pkg/fileutil/lock_windows.go | 125 +++ .../coreos/etcd/pkg/fileutil/preallocate.go | 47 + .../etcd/pkg/fileutil/preallocate_darwin.go | 43 + .../etcd/pkg/fileutil/preallocate_unix.go | 49 + .../pkg/fileutil/preallocate_unsupported.go | 25 + .../coreos/etcd/pkg/fileutil/purge.go | 78 ++ .../coreos/etcd/pkg/fileutil/sync.go | 29 + .../coreos/etcd/pkg/fileutil/sync_darwin.go | 40 + .../coreos/etcd/pkg/fileutil/sync_linux.go | 34 + vendor/github.com/coreos/go-systemd/LICENSE | 191 ++++ .../coreos/go-systemd/journal/journal.go | 179 ++++ .../github.com/coreos/pkg/capnslog/README.md | 39 + .../coreos/pkg/capnslog/formatters.go | 157 +++ .../coreos/pkg/capnslog/glog_formatter.go | 96 ++ vendor/github.com/coreos/pkg/capnslog/init.go | 49 + .../coreos/pkg/capnslog/init_windows.go | 25 + .../coreos/pkg/capnslog/journald_formatter.go | 68 ++ .../coreos/pkg/capnslog/log_hijack.go | 39 + .../github.com/coreos/pkg/capnslog/logmap.go | 245 +++++ .../coreos/pkg/capnslog/pkg_logger.go | 177 ++++ .../coreos/pkg/capnslog/syslog_formatter.go | 65 ++ vendor/github.com/dgryski/go-bits/LICENSE | 21 + vendor/github.com/dgryski/go-bits/README | 1 + vendor/github.com/dgryski/go-bits/clz_amd64.s | 13 + vendor/github.com/dgryski/go-bits/clz_asm.go | 6 + vendor/github.com/dgryski/go-bits/ctz_amd64.s | 11 + vendor/github.com/dgryski/go-bits/ctz_asm.go | 6 + .../github.com/dgryski/go-bits/popcnt_amd64.s | 11 + .../github.com/dgryski/go-bits/popcnt_asm.go | 6 + vendor/github.com/fabxc/tsdb/block.go | 237 +++++ .../github.com/fabxc/tsdb/chunks/bstream.go | 169 ++++ vendor/github.com/fabxc/tsdb/chunks/chunk.go | 57 ++ vendor/github.com/fabxc/tsdb/chunks/xor.go | 341 +++++++ vendor/github.com/fabxc/tsdb/compact.go | 444 +++++++++ vendor/github.com/fabxc/tsdb/db.go | 806 ++++++++++++++++ vendor/github.com/fabxc/tsdb/db_amd64.go | 10 + vendor/github.com/fabxc/tsdb/db_unix.go | 27 + vendor/github.com/fabxc/tsdb/head.go | 701 ++++++++++++++ vendor/github.com/fabxc/tsdb/labels/labels.go | 143 +++ .../github.com/fabxc/tsdb/labels/selector.go | 66 ++ vendor/github.com/fabxc/tsdb/postings.go | 265 ++++++ vendor/github.com/fabxc/tsdb/querier.go | 900 ++++++++++++++++++ vendor/github.com/fabxc/tsdb/reader.go | 411 ++++++++ vendor/github.com/fabxc/tsdb/wal.go | 428 +++++++++ vendor/github.com/fabxc/tsdb/writer.go | 506 ++++++++++ vendor/github.com/go-kit/kit/LICENSE | 22 + 
vendor/github.com/go-kit/kit/log/README.md | 148 +++ vendor/github.com/go-kit/kit/log/doc.go | 93 ++ .../github.com/go-kit/kit/log/json_logger.go | 92 ++ vendor/github.com/go-kit/kit/log/log.go | 144 +++ .../go-kit/kit/log/logfmt_logger.go | 62 ++ .../github.com/go-kit/kit/log/nop_logger.go | 8 + vendor/github.com/go-kit/kit/log/stdlib.go | 116 +++ vendor/github.com/go-kit/kit/log/sync.go | 81 ++ vendor/github.com/go-kit/kit/log/value.go | 62 ++ vendor/github.com/go-logfmt/logfmt/LICENSE | 22 + vendor/github.com/go-logfmt/logfmt/README.md | 33 + vendor/github.com/go-logfmt/logfmt/decode.go | 237 +++++ vendor/github.com/go-logfmt/logfmt/doc.go | 6 + vendor/github.com/go-logfmt/logfmt/encode.go | 321 +++++++ vendor/github.com/go-logfmt/logfmt/fuzz.go | 126 +++ .../github.com/go-logfmt/logfmt/jsonstring.go | 277 ++++++ vendor/github.com/go-stack/stack/LICENSE.md | 13 + vendor/github.com/go-stack/stack/README.md | 38 + vendor/github.com/go-stack/stack/stack.go | 349 +++++++ vendor/github.com/kr/logfmt/Readme | 12 + vendor/github.com/kr/logfmt/decode.go | 184 ++++ vendor/github.com/kr/logfmt/scanner.go | 149 +++ vendor/github.com/kr/logfmt/unquote.go | 149 +++ vendor/github.com/pkg/errors/LICENSE | 23 + vendor/github.com/pkg/errors/README.md | 52 + vendor/github.com/pkg/errors/appveyor.yml | 32 + vendor/github.com/pkg/errors/errors.go | 269 ++++++ vendor/github.com/pkg/errors/stack.go | 178 ++++ vendor/go4.org/LICENSE | 202 ++++ vendor/go4.org/reflectutil/asm_b.s | 17 + vendor/go4.org/reflectutil/asm_b_14.s | 13 + vendor/go4.org/reflectutil/asm_jmp.s | 17 + vendor/go4.org/reflectutil/asm_jmp_14.s | 13 + vendor/go4.org/reflectutil/reflectutil.go | 40 + vendor/go4.org/reflectutil/swapper.go | 21 + vendor/go4.org/reflectutil/swapper_unsafe.go | 105 ++ .../go4.org/reflectutil/swapper_unsafe_14.go | 15 + .../go4.org/reflectutil/swapper_unsafe_15.go | 15 + vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + vendor/golang.org/x/sync/errgroup/errgroup.go | 67 ++ vendor/vendor.json | 96 ++ 108 files changed, 12613 insertions(+) create mode 100644 vendor/github.com/bradfitz/slice/COPYING create mode 100644 vendor/github.com/bradfitz/slice/LICENSE create mode 100644 vendor/github.com/bradfitz/slice/README create mode 100644 vendor/github.com/bradfitz/slice/slice.go create mode 100644 vendor/github.com/cespare/xxhash/LICENSE.txt create mode 100644 vendor/github.com/cespare/xxhash/README.md create mode 100644 vendor/github.com/cespare/xxhash/xxhash.go create mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.go create mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/coreos/etcd/LICENSE create mode 100644 vendor/github.com/coreos/etcd/NOTICE create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go create mode 100644 
vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/purge.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/sync.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go create mode 100644 vendor/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/journal/journal.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/README.md create mode 100644 vendor/github.com/coreos/pkg/capnslog/formatters.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/glog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init_windows.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/journald_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/log_hijack.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/logmap.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/pkg_logger.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go create mode 100644 vendor/github.com/dgryski/go-bits/LICENSE create mode 100644 vendor/github.com/dgryski/go-bits/README create mode 100644 vendor/github.com/dgryski/go-bits/clz_amd64.s create mode 100644 vendor/github.com/dgryski/go-bits/clz_asm.go create mode 100644 vendor/github.com/dgryski/go-bits/ctz_amd64.s create mode 100644 vendor/github.com/dgryski/go-bits/ctz_asm.go create mode 100644 vendor/github.com/dgryski/go-bits/popcnt_amd64.s create mode 100644 vendor/github.com/dgryski/go-bits/popcnt_asm.go create mode 100644 vendor/github.com/fabxc/tsdb/block.go create mode 100644 vendor/github.com/fabxc/tsdb/chunks/bstream.go create mode 100644 vendor/github.com/fabxc/tsdb/chunks/chunk.go create mode 100644 vendor/github.com/fabxc/tsdb/chunks/xor.go create mode 100644 vendor/github.com/fabxc/tsdb/compact.go create mode 100644 vendor/github.com/fabxc/tsdb/db.go create mode 100644 vendor/github.com/fabxc/tsdb/db_amd64.go create mode 100644 vendor/github.com/fabxc/tsdb/db_unix.go create mode 100644 vendor/github.com/fabxc/tsdb/head.go create mode 100644 vendor/github.com/fabxc/tsdb/labels/labels.go create mode 100644 vendor/github.com/fabxc/tsdb/labels/selector.go create mode 100644 vendor/github.com/fabxc/tsdb/postings.go create mode 100644 vendor/github.com/fabxc/tsdb/querier.go create mode 100644 vendor/github.com/fabxc/tsdb/reader.go create mode 100644 vendor/github.com/fabxc/tsdb/wal.go create mode 100644 vendor/github.com/fabxc/tsdb/writer.go create mode 100644 vendor/github.com/go-kit/kit/LICENSE create mode 100644 vendor/github.com/go-kit/kit/log/README.md create mode 100644 vendor/github.com/go-kit/kit/log/doc.go create mode 100644 vendor/github.com/go-kit/kit/log/json_logger.go create mode 100644 vendor/github.com/go-kit/kit/log/log.go create mode 100644 vendor/github.com/go-kit/kit/log/logfmt_logger.go create mode 100644 vendor/github.com/go-kit/kit/log/nop_logger.go create mode 100644 vendor/github.com/go-kit/kit/log/stdlib.go create mode 100644 vendor/github.com/go-kit/kit/log/sync.go create mode 100644 vendor/github.com/go-kit/kit/log/value.go 
create mode 100644 vendor/github.com/go-logfmt/logfmt/LICENSE create mode 100644 vendor/github.com/go-logfmt/logfmt/README.md create mode 100644 vendor/github.com/go-logfmt/logfmt/decode.go create mode 100644 vendor/github.com/go-logfmt/logfmt/doc.go create mode 100644 vendor/github.com/go-logfmt/logfmt/encode.go create mode 100644 vendor/github.com/go-logfmt/logfmt/fuzz.go create mode 100644 vendor/github.com/go-logfmt/logfmt/jsonstring.go create mode 100644 vendor/github.com/go-stack/stack/LICENSE.md create mode 100644 vendor/github.com/go-stack/stack/README.md create mode 100644 vendor/github.com/go-stack/stack/stack.go create mode 100644 vendor/github.com/kr/logfmt/Readme create mode 100644 vendor/github.com/kr/logfmt/decode.go create mode 100644 vendor/github.com/kr/logfmt/scanner.go create mode 100644 vendor/github.com/kr/logfmt/unquote.go create mode 100644 vendor/github.com/pkg/errors/LICENSE create mode 100644 vendor/github.com/pkg/errors/README.md create mode 100644 vendor/github.com/pkg/errors/appveyor.yml create mode 100644 vendor/github.com/pkg/errors/errors.go create mode 100644 vendor/github.com/pkg/errors/stack.go create mode 100644 vendor/go4.org/LICENSE create mode 100644 vendor/go4.org/reflectutil/asm_b.s create mode 100644 vendor/go4.org/reflectutil/asm_b_14.s create mode 100644 vendor/go4.org/reflectutil/asm_jmp.s create mode 100644 vendor/go4.org/reflectutil/asm_jmp_14.s create mode 100644 vendor/go4.org/reflectutil/reflectutil.go create mode 100644 vendor/go4.org/reflectutil/swapper.go create mode 100644 vendor/go4.org/reflectutil/swapper_unsafe.go create mode 100644 vendor/go4.org/reflectutil/swapper_unsafe_14.go create mode 100644 vendor/go4.org/reflectutil/swapper_unsafe_15.go create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go diff --git a/vendor/github.com/bradfitz/slice/COPYING b/vendor/github.com/bradfitz/slice/COPYING new file mode 100644 index 0000000000..84dd3f0e88 --- /dev/null +++ b/vendor/github.com/bradfitz/slice/COPYING @@ -0,0 +1,3 @@ +This package is licensed under the same terms as Go itself and +has the same contribution / CLA requirements. + diff --git a/vendor/github.com/bradfitz/slice/LICENSE b/vendor/github.com/bradfitz/slice/LICENSE new file mode 100644 index 0000000000..7448756763 --- /dev/null +++ b/vendor/github.com/bradfitz/slice/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/bradfitz/slice/README b/vendor/github.com/bradfitz/slice/README new file mode 100644 index 0000000000..97c01477a9 --- /dev/null +++ b/vendor/github.com/bradfitz/slice/README @@ -0,0 +1 @@ +See https://godoc.org/github.com/bradfitz/slice diff --git a/vendor/github.com/bradfitz/slice/slice.go b/vendor/github.com/bradfitz/slice/slice.go new file mode 100644 index 0000000000..993d8b1a9d --- /dev/null +++ b/vendor/github.com/bradfitz/slice/slice.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slice provides a slice sorting function. +package slice + +import ( + "fmt" + "reflect" + "sort" + + "go4.org/reflectutil" +) + +// Sort sorts the provided slice using the function less. +// If slice is not a slice, Sort panics. +func Sort(slice interface{}, less func(i, j int) bool) { + sort.Sort(SortInterface(slice, less)) +} + +// SortInterface returns a sort.Interface to sort the provided slice +// using the function less. +func SortInterface(slice interface{}, less func(i, j int) bool) sort.Interface { + sv := reflect.ValueOf(slice) + if sv.Kind() != reflect.Slice { + panic(fmt.Sprintf("slice.Sort called with non-slice value of type %T", slice)) + } + return &funcs{ + length: sv.Len(), + less: less, + swap: reflectutil.Swapper(slice), + } +} + +type funcs struct { + length int + less func(i, j int) bool + swap func(i, j int) +} + +func (f *funcs) Len() int { return f.length } +func (f *funcs) Less(i, j int) bool { return f.less(i, j) } +func (f *funcs) Swap(i, j int) { f.swap(i, j) } diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt new file mode 100644 index 0000000000..24b53065f4 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
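For orientation before the next files, here is a minimal sketch of how the vendored bradfitz/slice package above is used; it assumes nothing beyond the exported API shown in slice.go (Sort takes any slice plus a less function over indices):

    package main

    import (
        "fmt"

        "github.com/bradfitz/slice"
    )

    func main() {
        people := []string{"carol", "alice", "bob"}
        // The less func closes over the slice and compares elements by index;
        // Sort builds a sort.Interface via reflection, with the swap function
        // generated by go4.org/reflectutil (also vendored in this patch).
        slice.Sort(people, func(i, j int) bool { return people[i] < people[j] })
        fmt.Println(people) // prints [alice bob carol]
    }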
diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md new file mode 100644 index 0000000000..8a555d57b8 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/README.md @@ -0,0 +1,34 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +The API is very small, taking its cue from the other hashing packages in the +standard library: + + $ go doc github.com/cespare/xxhash + package xxhash // import "github.com/cespare/xxhash" + + Package xxhash implements the 64-bit variant of xxHash (XXH64) as described + at http://cyan4973.github.io/xxHash/. + + func New() hash.Hash64 + func Sum64(b []byte) uint64 + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64 against another popular Go XXH64 implementation, +[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash): + +| input size | OneOfOne | cespare (noasm) | cespare | +| --- | --- | --- | --- | +| 5 B | 438.34 MB/s | 596.40 MB/s | 711.11 MB/s | +| 100 B | 3676.54 MB/s | 4301.40 MB/s | 4598.95 MB/s | +| 4 KB | 8128.64 MB/s | 8840.83 MB/s | 10549.72 MB/s | +| 10 MB | 7335.19 MB/s | 7736.64 MB/s | 9024.04 MB/s | diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go new file mode 100644 index 0000000000..aeacdd13ff --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash.go @@ -0,0 +1,180 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "hash" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +type xxh struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total int + mem [32]byte + n int // how much of mem is used +} + +// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm. +func New() hash.Hash64 { + var x xxh + x.Reset() + return &x +} + +func (x *xxh) Reset() { + x.n = 0 + x.total = 0 + x.v1 = prime1v + prime2 + x.v2 = prime2 + x.v3 = 0 + x.v4 = -prime1v +} + +func (x *xxh) Size() int { return 8 } +func (x *xxh) BlockSize() int { return 32 } + +// Write adds more data to x. It always returns len(b), nil. +func (x *xxh) Write(b []byte) (n int, err error) { + n = len(b) + x.total += len(b) + + if x.n+len(b) < 32 { + // This new data doesn't even fill the current block. + copy(x.mem[x.n:], b) + x.n += len(b) + return + } + + if x.n > 0 { + // Finish off the partial block. 
+ copy(x.mem[x.n:], b) + x.v1 = round(x.v1, u64(x.mem[0:8])) + x.v2 = round(x.v2, u64(x.mem[8:16])) + x.v3 = round(x.v3, u64(x.mem[16:24])) + x.v4 = round(x.v4, u64(x.mem[24:32])) + b = b[32-x.n:] + x.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + b = writeBlocks(x, b) + } + + // Store any remaining partial block. + copy(x.mem[:], b) + x.n = len(b) + + return +} + +func (x *xxh) Sum(b []byte) []byte { + s := x.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +func (x *xxh) Sum64() uint64 { + var h uint64 + + if x.total >= 32 { + v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = x.v3 + prime5 + } + + h += uint64(x.total) + + i, end := 0, x.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(x.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(x.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(x.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +// It's important for performance to get the rotates to actually compile to +// ROLQs. gc will do this for us but only if rotate amount is a constant. + +func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } +func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } +func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } +func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } +func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } +func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } +func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } +func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go new file mode 100644 index 0000000000..fc417c13b4 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_amd64.go @@ -0,0 +1,12 @@ +// +build !appengine +// +build gc +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +func writeBlocks(x *xxh, b []byte) []byte diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s new file mode 100644 index 0000000000..37aa8f0035 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_amd64.s @@ -0,0 +1,233 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. 
+#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the x pointer. + +// func writeBlocks(x *xxh, b []byte) []byte +TEXT ·writeBlocks(SB), NOSPLIT, $0-56 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from x. + MOVQ x+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to x. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // Construct return slice. + // NOTE: It's important that we don't construct a slice that has a base + // pointer off the end of the original slice, as in Go 1.7+ this will + // cause runtime crashes. (See discussion in, for example, + // https://github.com/golang/go/issues/16772.) 
+ // Therefore, we calculate the length/cap first, and if they're zero, we + // keep the old base. This is what the compiler does as well if you + // write code like + // b = b[len(b):] + + // New length is 32 - (CX - BX) -> BX+32 - CX. + ADDQ $32, BX + SUBQ CX, BX + JZ afterSetBase + + MOVQ CX, ret_base+32(FP) + +afterSetBase: + MOVQ BX, ret_len+40(FP) + MOVQ BX, ret_cap+48(FP) // set cap == len + + RET diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/coreos/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE new file mode 100644 index 0000000000..b39ddfa5cb --- /dev/null +++ b/vendor/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go new file mode 100644 index 0000000000..58a77dfc1a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go @@ -0,0 +1,22 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package fileutil + +import "os" + +// OpenDir opens a directory for syncing. +func OpenDir(path string) (*os.File, error) { return os.Open(path) } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go new file mode 100644 index 0000000000..c123395c00 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go @@ -0,0 +1,46 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build windows + +package fileutil + +import ( + "os" + "syscall" +) + +// OpenDir opens a directory in windows with write access for syncing. +func OpenDir(path string) (*os.File, error) { + fd, err := openDir(path) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func openDir(path string) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + createmode := uint32(syscall.OPEN_EXISTING) + fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go new file mode 100644 index 0000000000..9585ed5e0e --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go @@ -0,0 +1,121 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fileutil implements utility functions related to files and paths. +package fileutil + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "sort" + + "github.com/coreos/pkg/capnslog" +) + +const ( + // PrivateFileMode grants owner to read/write a file. + PrivateFileMode = 0600 + // PrivateDirMode grants owner to make/remove files inside the directory. + PrivateDirMode = 0700 +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil") +) + +// IsDirWriteable checks if dir is writable by writing and removing a file +// to dir. It returns nil if dir is writable. +func IsDirWriteable(dir string) error { + f := path.Join(dir, ".touch") + if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil { + return err + } + return os.Remove(f) +} + +// ReadDir returns the filenames in the given directory in sorted order. +func ReadDir(dirpath string) ([]string, error) { + dir, err := os.Open(dirpath) + if err != nil { + return nil, err + } + defer dir.Close() + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory +// does not exist. TouchDirAll also ensures the given directory is writable. +func TouchDirAll(dir string) error { + // If path is already a directory, MkdirAll does nothing + // and returns nil. + err := os.MkdirAll(dir, PrivateDirMode) + if err != nil { + // if mkdirAll("a/text") and "text" is not + // a directory, this will return syscall.ENOTDIR + return err + } + return IsDirWriteable(dir) +} + +// CreateDirAll is similar to TouchDirAll but returns error +// if the deepest directory was not empty. 
+func CreateDirAll(dir string) error { + err := TouchDirAll(dir) + if err == nil { + var ns []string + ns, err = ReadDir(dir) + if err != nil { + return err + } + if len(ns) != 0 { + err = fmt.Errorf("expected %q to be empty, got %q", dir, ns) + } + } + return err +} + +func Exist(name string) bool { + _, err := os.Stat(name) + return err == nil +} + +// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily +// shorten the length of the file. +func ZeroToEnd(f *os.File) error { + // TODO: support FALLOC_FL_ZERO_RANGE + off, err := f.Seek(0, os.SEEK_CUR) + if err != nil { + return err + } + lenf, lerr := f.Seek(0, os.SEEK_END) + if lerr != nil { + return lerr + } + if err = f.Truncate(off); err != nil { + return err + } + // make sure blocks remain allocated + if err = Preallocate(f, lenf, true); err != nil { + return err + } + _, err = f.Seek(off, os.SEEK_SET) + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go new file mode 100644 index 0000000000..338627f43c --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go @@ -0,0 +1,26 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "errors" + "os" +) + +var ( + ErrLocked = errors.New("fileutil: file already locked") +) + +type LockedFile struct{ *os.File } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go new file mode 100644 index 0000000000..542550bc8a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows,!plan9,!solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go new file mode 100644 index 0000000000..dec25a1af4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go @@ -0,0 +1,96 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +// This used to call syscall.Flock() but that call fails with EBADF on NFS. +// An alternative is lockf() which works on NFS but that call lets a process lock +// the same file twice. Instead, use Linux's non-standard open file descriptor +// locks which will block if the process already holds the file lock. 
+// +// constants from /usr/include/bits/fcntl-linux.h +const ( + F_OFD_GETLK = 37 + F_OFD_SETLK = 37 + F_OFD_SETLKW = 38 +) + +var ( + wrlck = syscall.Flock_t{ + Type: syscall.F_WRLCK, + Whence: int16(os.SEEK_SET), + Start: 0, + Len: 0, + } + + linuxTryLockFile = flockTryLockFile + linuxLockFile = flockLockFile +) + +func init() { + // use open file descriptor locks if the system supports it + getlk := syscall.Flock_t{Type: syscall.F_RDLCK} + if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil { + linuxTryLockFile = ofdTryLockFile + linuxLockFile = ofdLockFile + } +} + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxTryLockFile(path, flag, perm) +} + +func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + + flock := wrlck + if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxLockFile(path, flag, perm) +} + +func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + + flock := wrlck + err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock) + + if err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go new file mode 100644 index 0000000000..fee6a7c8f4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go @@ -0,0 +1,45 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fileutil + +import ( + "os" + "syscall" + "time" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { + return nil, err + } + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, ErrLocked + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { + return nil, err + } + for { + f, err := os.OpenFile(path, flag, perm) + if err == nil { + return &LockedFile{f}, nil + } + time.Sleep(10 * time.Millisecond) + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go new file mode 100644 index 0000000000..352ca5590d --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go @@ -0,0 +1,62 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + lock.Pid = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil { + f.Close() + if err == syscall.EAGAIN { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Type = syscall.F_WRLCK + lock.Whence = 0 + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go new file mode 100644 index 0000000000..ed01164de6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go @@ -0,0 +1,29 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows,!plan9,!solaris,!linux + +package fileutil + +import ( + "os" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockTryLockFile(path, flag, perm) +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockLockFile(path, flag, perm) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go new file mode 100644 index 0000000000..8698f4a8d1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go @@ -0,0 +1,125 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "errors" + "fmt" + "os" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + + errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.") +) + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + LOCKFILE_EXCLUSIVE_LOCK = 2 + LOCKFILE_FAIL_IMMEDIATELY = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func open(path string, flag int, perm os.FileMode) (*os.File, error) { + if path == "" { + return nil, fmt.Errorf("cannot open empty filename") + } + var access uint32 + switch flag { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + case syscall.O_WRONLY | syscall.O_CREAT: + access = syscall.GENERIC_ALL + default: + panic(fmt.Errorf("flag %v is not supported", flag)) + } + fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]), + access, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, + syscall.OPEN_ALWAYS, + syscall.FILE_ATTRIBUTE_NORMAL, + 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func lockFile(fd syscall.Handle, flags uint32) error { + var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK + flag |= flags + if fd == syscall.InvalidHandle { + return nil + } + err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } 
else if err.Error() == errLocked.Error() { + return ErrLocked + } else if err != errLockViolation { + return err + } + return nil +} + +func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + var reserved uint32 = 0 + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go new file mode 100644 index 0000000000..bb7f028123 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go @@ -0,0 +1,47 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import "os" + +// Preallocate tries to allocate the space for given +// file. This operation is only supported on linux by a +// few filesystems (btrfs, ext4, etc.). +// If the operation is unsupported, no error will be returned. +// Otherwise, the error encountered will be returned. +func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if extendFile { + return preallocExtend(f, sizeInBytes) + } + return preallocFixed(f, sizeInBytes) +} + +func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { + curOff, err := f.Seek(0, os.SEEK_CUR) + if err != nil { + return err + } + size, err := f.Seek(sizeInBytes, os.SEEK_END) + if err != nil { + return err + } + if _, err = f.Seek(curOff, os.SEEK_SET); err != nil { + return err + } + if sizeInBytes > size { + return nil + } + return f.Truncate(sizeInBytes) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go new file mode 100644 index 0000000000..1ed09c560f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go @@ -0,0 +1,43 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
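The two fileutil building blocks above, file locking and preallocation, are typically combined when creating an append-only file such as a write-ahead-log segment. The following is a minimal usage sketch, not part of the patch: it assumes LockedFile embeds *os.File (as the `&LockedFile{f}` constructors suggest) and reuses the package's PrivateFileMode constant; the path and size are hypothetical.

package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	// Block until we hold an exclusive lock on the segment file.
	// O_WRONLY|O_CREAT is one of the flag combinations the Windows
	// open() above explicitly supports.
	f, err := fileutil.LockFile("/tmp/demo/000.wal", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err != nil {
		log.Fatal(err)
	}
	// Closing the file also releases flock-style locks.
	defer f.Close()

	// Reserve 64 MiB up front; on filesystems without fallocate
	// support this transparently falls back to preallocExtendTrunc.
	if err := fileutil.Preallocate(f.File, 64<<20, true); err != nil {
		log.Fatal(err)
	}
}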
+ +// +build darwin + +package fileutil + +import ( + "os" + "syscall" + "unsafe" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + if err := preallocFixed(f, sizeInBytes); err != nil { + return err + } + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + fstore := &syscall.Fstore_t{ + Flags: syscall.F_ALLOCATEALL, + Posmode: syscall.F_PEOFPOSMODE, + Length: sizeInBytes} + p := unsafe.Pointer(fstore) + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p)) + if errno == 0 || errno == syscall.ENOTSUP { + return nil + } + return errno +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go new file mode 100644 index 0000000000..50bd84f02a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + // use mode = 0 to change size + err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // not supported; fallback + // fallocate EINTRs frequently in some environments; fallback + if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + return preallocExtendTrunc(f, sizeInBytes) + } + } + return err +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE + err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // treat not supported as nil error + if ok && errno == syscall.ENOTSUP { + return nil + } + } + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go new file mode 100644 index 0000000000..162fbc5f78 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go @@ -0,0 +1,25 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !linux,!darwin + +package fileutil + +import "os" + +func preallocExtend(f *os.File, sizeInBytes int64) error { + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { return nil } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go new file mode 100644 index 0000000000..53bda0c012 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go @@ -0,0 +1,78 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "os" + "path" + "sort" + "strings" + "time" +) + +func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { + return purgeFile(dirname, suffix, max, interval, stop, nil) +} + +// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. +func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error { + errC := make(chan error, 1) + go func() { + for { + fnames, err := ReadDir(dirname) + if err != nil { + errC <- err + return + } + newfnames := make([]string, 0) + for _, fname := range fnames { + if strings.HasSuffix(fname, suffix) { + newfnames = append(newfnames, fname) + } + } + sort.Strings(newfnames) + fnames = newfnames + for len(newfnames) > int(max) { + f := path.Join(dirname, newfnames[0]) + l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + break + } + if err = os.Remove(f); err != nil { + errC <- err + return + } + if err = l.Close(); err != nil { + plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + errC <- err + return + } + plog.Infof("purged file %s successfully", f) + newfnames = newfnames[1:] + } + if purgec != nil { + for i := 0; i < len(fnames)-len(newfnames); i++ { + purgec <- fnames[i] + } + } + select { + case <-time.After(interval): + case <-stop: + return + } + } + }() + return errC +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go new file mode 100644 index 0000000000..54dd41f4f3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go @@ -0,0 +1,29 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
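PurgeFile above loops forever in its own goroutine and reports problems over the returned channel, so callers wire that channel into their shutdown handling. A hedged usage sketch follows; the directory, suffix, and retention count are made up for illustration.

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	stop := make(chan struct{})
	// Closing stop ends the purge loop at the next interval tick.
	defer close(stop)

	// Keep at most 5 files ending in ".tmp", checking every 30 seconds.
	errc := fileutil.PurgeFile("/var/lib/demo", ".tmp", 5, 30*time.Second, stop)

	go func() {
		if err := <-errc; err != nil {
			log.Printf("purge loop failed: %v", err)
		}
	}()

	time.Sleep(time.Minute) // stand-in for the application's lifetime
}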
+
+// +build !linux,!darwin
+
+package fileutil
+
+import "os"
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
+func Fsync(f *os.File) error {
+	return f.Sync()
+}
+
+// Fdatasync is a wrapper around file.Sync(). Special handling is needed on the linux platform.
+func Fdatasync(f *os.File) error {
+	return f.Sync()
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go
new file mode 100644
index 0000000000..c2f39bf204
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fsync on HFS/OSX flushes the data on to the physical drive but the drive
+// may not write it to the persistent media for quite some time and it may be
+// written out of order. Using F_FULLFSYNC ensures that the
+// physical drive's buffer will also get flushed to the media.
+func Fsync(f *os.File) error {
+	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
+	if errno == 0 {
+		return nil
+	}
+	return errno
+}
+
+// Fdatasync on the darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
+// on physical drive media.
+func Fdatasync(f *os.File) error {
+	return Fsync(f)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
new file mode 100644
index 0000000000..1bbced915e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
+func Fsync(f *os.File) error {
+	return f.Sync()
+}
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error { + return syscall.Fdatasync(int(f.Fd())) +} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. 
+ +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
new file mode 100644
index 0000000000..7f434990d2
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/journal/journal.go
@@ -0,0 +1,184 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps
+// sd-journal, a C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+	PriEmerg Priority = iota
+	PriAlert
+	PriCrit
+	PriErr
+	PriWarning
+	PriNotice
+	PriInfo
+	PriDebug
+)
+
+var conn net.Conn
+
+func init() {
+	var err error
+	conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
+	if err != nil {
+		conn = nil
+	}
+}
+
+// Enabled returns true if the local systemd journal is available for logging
+func Enabled() bool {
+	return conn != nil
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values. Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used. Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details. vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+	if conn == nil {
+		return journalError("could not connect to journald socket")
+	}
+
+	data := new(bytes.Buffer)
+	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+	appendVariable(data, "MESSAGE", message)
+	for k, v := range vars {
+		appendVariable(data, k, v)
+	}
+
+	_, err := io.Copy(conn, data)
+	if err != nil && isSocketSpaceError(err) {
+		file, err := tempFd()
+		if err != nil {
+			return journalError(err.Error())
+		}
+		defer file.Close()
+		_, err = io.Copy(file, data)
+		if err != nil {
+			return journalError(err.Error())
+		}
+
+		rights := syscall.UnixRights(int(file.Fd()))
+
+		/* this connection should always be a UnixConn, but better safe than sorry */
+		unixConn, ok := conn.(*net.UnixConn)
+		if !ok {
+			return journalError("can't send file through non-Unix connection")
+		}
+		if _, _, err := unixConn.WriteMsgUnix([]byte{}, rights, nil); err != nil {
+			return journalError(err.Error())
+		}
+	} else if err != nil {
+		return journalError(err.Error())
+	}
+	return nil
+}
+
+// Print prints a message to the local systemd journal using Send().
+func Print(priority Priority, format string, a ...interface{}) error {
+	return Send(fmt.Sprintf(format, a...), priority, nil)
+}
+
+func appendVariable(w io.Writer, name, value string) {
+	if !validVarName(name) {
+		journalError("variable name contains invalid character, ignoring")
+	}
+	if strings.ContainsRune(value, '\n') {
+		/* When the value contains a newline, we write:
+		 * - the variable name, followed by a newline
+		 * - the size (in 64bit little endian format)
+		 * - the data, followed by a newline
+		 */
+		fmt.Fprintln(w, name)
+		binary.Write(w, binary.LittleEndian, uint64(len(value)))
+		fmt.Fprintln(w, value)
+	} else {
+		/* just write the variable and value all on one line */
+		fmt.Fprintf(w, "%s=%s\n", name, value)
+	}
+}
+
+func validVarName(name string) bool {
+	/* The variable name must be in uppercase and consist only of characters,
+	 * numbers and underscores, and may not begin with an underscore. (from the docs)
+	 */
+	if name == "" {
+		return false
+	}
+	valid := name[0] != '_'
+	for _, c := range name {
+		valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
+	}
+	return valid
+}
+
+func isSocketSpaceError(err error) bool {
+	opErr, ok := err.(*net.OpError)
+	if !ok {
+		return false
+	}
+
+	sysErr, ok := opErr.Err.(syscall.Errno)
+	if !ok {
+		return false
+	}
+
+	return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+}
+
+func tempFd() (*os.File, error) {
+	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+	if err != nil {
+		return nil, err
+	}
+	if err := syscall.Unlink(file.Name()); err != nil {
+		file.Close()
+		return nil, err
+	}
+	return file, nil
+}
+
+func journalError(s string) error {
+	s = "journal error: " + s
+	fmt.Fprintln(os.Stderr, s)
+	return errors.New(s)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md
new file mode 100644
index 0000000000..81efb1fb6a
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/README.md
@@ -0,0 +1,39 @@
+# capnslog, the CoreOS logging package
+
+There are far too many logging packages out there; they have varying licenses, far too many features (colorization, all sorts of log frameworks), or are just a pain to use (lack of `Fatalln()`?).
+capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
+
+### Design Principles
+
+##### `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options, only generate log entries.
Libraries are silent until main lets them speak.
+
+##### All log options are runtime-configurable.
+
+It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
+
+##### There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
+
+##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+##### Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
+
+##### Log levels have specific meanings:
+
+ * Critical: Unrecoverable. Must fail.
+ * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
+ * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+ * Notice: Normal, but important (uncommon) log information.
+ * Info: Normal, working log information; everything is fine, but helpful notices for auditing or common operations.
+ * Debug: Everything is still fine, but even common operations may be logged, along with less helpful but more voluminous notices.
+ * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
+
diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go
new file mode 100644
index 0000000000..b305a845fb
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go
@@ -0,0 +1,157 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
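Before the formatter implementations begin, here is a sketch of the pattern the README above prescribes: each package declares its own logger, and `package main` turns logging on and routes it. The repository and package names are hypothetical.

package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// In a library this would live in the library's own package,
// registered under its own repo/package pair.
var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

func main() {
	// main owns the single output stream and the global level.
	capnslog.SetFormatter(capnslog.NewStringFormatter(os.Stderr))
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	plog.Infof("listening on %s", ":8080")
	plog.Debug("suppressed: DEBUG is below the global INFO level")
}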
+ +package capnslog + +import ( + "bufio" + "fmt" + "io" + "log" + "runtime" + "strings" + "time" +) + +type Formatter interface { + Format(pkg string, level LogLevel, depth int, entries ...interface{}) + Flush() +} + +func NewStringFormatter(w io.Writer) Formatter { + return &StringFormatter{ + w: bufio.NewWriter(w), + } +} + +type StringFormatter struct { + w *bufio.Writer +} + +func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { + now := time.Now().UTC() + s.w.WriteString(now.Format(time.RFC3339)) + s.w.WriteByte(' ') + writeEntries(s.w, pkg, l, i, entries...) + s.Flush() +} + +func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { + if pkg != "" { + w.WriteString(pkg + ": ") + } + str := fmt.Sprint(entries...) + endsInNL := strings.HasSuffix(str, "\n") + w.WriteString(str) + if !endsInNL { + w.WriteString("\n") + } +} + +func (s *StringFormatter) Flush() { + s.w.Flush() +} + +func NewPrettyFormatter(w io.Writer, debug bool) Formatter { + return &PrettyFormatter{ + w: bufio.NewWriter(w), + debug: debug, + } +} + +type PrettyFormatter struct { + w *bufio.Writer + debug bool +} + +func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { + now := time.Now() + ts := now.Format("2006-01-02 15:04:05") + c.w.WriteString(ts) + ms := now.Nanosecond() / 1000 + c.w.WriteString(fmt.Sprintf(".%06d", ms)) + if c.debug { + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) + } + c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) + writeEntries(c.w, pkg, l, depth, entries...) + c.Flush() +} + +func (c *PrettyFormatter) Flush() { + c.w.Flush() +} + +// LogFormatter emulates the form of the traditional built-in logger. +type LogFormatter struct { + logger *log.Logger + prefix string +} + +// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the +// golang log package to actually do the logging work so that logs look similar. +func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { + return &LogFormatter{ + logger: log.New(w, "", flag), // don't use prefix here + prefix: prefix, // save it instead + } +} + +// Format builds a log message for the LogFormatter. The LogLevel is ignored. +func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { + str := fmt.Sprint(entries...) + prefix := lf.prefix + if pkg != "" { + prefix = fmt.Sprintf("%s%s: ", prefix, pkg) + } + lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 +} + +// Flush is included so that the interface is complete, but is a no-op. +func (lf *LogFormatter) Flush() { + // noop +} + +// NilFormatter is a no-op log formatter that does nothing. +type NilFormatter struct { +} + +// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no +// messages so that you can cause part of your logging to be silent. +func NewNilFormatter() Formatter { + return &NilFormatter{} +} + +// Format does nothing. +func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { + // noop +} + +// Flush is included so that the interface is complete, but is a no-op. 
+func (_ *NilFormatter) Flush() { + // noop +} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go new file mode 100644 index 0000000000..426603ef30 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go @@ -0,0 +1,96 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "bytes" + "io" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +var pid = os.Getpid() + +type GlogFormatter struct { + StringFormatter +} + +func NewGlogFormatter(w io.Writer) *GlogFormatter { + g := &GlogFormatter{} + g.w = bufio.NewWriter(w) + return g +} + +func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { + g.w.Write(GlogHeader(level, depth+1)) + g.StringFormatter.Format(pkg, level, depth+1, entries...) +} + +func GlogHeader(level LogLevel, depth int) []byte { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := time.Now().UTC() + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + buf := &bytes.Buffer{} + buf.Grow(30) + _, month, day := now.Date() + hour, minute, second := now.Clock() + buf.WriteString(level.Char()) + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) + buf.WriteByte('Z') + buf.WriteByte(' ') + buf.WriteString(strconv.Itoa(pid)) + buf.WriteByte(' ') + buf.WriteString(file) + buf.WriteByte(':') + buf.WriteString(strconv.Itoa(line)) + buf.WriteByte(']') + buf.WriteByte(' ') + return buf.Bytes() +} + +const digits = "0123456789" + +func twoDigits(b *bytes.Buffer, d int) { + c2 := digits[d%10] + d /= 10 + c1 := digits[d%10] + b.WriteByte(c1) + b.WriteByte(c2) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go new file mode 100644 index 0000000000..44b8cd361b --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init.go @@ -0,0 +1,49 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
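GlogFormatter above prepends a compact header to each entry. A hedged wiring sketch (hypothetical repo name) follows; note that, as implemented above, GlogFormatter.Format writes the GlogHeader (level char, mmdd, UTC time with microseconds, pid, file:line]) and then delegates to the embedded StringFormatter, which adds its own RFC3339 timestamp before the message.

package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

func main() {
	capnslog.SetFormatter(capnslog.NewGlogFormatter(os.Stderr))
	plog.Info("starting up")
}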
+//
+// +build !windows

+package capnslog
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically,
+// otherwise it's a bug. The way to do that is to create your own init_log.go
+// file much like this one.
+
+func init() {
+	initHijack()
+
+	// Go `log` package uses os.Stderr.
+	SetFormatter(NewDefaultFormatter(os.Stderr))
+	SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+	if syscall.Getppid() == 1 {
+		// We're running under init, which may be systemd.
+		f, err := NewJournaldFormatter()
+		if err == nil {
+			return f
+		}
+	}
+	return NewPrettyFormatter(out, false)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
new file mode 100644
index 0000000000..4553050653
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import "os"
+
+func init() {
+	initHijack()
+
+	// Go `log` package uses os.Stderr.
+	SetFormatter(NewPrettyFormatter(os.Stderr, false))
+	SetGlobalLogLevel(INFO)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
new file mode 100644
index 0000000000..72e05207c5
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
@@ -0,0 +1,68 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
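The init() above deliberately leaves its defaults overridable from `package main`. A sketch of choosing the journald formatter explicitly, mirroring NewDefaultFormatter's fallback logic (illustrative only):

package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

func main() {
	// Prefer journald when a systemd journal socket is present,
	// otherwise fall back to pretty-printed stderr output with
	// file:line debug annotations enabled.
	if f, err := capnslog.NewJournaldFormatter(); err == nil {
		capnslog.SetFormatter(f)
	} else {
		capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, true))
	}
}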
+// +// +build !windows + +package capnslog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" +) + +func NewJournaldFormatter() (Formatter, error) { + if !journal.Enabled() { + return nil, errors.New("No systemd detected") + } + return &journaldFormatter{}, nil +} + +type journaldFormatter struct{} + +func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + var pri journal.Priority + switch l { + case CRITICAL: + pri = journal.PriCrit + case ERROR: + pri = journal.PriErr + case WARNING: + pri = journal.PriWarning + case NOTICE: + pri = journal.PriNotice + case INFO: + pri = journal.PriInfo + case DEBUG: + pri = journal.PriDebug + case TRACE: + pri = journal.PriDebug + default: + panic("Unhandled loglevel") + } + msg := fmt.Sprint(entries...) + tags := map[string]string{ + "PACKAGE": pkg, + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + } + err := journal.Send(msg, pri, tags) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 0000000000..970086b9f9 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "log" +) + +func initHijack() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 0000000000..226b60c225 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,245 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. + CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. 
+ ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. +func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// Returns an empty string, only here to fulfill the pflag.Value interface. +func (l *LogLevel) Type() string { + return "" +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. +func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. +func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. 
+func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. +func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 0000000000..612d55c66c --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,177 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 2 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + logger.Lock() + defer logger.Unlock() + if inLevel != CRITICAL && p.level < inLevel { + return + } + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) 
+ } +} + +func (p *PackageLogger) LevelAt(l LogLevel) bool { + logger.Lock() + defer logger.Unlock() + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + p.Logf(CRITICAL, format, args...) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatalln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.Logf(ERROR, format, args...) +} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) +} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.Logf(WARNING, format, args...) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.Logf(NOTICE, format, args...) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + if p.level < DEBUG { + return + } + p.Logf(DEBUG, format, args...) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + if p.level < DEBUG { + return + } + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + if p.level < TRACE { + return + } + p.Logf(TRACE, format, args...) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + if p.level < TRACE { + return + } + p.internalLog(calldepth, TRACE, entries...) 
+} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 0000000000..4be5a1f2de --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "fmt" + "log/syslog" +) + +func NewSyslogFormatter(w *syslog.Writer) Formatter { + return &syslogFormatter{w} +} + +func NewDefaultSyslogFormatter(tag string) (Formatter, error) { + w, err := syslog.New(syslog.LOG_DEBUG, tag) + if err != nil { + return nil, err + } + return NewSyslogFormatter(w), nil +} + +type syslogFormatter struct { + w *syslog.Writer +} + +func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + for _, entry := range entries { + str := fmt.Sprint(entry) + switch l { + case CRITICAL: + s.w.Crit(str) + case ERROR: + s.w.Err(str) + case WARNING: + s.w.Warning(str) + case NOTICE: + s.w.Notice(str) + case INFO: + s.w.Info(str) + case DEBUG: + s.w.Debug(str) + case TRACE: + s.w.Debug(str) + default: + panic("Unhandled loglevel") + } + } +} + +func (s *syslogFormatter) Flush() { +} diff --git a/vendor/github.com/dgryski/go-bits/LICENSE b/vendor/github.com/dgryski/go-bits/LICENSE new file mode 100644 index 0000000000..039a2e4c0b --- /dev/null +++ b/vendor/github.com/dgryski/go-bits/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Damian Gryski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
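The logmap and PackageLogger machinery above supports per-repository runtime level configuration. A hedged sketch, assuming at least one PackageLogger has been registered for the (hypothetical) repo "github.com/example/project":

package main

import (
	"log"

	"github.com/coreos/pkg/capnslog"
)

var plog = capnslog.NewPackageLogger("github.com/example/project", "storage")

func main() {
	// MustRepoLogger panics if nothing is registered under this repo;
	// the package-level plog above guarantees registration.
	repoLog := capnslog.MustRepoLogger("github.com/example/project")

	// "*" sets a repo-wide default; named packages override it.
	cfg, err := repoLog.ParseLogLevelConfig("*=NOTICE,storage=DEBUG")
	if err != nil {
		log.Fatal(err)
	}
	repoLog.SetLogLevel(cfg)
}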
diff --git a/vendor/github.com/dgryski/go-bits/README b/vendor/github.com/dgryski/go-bits/README
new file mode 100644
index 0000000000..8486803da3
--- /dev/null
+++ b/vendor/github.com/dgryski/go-bits/README
@@ -0,0 +1 @@
+godoc: https://godoc.org/github.com/dgryski/go-bits
diff --git a/vendor/github.com/dgryski/go-bits/clz_amd64.s b/vendor/github.com/dgryski/go-bits/clz_amd64.s
new file mode 100644
index 0000000000..b5b9336d56
--- /dev/null
+++ b/vendor/github.com/dgryski/go-bits/clz_amd64.s
@@ -0,0 +1,13 @@
+// +build amd64,!appengine
+
+// func Clz(x uint64) uint64
+TEXT ·Clz(SB),4,$0-16
+	BSRQ x+0(FP), AX
+	JZ zero
+	SUBQ $63, AX
+	NEGQ AX
+	MOVQ AX, ret+8(FP)
+	RET
+zero:
+	MOVQ $64, ret+8(FP)
+	RET
diff --git a/vendor/github.com/dgryski/go-bits/clz_asm.go b/vendor/github.com/dgryski/go-bits/clz_asm.go
new file mode 100644
index 0000000000..005b09007d
--- /dev/null
+++ b/vendor/github.com/dgryski/go-bits/clz_asm.go
@@ -0,0 +1,6 @@
+// +build amd64,!appengine
+
+package bits
+
+// Clz counts leading zeroes
+func Clz(x uint64) uint64
diff --git a/vendor/github.com/dgryski/go-bits/ctz_amd64.s b/vendor/github.com/dgryski/go-bits/ctz_amd64.s
new file mode 100644
index 0000000000..fc6fbf8d23
--- /dev/null
+++ b/vendor/github.com/dgryski/go-bits/ctz_amd64.s
@@ -0,0 +1,11 @@
+// +build amd64,!appengine
+
+// func Ctz(x uint64) uint64
+TEXT ·Ctz(SB),4,$0-16
+	BSFQ x+0(FP), AX
+	JZ zero
+	MOVQ AX, ret+8(FP)
+	RET
+zero:
+	MOVQ $64, ret+8(FP)
+	RET
diff --git a/vendor/github.com/dgryski/go-bits/ctz_asm.go b/vendor/github.com/dgryski/go-bits/ctz_asm.go
new file mode 100644
index 0000000000..ea952b5121
--- /dev/null
+++ b/vendor/github.com/dgryski/go-bits/ctz_asm.go
@@ -0,0 +1,6 @@
+// +build amd64,!appengine
+
+package bits
+
+// Ctz counts trailing zeroes
+func Ctz(x uint64) uint64
diff --git a/vendor/github.com/dgryski/go-bits/popcnt_amd64.s b/vendor/github.com/dgryski/go-bits/popcnt_amd64.s
new file mode 100644
index 0000000000..4d70c57d6a
--- /dev/null
+++ b/vendor/github.com/dgryski/go-bits/popcnt_amd64.s
@@ -0,0 +1,11 @@
+// +build amd64,!appengine,!popcntgo
+
+#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2
+
+// func Popcnt(x uint64) uint64
+
+TEXT ·Popcnt(SB),4,$0-16
+	MOVQ x+0(FP), DX
+	POPCNTQ_DX_DX
+	MOVQ DX, ret+8(FP)
+	RET
diff --git a/vendor/github.com/dgryski/go-bits/popcnt_asm.go b/vendor/github.com/dgryski/go-bits/popcnt_asm.go
new file mode 100644
index 0000000000..57e869adee
--- /dev/null
+++ b/vendor/github.com/dgryski/go-bits/popcnt_asm.go
@@ -0,0 +1,6 @@
+// +build amd64,!appengine,!popcntgo
+
+package bits
+
+// Popcnt counts the number of bits set
+func Popcnt(x uint64) uint64
diff --git a/vendor/github.com/fabxc/tsdb/block.go b/vendor/github.com/fabxc/tsdb/block.go
new file mode 100644
index 0000000000..ca7c48f469
--- /dev/null
+++ b/vendor/github.com/fabxc/tsdb/block.go
@@ -0,0 +1,237 @@
+package tsdb
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+	"github.com/pkg/errors"
+)
+
+// Block handles reads against a Block of time series data.
+type Block interface {
+	// Directory where block data is stored.
+	Dir() string
+
+	// Meta returns meta information about the block.
+	Meta() BlockMeta
+
+	// Index returns an IndexReader over the block's data.
+	Index() IndexReader
+
+	// Series returns a SeriesReader over the block's data.
+	Series() SeriesReader
+
+	// Persisted returns whether the block is already persisted,
+	// and no longer being appended to.
+ Persisted() bool + + // Close releases all underlying resources of the block. + Close() error +} + +// BlockMeta provides meta information about a block. +type BlockMeta struct { + // Sequence number of the block. + Sequence int `json:"sequence"` + + // MinTime and MaxTime specify the time range all samples + // in the block are in. + MinTime int64 `json:"minTime"` + MaxTime int64 `json:"maxTime"` + + // Stats about the contents of the block. + Stats struct { + NumSamples uint64 `json:"numSamples,omitempty"` + NumSeries uint64 `json:"numSeries,omitempty"` + NumChunks uint64 `json:"numChunks,omitempty"` + } `json:"stats,omitempty"` + + // Information on compactions the block was created from. + Compaction struct { + Generation int `json:"generation"` + } `json:"compaction"` +} + +const ( + flagNone = 0 + flagStd = 1 +) + +type persistedBlock struct { + dir string + meta BlockMeta + + chunksf, indexf *mmapFile + + chunkr *seriesReader + indexr *indexReader +} + +type blockMeta struct { + Version int `json:"version"` + + *BlockMeta +} + +const metaFilename = "meta.json" + +func readMetaFile(dir string) (*BlockMeta, error) { + b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename)) + if err != nil { + return nil, err + } + var m blockMeta + + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + if m.Version != 1 { + return nil, errors.Errorf("unexpected meta file version %d", m.Version) + } + + return m.BlockMeta, nil +} + +func writeMetaFile(dir string, meta *BlockMeta) error { + f, err := os.Create(filepath.Join(dir, metaFilename)) + if err != nil { + return err + } + + enc := json.NewEncoder(f) + enc.SetIndent("", "\t") + + if err := enc.Encode(&blockMeta{Version: 1, BlockMeta: meta}); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + + return nil +} + +func newPersistedBlock(dir string) (*persistedBlock, error) { + meta, err := readMetaFile(dir) + if err != nil { + return nil, err + } + + chunksf, err := openMmapFile(chunksFileName(dir)) + if err != nil { + return nil, errors.Wrap(err, "open chunk file") + } + indexf, err := openMmapFile(indexFileName(dir)) + if err != nil { + return nil, errors.Wrap(err, "open index file") + } + + sr, err := newSeriesReader(chunksf.b) + if err != nil { + return nil, errors.Wrap(err, "create series reader") + } + ir, err := newIndexReader(sr, indexf.b) + if err != nil { + return nil, errors.Wrap(err, "create index reader") + } + + pb := &persistedBlock{ + dir: dir, + meta: *meta, + chunksf: chunksf, + indexf: indexf, + chunkr: sr, + indexr: ir, + } + return pb, nil +} + +func (pb *persistedBlock) Close() error { + err0 := pb.chunksf.Close() + err1 := pb.indexf.Close() + + if err0 != nil { + return err0 + } + return err1 +} + +func (pb *persistedBlock) Dir() string { return pb.dir } +func (pb *persistedBlock) Persisted() bool { return true } +func (pb *persistedBlock) Index() IndexReader { return pb.indexr } +func (pb *persistedBlock) Series() SeriesReader { return pb.chunkr } +func (pb *persistedBlock) Meta() BlockMeta { return pb.meta } + +func chunksFileName(path string) string { + return filepath.Join(path, "chunks-000") +} + +func indexFileName(path string) string { + return filepath.Join(path, "index-000") +} + +type mmapFile struct { + f *fileutil.LockedFile + b []byte +} + +func openMmapFile(path string) (*mmapFile, error) { + // We have to open the file in RDWR for the lock to work with fileutil. + // TODO(fabxc): use own flock call that supports multi-reader. 
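+	// Both the lock and the mapped region stay alive until the returned
+	// mmapFile's Close, which releases them together.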
+ f, err := fileutil.TryLockFile(path, os.O_RDWR, 0666) + if err != nil { + return nil, errors.Wrap(err, "try lock file") + } + info, err := f.Stat() + if err != nil { + return nil, errors.Wrap(err, "stat") + } + + b, err := mmap(f.File, int(info.Size())) + if err != nil { + return nil, errors.Wrap(err, "mmap") + } + + return &mmapFile{f: f, b: b}, nil +} + +func (f *mmapFile) Close() error { + err0 := munmap(f.b) + err1 := f.f.Close() + + if err0 != nil { + return err0 + } + return err1 +} + +// A skiplist maps offsets to values. The values found in the data at an +// offset are strictly greater than the indexed value. +type skiplist interface { + // offset returns the offset to data containing values of x and lower. + offset(x int64) (uint32, bool) +} + +// simpleSkiplist is a slice of plain value/offset pairs. +type simpleSkiplist []skiplistPair + +type skiplistPair struct { + value int64 + offset uint32 +} + +func (sl simpleSkiplist) offset(x int64) (uint32, bool) { + // Search for the first offset that contains data greater than x. + i := sort.Search(len(sl), func(i int) bool { return sl[i].value >= x }) + + // If no element was found return false. If the first element is found, + // there's no previous offset actually containing values that are x or lower. + if i == len(sl) || i == 0 { + return 0, false + } + return sl[i-1].offset, true +} diff --git a/vendor/github.com/fabxc/tsdb/chunks/bstream.go b/vendor/github.com/fabxc/tsdb/chunks/bstream.go new file mode 100644 index 0000000000..25fadb26d2 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/chunks/bstream.go @@ -0,0 +1,169 @@ +package chunks + +import "io" + +// bstream is a stream of bits +type bstream struct { + stream []byte // the data stream + count uint8 // how many bits are valid in current byte +} + +func newBReader(b []byte) *bstream { + return &bstream{stream: b, count: 8} +} + +func newBWriter(size int) *bstream { + return &bstream{stream: make([]byte, 0, size), count: 0} +} + +func (b *bstream) clone() *bstream { + d := make([]byte, len(b.stream)) + copy(d, b.stream) + return &bstream{stream: d, count: b.count} +} + +func (b *bstream) bytes() []byte { + return b.stream +} + +type bit bool + +const ( + zero bit = false + one bit = true +) + +func (b *bstream) writeBit(bit bit) { + if b.count == 0 { + b.stream = append(b.stream, 0) + b.count = 8 + } + + i := len(b.stream) - 1 + + if bit { + b.stream[i] |= 1 << (b.count - 1) + } + + b.count-- +} + +func (b *bstream) writeByte(byt byte) { + if b.count == 0 { + b.stream = append(b.stream, 0) + b.count = 8 + } + + i := len(b.stream) - 1 + + // fill up b.b with b.count bits from byt + b.stream[i] |= byt >> (8 - b.count) + + b.stream = append(b.stream, 0) + i++ + b.stream[i] = byt << b.count +} + +func (b *bstream) writeBits(u uint64, nbits int) { + u <<= (64 - uint(nbits)) + for nbits >= 8 { + byt := byte(u >> 56) + b.writeByte(byt) + u <<= 8 + nbits -= 8 + } + + for nbits > 0 { + b.writeBit((u >> 63) == 1) + u <<= 1 + nbits-- + } +} + +func (b *bstream) readBit() (bit, error) { + if len(b.stream) == 0 { + return false, io.EOF + } + + if b.count == 0 { + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return false, io.EOF + } + b.count = 8 + } + + d := (b.stream[0] << (8 - b.count)) & 0x80 + b.count-- + return d != 0, nil +} + +func (b *bstream) ReadByte() (byte, error) { + return b.readByte() +} + +func (b *bstream) readByte() (byte, error) { + if len(b.stream) == 0 { + return 0, io.EOF + } + + if b.count == 0 { + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + 
return 0, io.EOF + } + return b.stream[0], nil + } + + if b.count == 8 { + b.count = 0 + return b.stream[0], nil + } + + byt := b.stream[0] << (8 - b.count) + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return 0, io.EOF + } + + // We just advanced the stream and can assume the shift to be 0. + byt |= b.stream[0] >> b.count + + return byt, nil +} + +func (b *bstream) readBits(nbits int) (uint64, error) { + var u uint64 + + for nbits >= 8 { + byt, err := b.readByte() + if err != nil { + return 0, err + } + + u = (u << 8) | uint64(byt) + nbits -= 8 + } + + if nbits == 0 { + return u, nil + } + + if nbits > int(b.count) { + u = (u << uint(b.count)) | uint64((b.stream[0]<<(8-b.count))>>(8-b.count)) + nbits -= int(b.count) + b.stream = b.stream[1:] + + if len(b.stream) == 0 { + return 0, io.EOF + } + b.count = 8 + } + + u = (u << uint(nbits)) | uint64((b.stream[0]<<(8-b.count))>>(8-uint(nbits))) + b.count -= uint8(nbits) + return u, nil +} diff --git a/vendor/github.com/fabxc/tsdb/chunks/chunk.go b/vendor/github.com/fabxc/tsdb/chunks/chunk.go new file mode 100644 index 0000000000..6bff827357 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/chunks/chunk.go @@ -0,0 +1,57 @@ +package chunks + +import ( + "encoding/binary" + "fmt" +) + +// Encoding is the identifier for a chunk encoding +type Encoding uint8 + +func (e Encoding) String() string { + switch e { + case EncNone: + return "none" + case EncXOR: + return "XOR" + } + return "" +} + +// The different available chunk encodings. +const ( + EncNone Encoding = iota + EncXOR +) + +// Chunk holds a sequence of sample pairs that can be iterated over and appended to. +type Chunk interface { + Bytes() []byte + Encoding() Encoding + Appender() (Appender, error) + Iterator() Iterator +} + +// FromData returns a chunk from a byte slice of chunk data. +func FromData(e Encoding, d []byte) (Chunk, error) { + switch e { + case EncXOR: + return &XORChunk{ + b: &bstream{count: 0, stream: d}, + num: binary.BigEndian.Uint16(d), + }, nil + } + return nil, fmt.Errorf("unknown chunk encoding: %d", e) +} + +// Appender adds sample pairs to a chunk. +type Appender interface { + Append(int64, float64) +} + +// Iterator is a simple iterator that can only get the next value. +type Iterator interface { + At() (int64, float64) + Err() error + Next() bool +} diff --git a/vendor/github.com/fabxc/tsdb/chunks/xor.go b/vendor/github.com/fabxc/tsdb/chunks/xor.go new file mode 100644 index 0000000000..8acdda6ee0 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/chunks/xor.go @@ -0,0 +1,341 @@ +package chunks + +import ( + "encoding/binary" + "math" + + bits "github.com/dgryski/go-bits" +) + +// XORChunk holds XOR encoded sample data. +type XORChunk struct { + b *bstream + num uint16 +} + +// NewXORChunk returns a new chunk with XOR encoding of the given size. +func NewXORChunk() *XORChunk { + b := make([]byte, 2, 128) + return &XORChunk{b: &bstream{stream: b, count: 0}} +} + +func (c *XORChunk) Encoding() Encoding { + return EncXOR +} + +// Bytes returns the underlying byte slice of the chunk. +func (c *XORChunk) Bytes() []byte { + return c.b.bytes() +} + +// Appender implements the Chunk interface. +func (c *XORChunk) Appender() (Appender, error) { + it := c.iterator() + + // To get an appender we must know the state it would have if we had + // appended all existing data from scratch. + // We iterate through the end and populate via the iterator's state. 
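+	// This makes obtaining an appender linear in the number of samples
+	// already stored in the chunk.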
+ for it.Next() { + } + if err := it.Err(); err != nil { + return nil, err + } + + a := &xorAppender{ + c: c, + b: c.b, + t: it.t, + v: it.val, + tDelta: it.tDelta, + leading: it.leading, + trailing: it.trailing, + } + if binary.BigEndian.Uint16(a.b.bytes()) == 0 { + a.leading = 0xff + } + return a, nil +} + +func (c *XORChunk) iterator() *xorIterator { + // Should iterators guarantee to act on a copy of the data so it doesn't lock append? + // When using striped locks to guard access to chunks, probably yes. + // Could only copy data if the chunk is not completed yet. + return &xorIterator{ + br: newBReader(c.b.bytes()[2:]), + numTotal: binary.BigEndian.Uint16(c.b.bytes()), + } +} + +// Iterator implements the Chunk interface. +func (c *XORChunk) Iterator() Iterator { + return c.iterator() +} + +type xorAppender struct { + c *XORChunk + b *bstream + + t int64 + v float64 + tDelta uint64 + + leading uint8 + trailing uint8 +} + +func (a *xorAppender) Append(t int64, v float64) { + var tDelta uint64 + num := binary.BigEndian.Uint16(a.b.bytes()) + + if num == 0 { + buf := make([]byte, binary.MaxVarintLen64) + for _, b := range buf[:binary.PutVarint(buf, t)] { + a.b.writeByte(b) + } + a.b.writeBits(math.Float64bits(v), 64) + + } else if num == 1 { + tDelta = uint64(t - a.t) + + buf := make([]byte, binary.MaxVarintLen64) + for _, b := range buf[:binary.PutUvarint(buf, tDelta)] { + a.b.writeByte(b) + } + + a.writeVDelta(v) + + } else { + tDelta = uint64(t - a.t) + dod := int64(tDelta - a.tDelta) + + // Gorilla has a max resolution of seconds, Prometheus milliseconds. + // Thus we use higher value range steps with larger bit size. + switch { + case dod == 0: + a.b.writeBit(zero) + case bitRange(dod, 14): + a.b.writeBits(0x02, 2) // '10' + a.b.writeBits(uint64(dod), 14) + case bitRange(dod, 17): + a.b.writeBits(0x06, 3) // '110' + a.b.writeBits(uint64(dod), 17) + case bitRange(dod, 20): + a.b.writeBits(0x0e, 4) // '1110' + a.b.writeBits(uint64(dod), 20) + default: + a.b.writeBits(0x0f, 4) // '1111' + a.b.writeBits(uint64(dod), 64) + } + + a.writeVDelta(v) + } + + a.t = t + a.v = v + binary.BigEndian.PutUint16(a.b.bytes(), num+1) + a.tDelta = tDelta +} + +func bitRange(x int64, nbits uint8) bool { + return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1) +} + +func (a *xorAppender) writeVDelta(v float64) { + vDelta := math.Float64bits(v) ^ math.Float64bits(a.v) + + if vDelta == 0 { + a.b.writeBit(zero) + return + } + a.b.writeBit(one) + + leading := uint8(bits.Clz(vDelta)) + trailing := uint8(bits.Ctz(vDelta)) + + // Clamp number of leading zeros to avoid overflow when encoding. + if leading >= 32 { + leading = 31 + } + + if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing { + a.b.writeBit(zero) + a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing)) + } else { + a.leading, a.trailing = leading, trailing + + a.b.writeBit(one) + a.b.writeBits(uint64(leading), 5) + + // Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have. + // Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0). + // So instead we write out a 0 and adjust it back to 64 on unpacking. 
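+		// E.g. leading=12, trailing=20 gives sigbits=32 and is encoded
+		// directly; leading=0, trailing=0 gives sigbits=64, whose low
+		// 6 bits are 0.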
+ sigbits := 64 - leading - trailing + a.b.writeBits(uint64(sigbits), 6) + a.b.writeBits(vDelta>>trailing, int(sigbits)) + } +} + +type xorIterator struct { + br *bstream + numTotal uint16 + numRead uint16 + + t int64 + val float64 + + leading uint8 + trailing uint8 + + tDelta uint64 + err error +} + +func (it *xorIterator) At() (int64, float64) { + return it.t, it.val +} + +func (it *xorIterator) Err() error { + return it.err +} + +func (it *xorIterator) Next() bool { + if it.err != nil || it.numRead == it.numTotal { + return false + } + + if it.numRead == 0 { + t, err := binary.ReadVarint(it.br) + if err != nil { + it.err = err + return false + } + v, err := it.br.readBits(64) + if err != nil { + it.err = err + return false + } + it.t = int64(t) + it.val = math.Float64frombits(v) + + it.numRead++ + return true + } + if it.numRead == 1 { + tDelta, err := binary.ReadUvarint(it.br) + if err != nil { + it.err = err + return false + } + it.tDelta = tDelta + it.t = it.t + int64(it.tDelta) + + return it.readValue() + } + + var d byte + // read delta-of-delta + for i := 0; i < 4; i++ { + d <<= 1 + bit, err := it.br.readBit() + if err != nil { + it.err = err + return false + } + if bit == zero { + break + } + d |= 1 + } + var sz uint8 + var dod int64 + switch d { + case 0x00: + // dod == 0 + case 0x02: + sz = 14 + case 0x06: + sz = 17 + case 0x0e: + sz = 20 + case 0x0f: + bits, err := it.br.readBits(64) + if err != nil { + it.err = err + return false + } + + dod = int64(bits) + } + + if sz != 0 { + bits, err := it.br.readBits(int(sz)) + if err != nil { + it.err = err + return false + } + if bits > (1 << (sz - 1)) { + // or something + bits = bits - (1 << sz) + } + dod = int64(bits) + } + + it.tDelta = uint64(int64(it.tDelta) + dod) + it.t = it.t + int64(it.tDelta) + + return it.readValue() +} + +func (it *xorIterator) readValue() bool { + bit, err := it.br.readBit() + if err != nil { + it.err = err + return false + } + + if bit == zero { + // it.val = it.val + } else { + bit, err := it.br.readBit() + if err != nil { + it.err = err + return false + } + if bit == zero { + // reuse leading/trailing zero bits + // it.leading, it.trailing = it.leading, it.trailing + } else { + bits, err := it.br.readBits(5) + if err != nil { + it.err = err + return false + } + it.leading = uint8(bits) + + bits, err = it.br.readBits(6) + if err != nil { + it.err = err + return false + } + mbits := uint8(bits) + // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder + if mbits == 0 { + mbits = 64 + } + it.trailing = 64 - it.leading - mbits + } + + mbits := int(64 - it.leading - it.trailing) + bits, err := it.br.readBits(mbits) + if err != nil { + it.err = err + return false + } + vbits := math.Float64bits(it.val) + vbits ^= (bits << it.trailing) + it.val = math.Float64frombits(vbits) + } + + it.numRead++ + return true +} diff --git a/vendor/github.com/fabxc/tsdb/compact.go b/vendor/github.com/fabxc/tsdb/compact.go new file mode 100644 index 0000000000..412a247888 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/compact.go @@ -0,0 +1,444 @@ +package tsdb + +import ( + "os" + "path/filepath" + "time" + + "github.com/coreos/etcd/pkg/fileutil" + "github.com/fabxc/tsdb/labels" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" +) + +type compactor struct { + metrics *compactorMetrics + opts *compactorOptions +} + +type compactorMetrics struct { + ran prometheus.Counter + failed prometheus.Counter + duration prometheus.Histogram +} + +func newCompactorMetrics(r 
prometheus.Registerer) *compactorMetrics {
+	m := &compactorMetrics{}
+
+	m.ran = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "tsdb_compactions_total",
+		Help: "Total number of compactions that were executed for the partition.",
+	})
+	m.failed = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "tsdb_compactions_failed_total",
+		Help: "Total number of compactions that failed for the partition.",
+	})
+	m.duration = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name: "tsdb_compaction_duration",
+		Help: "Duration of compaction runs.",
+	})
+
+	if r != nil {
+		r.MustRegister(
+			m.ran,
+			m.failed,
+			m.duration,
+		)
+	}
+	return m
+}
+
+type compactorOptions struct {
+	maxBlockRange uint64
+}
+
+func newCompactor(r prometheus.Registerer, opts *compactorOptions) *compactor {
+	return &compactor{
+		opts:    opts,
+		metrics: newCompactorMetrics(r),
+	}
+}
+
+type compactionInfo struct {
+	generation int
+	mint, maxt int64
+}
+
+const compactionBlocksLen = 4
+
+// pick returns a range [i, j) in the blocks that are suitable to be compacted
+// into a single block at position i.
+func (c *compactor) pick(bs []compactionInfo) (i, j int, ok bool) {
+	if len(bs) == 0 {
+		return 0, 0, false
+	}
+
+	// First, we always compact pending in-memory blocks – oldest first.
+	for i, b := range bs {
+		if b.generation > 0 {
+			continue
+		}
+		// Directly compact into 2nd generation with previous generation 1 blocks.
+		if i+1 >= compactionBlocksLen {
+			match := true
+			for _, pb := range bs[i-compactionBlocksLen+1 : i] {
+				match = match && pb.generation == 1
+			}
+			if match {
+				return i - compactionBlocksLen + 1, i + 1, true
+			}
+		}
+		// If we have enough generation 0 blocks to directly move to the
+		// 2nd generation, skip generation 1.
+		if len(bs)-i >= compactionBlocksLen {
+			// Guard against the newly compacted block becoming larger than
+			// the previous one.
+			if i == 0 || bs[i-1].generation >= 2 {
+				return i, i + compactionBlocksLen, true
+			}
+		}
+
+		// No optimizations possible, naively compact the new block.
+		return i, i + 1, true
+	}
+
+	// Then we care about compacting multiple blocks, starting with the oldest.
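+	// Iterate in windows of compactionBlocksLen and compact the first
+	// window that passes the generation and time-range check in match.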
+ for i := 0; i < len(bs)-compactionBlocksLen; i += compactionBlocksLen { + if c.match(bs[i : i+2]) { + return i, i + compactionBlocksLen, true + } + } + + return 0, 0, false +} + +func (c *compactor) match(bs []compactionInfo) bool { + g := bs[0].generation + if g >= 5 { + return false + } + + for _, b := range bs { + if b.generation == 0 { + continue + } + if b.generation != g { + return false + } + } + + return uint64(bs[len(bs)-1].maxt-bs[0].mint) <= c.opts.maxBlockRange +} + +func mergeBlockMetas(blocks ...Block) (res BlockMeta) { + m0 := blocks[0].Meta() + + res.Sequence = m0.Sequence + res.MinTime = m0.MinTime + res.MaxTime = blocks[len(blocks)-1].Meta().MaxTime + + g := m0.Compaction.Generation + if g == 0 && len(blocks) > 1 { + g++ + } + res.Compaction.Generation = g + 1 + + for _, b := range blocks { + res.Stats.NumSamples += b.Meta().Stats.NumSamples + } + return res +} + +func (c *compactor) compact(dir string, blocks ...Block) (err error) { + start := time.Now() + defer func() { + if err != nil { + c.metrics.failed.Inc() + } + c.metrics.duration.Observe(time.Since(start).Seconds()) + }() + + if fileutil.Exist(dir) { + if err = os.RemoveAll(dir); err != nil { + return err + } + } + if err = os.MkdirAll(dir, 0755); err != nil { + return err + } + + chunkf, err := fileutil.LockFile(chunksFileName(dir), os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return errors.Wrap(err, "create chunk file") + } + indexf, err := fileutil.LockFile(indexFileName(dir), os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return errors.Wrap(err, "create index file") + } + + indexw := newIndexWriter(indexf) + chunkw := newSeriesWriter(chunkf, indexw) + + if err = c.write(dir, blocks, indexw, chunkw); err != nil { + return errors.Wrap(err, "write compaction") + } + + if err = chunkw.Close(); err != nil { + return errors.Wrap(err, "close chunk writer") + } + if err = indexw.Close(); err != nil { + return errors.Wrap(err, "close index writer") + } + if err = fileutil.Fsync(chunkf.File); err != nil { + return errors.Wrap(err, "fsync chunk file") + } + if err = fileutil.Fsync(indexf.File); err != nil { + return errors.Wrap(err, "fsync index file") + } + if err = chunkf.Close(); err != nil { + return errors.Wrap(err, "close chunk file") + } + if err = indexf.Close(); err != nil { + return errors.Wrap(err, "close index file") + } + return nil +} + +func (c *compactor) write(dir string, blocks []Block, indexw IndexWriter, chunkw SeriesWriter) error { + var set compactionSet + + for i, b := range blocks { + all, err := b.Index().Postings("", "") + if err != nil { + return err + } + // TODO(fabxc): find more transparent way of handling this. + if hb, ok := b.(*headBlock); ok { + all = hb.remapPostings(all) + } + s := newCompactionSeriesSet(b.Index(), b.Series(), all) + + if i == 0 { + set = s + continue + } + set, err = newCompactionMerger(set, s) + if err != nil { + return err + } + } + + // We fully rebuild the postings list index from merged series. + var ( + postings = &memPostings{m: make(map[term][]uint32, 512)} + values = map[string]stringset{} + i = uint32(0) + meta = mergeBlockMetas(blocks...) 
+ ) + + for set.Next() { + lset, chunks := set.At() + if err := chunkw.WriteSeries(i, lset, chunks); err != nil { + return err + } + + meta.Stats.NumChunks += uint64(len(chunks)) + meta.Stats.NumSeries++ + + for _, l := range lset { + valset, ok := values[l.Name] + if !ok { + valset = stringset{} + values[l.Name] = valset + } + valset.set(l.Value) + + postings.add(i, term{name: l.Name, value: l.Value}) + } + i++ + } + if set.Err() != nil { + return set.Err() + } + + s := make([]string, 0, 256) + for n, v := range values { + s = s[:0] + + for x := range v { + s = append(s, x) + } + if err := indexw.WriteLabelIndex([]string{n}, s); err != nil { + return err + } + } + + for t := range postings.m { + if err := indexw.WritePostings(t.name, t.value, postings.get(t)); err != nil { + return err + } + } + // Write a postings list containing all series. + all := make([]uint32, i) + for i := range all { + all[i] = uint32(i) + } + if err := indexw.WritePostings("", "", newListPostings(all)); err != nil { + return err + } + + return writeMetaFile(dir, &meta) +} + +type compactionSet interface { + Next() bool + At() (labels.Labels, []ChunkMeta) + Err() error +} + +type compactionSeriesSet struct { + p Postings + index IndexReader + series SeriesReader + + l labels.Labels + c []ChunkMeta + err error +} + +func newCompactionSeriesSet(i IndexReader, s SeriesReader, p Postings) *compactionSeriesSet { + return &compactionSeriesSet{ + index: i, + series: s, + p: p, + } +} + +func (c *compactionSeriesSet) Next() bool { + if !c.p.Next() { + return false + } + + c.l, c.c, c.err = c.index.Series(c.p.At()) + if c.err != nil { + return false + } + for i := range c.c { + chk := &c.c[i] + + chk.Chunk, c.err = c.series.Chunk(chk.Ref) + if c.err != nil { + return false + } + } + + return true +} + +func (c *compactionSeriesSet) Err() error { + if c.err != nil { + return c.err + } + return c.p.Err() +} + +func (c *compactionSeriesSet) At() (labels.Labels, []ChunkMeta) { + return c.l, c.c +} + +type compactionMerger struct { + a, b compactionSet + + aok, bok bool + l labels.Labels + c []ChunkMeta +} + +type compactionSeries struct { + labels labels.Labels + chunks []ChunkMeta +} + +func newCompactionMerger(a, b compactionSet) (*compactionMerger, error) { + c := &compactionMerger{ + a: a, + b: b, + } + // Initialize first elements of both sets as Next() needs + // one element look-ahead. + c.aok = c.a.Next() + c.bok = c.b.Next() + + return c, c.Err() +} + +func (c *compactionMerger) compare() int { + if !c.aok { + return 1 + } + if !c.bok { + return -1 + } + a, _ := c.a.At() + b, _ := c.b.At() + return labels.Compare(a, b) +} + +func (c *compactionMerger) Next() bool { + if !c.aok && !c.bok || c.Err() != nil { + return false + } + + d := c.compare() + // Both sets contain the current series. Chain them into a single one. + if d > 0 { + c.l, c.c = c.b.At() + c.bok = c.b.Next() + } else if d < 0 { + c.l, c.c = c.a.At() + c.aok = c.a.Next() + } else { + l, ca := c.a.At() + _, cb := c.b.At() + + c.l = l + c.c = append(ca, cb...) + + c.aok = c.a.Next() + c.bok = c.b.Next() + } + return true +} + +func (c *compactionMerger) Err() error { + if c.a.Err() != nil { + return c.a.Err() + } + return c.b.Err() +} + +func (c *compactionMerger) At() (labels.Labels, []ChunkMeta) { + return c.l, c.c +} + +func renameDir(from, to string) error { + if err := os.RemoveAll(to); err != nil { + return err + } + if err := os.Rename(from, to); err != nil { + return err + } + + // Directory was renamed; sync parent dir to persist rename. 
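+	// Syncing the file contents alone is not enough: without an fsync on
+	// the parent directory, the rename itself may be lost on a crash.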
+	pdir, err := fileutil.OpenDir(filepath.Dir(to))
+	if err != nil {
+		return err
+	}
+	if err = fileutil.Fsync(pdir); err != nil {
+		return err
+	}
+	if err = pdir.Close(); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/fabxc/tsdb/db.go b/vendor/github.com/fabxc/tsdb/db.go
new file mode 100644
index 0000000000..72b82c778e
--- /dev/null
+++ b/vendor/github.com/fabxc/tsdb/db.go
@@ -0,0 +1,806 @@
+// Package tsdb implements a time series storage for float64 sample data.
+package tsdb
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unsafe"
+
+	"golang.org/x/sync/errgroup"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+	"github.com/fabxc/tsdb/labels"
+	"github.com/go-kit/kit/log"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// DefaultOptions used for the DB. They are sane for setups using
+// millisecond precision timestamps.
+var DefaultOptions = &Options{
+	WALFlushInterval: 5 * time.Second,
+	MinBlockDuration: 3 * 60 * 60 * 1000,  // 3 hours in milliseconds
+	MaxBlockDuration: 24 * 60 * 60 * 1000, // 1 day in milliseconds
+	AppendableBlocks: 2,
+}
+
+// Options of the DB storage.
+type Options struct {
+	// The interval at which the write ahead log is flushed to disk.
+	WALFlushInterval time.Duration
+
+	// The timestamp range of head blocks after which they get persisted.
+	// It's the minimum duration of any persisted block.
+	MinBlockDuration uint64
+
+	// The maximum timestamp range of compacted blocks.
+	MaxBlockDuration uint64
+
+	// Number of head blocks that can be appended to.
+	// Should be two or higher to prevent write errors in general scenarios.
+	//
+	// After a new block is started for timestamp t0 or higher, appends with
+	// timestamps as early as t0 - (n-1) * MinBlockDuration are valid.
+	AppendableBlocks int
+}
+
+// Appender allows appending a batch of data. It must be completed with a
+// call to Commit or Rollback and must not be reused afterwards.
+type Appender interface {
+	// Add adds a sample pair for the given series. A reference number is
+	// returned which can be used to add further samples in the same or later
+	// transactions.
+	// Returned reference numbers are ephemeral and may be rejected in calls
+	// to AddFast() at any point. Adding the sample via Add() returns a new
+	// reference number.
+	Add(l labels.Labels, t int64, v float64) (uint64, error)
+
+	// AddFast adds a sample pair for the referenced series. It is generally
+	// faster than adding a sample by providing its full label set.
+	AddFast(ref uint64, t int64, v float64) error
+
+	// Commit submits the collected samples and purges the batch.
+	Commit() error
+
+	// Rollback rolls back all modifications made in the appender so far.
+	Rollback() error
+}
+
+const sep = '\xff'
+
+// DB handles reads and writes of time series falling into
+// a hashed partition of a series database.
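+// It holds all currently persisted blocks as well as the in-memory head
+// blocks that new samples are appended to.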
+type DB struct {
+	dir     string
+	logger  log.Logger
+	metrics *dbMetrics
+	opts    *Options
+
+	mtx       sync.RWMutex
+	persisted []*persistedBlock
+	heads     []*headBlock
+	headGen   uint8
+
+	compactor *compactor
+
+	compactc chan struct{}
+	donec    chan struct{}
+	stopc    chan struct{}
+}
+
+type dbMetrics struct {
+	samplesAppended      prometheus.Counter
+	compactionsTriggered prometheus.Counter
+}
+
+func newDBMetrics(r prometheus.Registerer) *dbMetrics {
+	m := &dbMetrics{}
+
+	m.samplesAppended = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "tsdb_samples_appended_total",
+		Help: "Total number of appended samples.",
+	})
+	m.compactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "tsdb_compactions_triggered_total",
+		Help: "Total number of triggered compactions for the partition.",
+	})
+
+	if r != nil {
+		r.MustRegister(
+			m.samplesAppended,
+			m.compactionsTriggered,
+		)
+	}
+	return m
+}
+
+// Open returns a new DB in the given directory.
+func Open(dir string, logger log.Logger, opts *Options) (db *DB, err error) {
+	if !fileutil.Exist(dir) {
+		if err := os.MkdirAll(dir, 0777); err != nil {
+			return nil, err
+		}
+	}
+	// var r prometheus.Registerer
+	r := prometheus.DefaultRegisterer
+
+	if opts == nil {
+		opts = DefaultOptions
+	}
+	if opts.AppendableBlocks < 1 {
+		return nil, errors.Errorf("AppendableBlocks must be greater than 0")
+	}
+
+	db = &DB{
+		dir:      dir,
+		logger:   logger,
+		metrics:  newDBMetrics(r),
+		opts:     opts,
+		compactc: make(chan struct{}, 1),
+		donec:    make(chan struct{}),
+		stopc:    make(chan struct{}),
+	}
+	db.compactor = newCompactor(r, &compactorOptions{
+		maxBlockRange: opts.MaxBlockDuration,
+	})
+
+	if err := db.initBlocks(); err != nil {
+		return nil, err
+	}
+
+	go db.run()
+
+	return db, nil
+}
+
+func (db *DB) run() {
+	defer close(db.donec)
+
+	for {
+		select {
+		case <-db.compactc:
+			db.metrics.compactionsTriggered.Inc()
+
+			var infos []compactionInfo
+			for _, b := range db.compactable() {
+				m := b.Meta()
+
+				infos = append(infos, compactionInfo{
+					generation: m.Compaction.Generation,
+					mint:       m.MinTime,
+					maxt:       m.MaxTime,
+				})
+			}
+
+			i, j, ok := db.compactor.pick(infos)
+			if !ok {
+				continue
+			}
+			db.logger.Log("msg", "picked", "i", i, "j", j)
+			for k := i; k < j; k++ {
+				db.logger.Log("k", k, "generation", infos[k].generation)
+			}
+
+			if err := db.compact(i, j); err != nil {
+				db.logger.Log("msg", "compaction failed", "err", err)
+				continue
+			}
+			db.logger.Log("msg", "compaction completed")
+			// Trigger another compaction in case there's more work to do.
+			select {
+			case db.compactc <- struct{}{}:
+			default:
+			}
+
+		case <-db.stopc:
+			return
+		}
+	}
+}
+
+func (db *DB) getBlock(i int) Block {
+	if i < len(db.persisted) {
+		return db.persisted[i]
+	}
+	return db.heads[i-len(db.persisted)]
+}
+
+// removeBlocks removes the blocks in range [i, j) from the list of persisted
+// and head blocks. The blocks are not closed and their files not deleted.
+func (db *DB) removeBlocks(i, j int) {
+	for k := i; k < j; k++ {
+		if i < len(db.persisted) {
+			db.persisted = append(db.persisted[:i], db.persisted[i+1:]...)
+		} else {
+			l := i - len(db.persisted)
+			db.heads = append(db.heads[:l], db.heads[l+1:]...)
+		}
+	}
+}
+
+func (db *DB) blocks() (bs []Block) {
+	for _, b := range db.persisted {
+		bs = append(bs, b)
+	}
+	for _, b := range db.heads {
+		bs = append(bs, b)
+	}
+	return bs
+}
+
+// compact blocks in range [i, j) into a temporary directory and atomically
+// swap the blocks out on successful completion.
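+// A failure before the final rename leaves the original blocks untouched.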
+func (db *DB) compact(i, j int) error { + if j <= i { + return errors.New("invalid compaction block range") + } + var blocks []Block + for k := i; k < j; k++ { + blocks = append(blocks, db.getBlock(k)) + } + var ( + dir = blocks[0].Dir() + tmpdir = dir + ".tmp" + ) + + if err := db.compactor.compact(tmpdir, blocks...); err != nil { + return err + } + + pb, err := newPersistedBlock(tmpdir) + if err != nil { + return err + } + + db.mtx.Lock() + defer db.mtx.Unlock() + + for _, b := range blocks { + if err := b.Close(); err != nil { + return errors.Wrapf(err, "close old block %s", b.Dir()) + } + } + + if err := renameDir(tmpdir, dir); err != nil { + return errors.Wrap(err, "rename dir") + } + pb.dir = dir + + db.removeBlocks(i, j) + db.persisted = append(db.persisted, pb) + + for _, b := range blocks[1:] { + if err := os.RemoveAll(b.Dir()); err != nil { + return errors.Wrap(err, "removing old block") + } + } + return nil +} + +func (db *DB) initBlocks() error { + var ( + persisted []*persistedBlock + heads []*headBlock + ) + + dirs, err := blockDirs(db.dir) + if err != nil { + return err + } + + for _, dir := range dirs { + if fileutil.Exist(filepath.Join(dir, walFileName)) { + h, err := openHeadBlock(dir, db.logger) + if err != nil { + return err + } + h.generation = db.headGen + db.headGen++ + heads = append(heads, h) + continue + } + b, err := newPersistedBlock(dir) + if err != nil { + return err + } + persisted = append(persisted, b) + } + + db.persisted = persisted + db.heads = heads + + return nil +} + +// Close the partition. +func (db *DB) Close() error { + close(db.stopc) + <-db.donec + + var merr MultiError + + db.mtx.Lock() + defer db.mtx.Unlock() + + for _, pb := range db.persisted { + merr.Add(pb.Close()) + } + for _, hb := range db.heads { + merr.Add(hb.Close()) + } + + return merr.Err() +} + +// Appender returns a new Appender on the database. +func (db *DB) Appender() Appender { + db.mtx.RLock() + a := &dbAppender{db: db} + + for _, b := range db.appendable() { + a.heads = append(a.heads, b.Appender().(*headAppender)) + } + return a +} + +type dbAppender struct { + db *DB + heads []*headAppender + samples int +} + +func (a *dbAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) { + h, err := a.appenderFor(t) + if err != nil { + return 0, err + } + ref, err := h.Add(lset, t, v) + if err != nil { + return 0, err + } + a.samples++ + return ref | (uint64(h.generation) << 40), nil +} + +func (a *dbAppender) hashedAdd(hash uint64, lset labels.Labels, t int64, v float64) (uint64, error) { + h, err := a.appenderFor(t) + if err != nil { + return 0, err + } + ref, err := h.hashedAdd(hash, lset, t, v) + if err != nil { + return 0, err + } + a.samples++ + return ref | (uint64(h.generation) << 40), nil +} + +func (a *dbAppender) AddFast(ref uint64, t int64, v float64) error { + // We store the head generation in the 4th byte and use it to reject + // stale references. + gen := uint8((ref << 16) >> 56) + + h, err := a.appenderFor(t) + if err != nil { + return err + } + // If the reference pointed into a previous block, we cannot + // use it to append the sample. + if h.generation != gen { + return ErrNotFound + } + if err := h.AddFast(ref, t, v); err != nil { + return err + } + + a.samples++ + return nil +} + +// appenderFor gets the appender for the head containing timestamp t. +// If the head block doesn't exist yet, it gets created. +func (a *dbAppender) appenderFor(t int64) (*headAppender, error) { + // If there's no fitting head block for t, ensure it gets created. 
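+	// Creating it requires the DB write lock, so the read lock is given
+	// up and re-acquired, after which the local head view is re-synced.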
+ if len(a.heads) == 0 || t >= a.heads[len(a.heads)-1].meta.MaxTime { + a.db.mtx.RUnlock() + + if err := a.db.ensureHead(t); err != nil { + a.db.mtx.RLock() + return nil, err + } + + a.db.mtx.RLock() + + if len(a.heads) == 0 { + for _, b := range a.db.appendable() { + a.heads = append(a.heads, b.Appender().(*headAppender)) + } + } else { + maxSeq := a.heads[len(a.heads)-1].meta.Sequence + for _, b := range a.db.appendable() { + if b.meta.Sequence > maxSeq { + a.heads = append(a.heads, b.Appender().(*headAppender)) + } + } + } + } + for i := len(a.heads) - 1; i >= 0; i-- { + if h := a.heads[i]; t >= h.meta.MinTime { + return h, nil + } + } + + return nil, ErrNotFound +} + +func (db *DB) ensureHead(t int64) error { + db.mtx.Lock() + defer db.mtx.Unlock() + + // Initial case for a new database: we must create the first + // AppendableBlocks-1 front padding heads. + if len(db.heads) == 0 { + for i := int64(db.opts.AppendableBlocks - 1); i >= 0; i-- { + if _, err := db.cut(t - i*int64(db.opts.MinBlockDuration)); err != nil { + return err + } + } + } + + for { + h := db.heads[len(db.heads)-1] + // If t doesn't exceed the range of heads blocks, there's nothing to do. + if t < h.meta.MaxTime { + return nil + } + if _, err := db.cut(h.meta.MaxTime); err != nil { + return err + } + } +} + +func (a *dbAppender) Commit() error { + var merr MultiError + + for _, h := range a.heads { + merr.Add(h.Commit()) + } + a.db.mtx.RUnlock() + + if merr.Err() == nil { + a.db.metrics.samplesAppended.Add(float64(a.samples)) + } + return merr.Err() +} + +func (a *dbAppender) Rollback() error { + var merr MultiError + + for _, h := range a.heads { + merr.Add(h.Rollback()) + } + a.db.mtx.RUnlock() + + return merr.Err() +} + +func (db *DB) appendable() []*headBlock { + if len(db.heads) <= db.opts.AppendableBlocks { + return db.heads + } + return db.heads[len(db.heads)-db.opts.AppendableBlocks:] +} + +func (db *DB) compactable() []Block { + db.mtx.RLock() + defer db.mtx.RUnlock() + + var blocks []Block + for _, pb := range db.persisted { + blocks = append(blocks, pb) + } + + if len(db.heads) <= db.opts.AppendableBlocks { + return blocks + } + + for _, h := range db.heads[:len(db.heads)-db.opts.AppendableBlocks] { + // Blocks that won't be appendable when instantiating a new appender + // might still have active appenders on them. + // Abort at the first one we encounter. + if atomic.LoadUint64(&h.activeWriters) > 0 { + break + } + blocks = append(blocks, h) + } + return blocks +} + +func intervalOverlap(amin, amax, bmin, bmax int64) bool { + if bmin >= amin && bmin <= amax { + return true + } + if amin >= bmin && amin <= bmax { + return true + } + return false +} + +func intervalContains(min, max, t int64) bool { + return t >= min && t <= max +} + +// blocksForInterval returns all blocks within the partition that may contain +// data for the given time range. +func (db *DB) blocksForInterval(mint, maxt int64) []Block { + var bs []Block + + for _, b := range db.persisted { + m := b.Meta() + if intervalOverlap(mint, maxt, m.MinTime, m.MaxTime) { + bs = append(bs, b) + } + } + for _, b := range db.heads { + m := b.Meta() + if intervalOverlap(mint, maxt, m.MinTime, m.MaxTime) { + bs = append(bs, b) + } + } + + return bs +} + +// cut starts a new head block to append to. The completed head block +// will still be appendable for the configured grace period. 
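+// The caller must hold the DB mutex.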
+func (db *DB) cut(mint int64) (*headBlock, error) { + maxt := mint + int64(db.opts.MinBlockDuration) + + dir, seq, err := nextBlockDir(db.dir) + if err != nil { + return nil, err + } + newHead, err := createHeadBlock(dir, seq, db.logger, mint, maxt) + if err != nil { + return nil, err + } + + db.heads = append(db.heads, newHead) + db.headGen++ + + newHead.generation = db.headGen + + select { + case db.compactc <- struct{}{}: + default: + } + + return newHead, nil +} + +func isBlockDir(fi os.FileInfo) bool { + if !fi.IsDir() { + return false + } + if !strings.HasPrefix(fi.Name(), "b-") { + return false + } + if _, err := strconv.ParseUint(fi.Name()[2:], 10, 32); err != nil { + return false + } + return true +} + +func blockDirs(dir string) ([]string, error) { + files, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + var dirs []string + + for _, fi := range files { + if isBlockDir(fi) { + dirs = append(dirs, filepath.Join(dir, fi.Name())) + } + } + return dirs, nil +} + +func nextBlockDir(dir string) (string, int, error) { + names, err := fileutil.ReadDir(dir) + if err != nil { + return "", 0, err + } + + i := uint64(0) + for _, n := range names { + if !strings.HasPrefix(n, "b-") { + continue + } + j, err := strconv.ParseUint(n[2:], 10, 32) + if err != nil { + continue + } + i = j + } + return filepath.Join(dir, fmt.Sprintf("b-%0.6d", i+1)), int(i + 1), nil +} + +// PartitionedDB is a time series storage. +type PartitionedDB struct { + logger log.Logger + dir string + + partitionPow uint + Partitions []*DB +} + +func isPowTwo(x int) bool { + return x > 0 && (x&(x-1)) == 0 +} + +// OpenPartitioned or create a new DB. +func OpenPartitioned(dir string, n int, l log.Logger, opts *Options) (*PartitionedDB, error) { + if !isPowTwo(n) { + return nil, errors.Errorf("%d is not a power of two", n) + } + if opts == nil { + opts = DefaultOptions + } + if l == nil { + l = log.NewLogfmtLogger(os.Stdout) + l = log.NewContext(l).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + } + + if err := os.MkdirAll(dir, 0777); err != nil { + return nil, err + } + c := &PartitionedDB{ + logger: l, + dir: dir, + partitionPow: uint(math.Log2(float64(n))), + } + + // Initialize vertical partitiondb. + // TODO(fabxc): validate partition number to be power of 2, which is required + // for the bitshift-modulo when finding the right partition. + for i := 0; i < n; i++ { + l := log.NewContext(l).With("partition", i) + d := partitionDir(dir, i) + + s, err := Open(d, l, opts) + if err != nil { + return nil, fmt.Errorf("initializing partition %q failed: %s", d, err) + } + + c.Partitions = append(c.Partitions, s) + } + + return c, nil +} + +func partitionDir(base string, i int) string { + return filepath.Join(base, fmt.Sprintf("p-%0.4d", i)) +} + +// Close the database. +func (db *PartitionedDB) Close() error { + var g errgroup.Group + + for _, partition := range db.Partitions { + g.Go(partition.Close) + } + + return g.Wait() +} + +// Appender returns a new appender against the database. 
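+// Samples are routed to a partition by the upper bits of their label set hash.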
+func (db *PartitionedDB) Appender() Appender { + app := &partitionedAppender{db: db} + + for _, p := range db.Partitions { + app.partitions = append(app.partitions, p.Appender().(*dbAppender)) + } + return app +} + +type partitionedAppender struct { + db *PartitionedDB + partitions []*dbAppender +} + +func (a *partitionedAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) { + h := lset.Hash() + p := h >> (64 - a.db.partitionPow) + + ref, err := a.partitions[p].hashedAdd(h, lset, t, v) + if err != nil { + return 0, err + } + return ref | (p << 48), nil +} + +func (a *partitionedAppender) AddFast(ref uint64, t int64, v float64) error { + p := uint8((ref << 8) >> 56) + return a.partitions[p].AddFast(ref, t, v) +} + +func (a *partitionedAppender) Commit() error { + var merr MultiError + + for _, p := range a.partitions { + merr.Add(p.Commit()) + } + return merr.Err() +} + +func (a *partitionedAppender) Rollback() error { + var merr MultiError + + for _, p := range a.partitions { + merr.Add(p.Rollback()) + } + return merr.Err() +} + +// The MultiError type implements the error interface, and contains the +// Errors used to construct it. +type MultiError []error + +// Returns a concatenated string of the contained errors +func (es MultiError) Error() string { + var buf bytes.Buffer + + if len(es) > 1 { + fmt.Fprintf(&buf, "%d errors: ", len(es)) + } + + for i, err := range es { + if i != 0 { + buf.WriteString("; ") + } + buf.WriteString(err.Error()) + } + + return buf.String() +} + +// Add adds the error to the error list if it is not nil. +func (es *MultiError) Add(err error) { + if err == nil { + return + } + if merr, ok := err.(MultiError); ok { + *es = append(*es, merr...) + } else { + *es = append(*es, err) + } +} + +// Err returns the error list as an error or nil if it is empty. +func (es MultiError) Err() error { + if len(es) == 0 { + return nil + } + return es +} + +func yoloString(b []byte) string { + sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + h := reflect.StringHeader{ + Data: sh.Data, + Len: sh.Len, + } + return *((*string)(unsafe.Pointer(&h))) +} diff --git a/vendor/github.com/fabxc/tsdb/db_amd64.go b/vendor/github.com/fabxc/tsdb/db_amd64.go new file mode 100644 index 0000000000..cfd85c9756 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/db_amd64.go @@ -0,0 +1,10 @@ +package tsdb + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/fabxc/tsdb/db_unix.go b/vendor/github.com/fabxc/tsdb/db_unix.go new file mode 100644 index 0000000000..2e673f9516 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/db_unix.go @@ -0,0 +1,27 @@ +// +build !windows,!plan9,!solaris + +package tsdb + +import ( + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func mmap(f *os.File, length int) ([]byte, error) { + return unix.Mmap(int(f.Fd()), 0, length, unix.PROT_READ, unix.MAP_SHARED) +} + +func munmap(b []byte) (err error) { + return unix.Munmap(b) +} + +// unix.Madvise is not defined for darwin, so we define it ourselves. 
+func madvise(b []byte, advice int) (err error) {
+	_, _, e1 := unix.Syscall(unix.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
diff --git a/vendor/github.com/fabxc/tsdb/head.go b/vendor/github.com/fabxc/tsdb/head.go
new file mode 100644
index 0000000000..1c69ac3bb4
--- /dev/null
+++ b/vendor/github.com/fabxc/tsdb/head.go
@@ -0,0 +1,701 @@
+package tsdb
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"os"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/bradfitz/slice"
+	"github.com/fabxc/tsdb/chunks"
+	"github.com/fabxc/tsdb/labels"
+	"github.com/go-kit/kit/log"
+	"github.com/pkg/errors"
+)
+
+var (
+	// ErrNotFound is returned if a looked up resource was not found.
+	ErrNotFound = fmt.Errorf("not found")
+
+	// ErrOutOfOrderSample is returned if an appended sample has a
+	// timestamp smaller than the most recent sample.
+	ErrOutOfOrderSample = errors.New("out of order sample")
+
+	// ErrAmendSample is returned if an appended sample has the same timestamp
+	// as the most recent sample but a different value.
+	ErrAmendSample = errors.New("amending sample")
+
+	// ErrOutOfBounds is returned if an appended sample is out of the
+	// writable time range.
+	ErrOutOfBounds = errors.New("out of bounds")
+)
+
+// headBlock handles reads and writes of time series data within a time window.
+type headBlock struct {
+	mtx        sync.RWMutex
+	dir        string
+	generation uint8
+	wal        *WAL
+
+	activeWriters uint64
+
+	// series holds all series of the head block. Each series implicitly
+	// is assigned its index as its ID.
+	series []*memSeries
+	// mapping maps a series ID to its position in an ordered list
+	// of all series. The orderDirty flag indicates that it has gone stale.
+	mapper *positionMapper
+	// hashes contains a collision map of label set hashes of series
+	// to their memSeries.
+	hashes map[uint64][]*memSeries
+
+	values   map[string]stringset // label names to possible values
+	postings *memPostings         // postings lists for terms
+
+	metamtx sync.RWMutex
+	meta    BlockMeta
+}
+
+func createHeadBlock(dir string, seq int, l log.Logger, mint, maxt int64) (*headBlock, error) {
+	if err := os.MkdirAll(dir, 0755); err != nil {
+		return nil, err
+	}
+
+	if err := writeMetaFile(dir, &BlockMeta{
+		Sequence: seq,
+		MinTime:  mint,
+		MaxTime:  maxt,
+	}); err != nil {
+		return nil, err
+	}
+	return openHeadBlock(dir, l)
+}
+
+// openHeadBlock opens a head block in the given directory.
+func openHeadBlock(dir string, l log.Logger) (*headBlock, error) {
+	wal, err := OpenWAL(dir, log.NewContext(l).With("component", "wal"), 5*time.Second)
+	if err != nil {
+		return nil, err
+	}
+	meta, err := readMetaFile(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	h := &headBlock{
+		dir:      dir,
+		wal:      wal,
+		series:   []*memSeries{},
+		hashes:   map[uint64][]*memSeries{},
+		values:   map[string]stringset{},
+		postings: &memPostings{m: make(map[term][]uint32)},
+		mapper:   newPositionMapper(nil),
+		meta:     *meta,
+	}
+
+	// Replay contents of the write ahead log.
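+	// Series entries recreate the in-memory series in order; sample
+	// entries then address them by index.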
+	if err = wal.ReadAll(&walHandler{
+		series: func(lset labels.Labels) error {
+			h.create(lset.Hash(), lset)
+			h.meta.Stats.NumSeries++
+			return nil
+		},
+		sample: func(s refdSample) error {
+			h.series[s.ref].append(s.t, s.v)
+
+			if !h.inBounds(s.t) {
+				return ErrOutOfBounds
+			}
+
+			h.meta.Stats.NumSamples++
+			return nil
+		},
+	}); err != nil {
+		return nil, err
+	}
+
+	h.updateMapping()
+
+	return h, nil
+}
+
+// inBounds returns true if the given timestamp is within the valid
+// time bounds of the block.
+func (h *headBlock) inBounds(t int64) bool {
+	return t >= h.meta.MinTime && t <= h.meta.MaxTime
+}
+
+// Close syncs all data and closes underlying resources of the head block.
+func (h *headBlock) Close() error {
+	if err := writeMetaFile(h.dir, &h.meta); err != nil {
+		return err
+	}
+	return h.wal.Close()
+}
+
+func (h *headBlock) Meta() BlockMeta {
+	h.metamtx.RLock()
+	defer h.metamtx.RUnlock()
+
+	return h.meta
+}
+
+func (h *headBlock) Dir() string          { return h.dir }
+func (h *headBlock) Persisted() bool      { return false }
+func (h *headBlock) Index() IndexReader   { return &headIndexReader{h} }
+func (h *headBlock) Series() SeriesReader { return &headSeriesReader{h} }
+
+func (h *headBlock) Appender() Appender {
+	atomic.AddUint64(&h.activeWriters, 1)
+
+	h.mtx.RLock()
+	return &headAppender{headBlock: h, samples: getHeadAppendBuffer()}
+}
+
+var headPool = sync.Pool{}
+
+func getHeadAppendBuffer() []refdSample {
+	b := headPool.Get()
+	if b == nil {
+		return make([]refdSample, 0, 512)
+	}
+	return b.([]refdSample)
+}
+
+func putHeadAppendBuffer(b []refdSample) {
+	headPool.Put(b[:0])
+}
+
+type headAppender struct {
+	*headBlock
+
+	newSeries map[uint64]hashedLabels
+	newHashes map[uint64]uint64
+	refmap    map[uint64]uint64
+	newLabels []labels.Labels
+
+	samples []refdSample
+}
+
+type hashedLabels struct {
+	hash   uint64
+	labels labels.Labels
+}
+
+type refdSample struct {
+	ref uint64
+	t   int64
+	v   float64
+}
+
+func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
+	return a.hashedAdd(lset.Hash(), lset, t, v)
+}
+
+func (a *headAppender) hashedAdd(hash uint64, lset labels.Labels, t int64, v float64) (uint64, error) {
+	if ms := a.get(hash, lset); ms != nil {
+		return uint64(ms.ref), a.AddFast(uint64(ms.ref), t, v)
+	}
+	if ref, ok := a.newHashes[hash]; ok {
+		return uint64(ref), a.AddFast(uint64(ref), t, v)
+	}
+
+	// We only know the actual reference after committing. We generate an
+	// intermediate reference only valid for this batch.
+	// It is indicated by the LSB of the 4th byte being set to 1.
+	// We use a random ID to avoid collisions when new series are created
+	// in two subsequent batches.
+	// TODO(fabxc): Provide method for client to determine whether a ref
+	// is valid beyond the current transaction.
+	ref := uint64(rand.Int31()) | (1 << 32)
+
+	if a.newSeries == nil {
+		a.newSeries = map[uint64]hashedLabels{}
+		a.newHashes = map[uint64]uint64{}
+		a.refmap = map[uint64]uint64{}
+	}
+	a.newSeries[ref] = hashedLabels{hash: hash, labels: lset}
+	a.newHashes[hash] = ref
+
+	return ref, a.AddFast(ref, t, v)
+}
+
+func (a *headAppender) AddFast(ref uint64, t int64, v float64) error {
+	// We only own the last 5 bytes of the reference. Anything before is
+	// used by higher-order appenders. We erase it to avoid issues.
+	ref = (ref << 24) >> 24
+
+	// Distinguish between existing series and series created in
+	// this transaction.
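+	// References with bit 32 set were handed out by this appender and
+	// exist only in newSeries until Commit assigns final IDs.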
+	if ref&(1<<32) != 0 {
+		if _, ok := a.newSeries[ref]; !ok {
+			return ErrNotFound
+		}
+		// TODO(fabxc): we also have to validate here that the
+		// sample sequence is valid.
+		// We also have to revalidate it as we switch locks and create
+		// the new series.
+	} else {
+		ms := a.series[int(ref)]
+		if ms == nil {
+			return ErrNotFound
+		}
+		// TODO(fabxc): memory series should be locked here already.
+		// Only problem is release of locks in case of a rollback.
+		c := ms.head()
+
+		if !a.inBounds(t) {
+			return ErrOutOfBounds
+		}
+		if t < c.maxTime {
+			return ErrOutOfOrderSample
+		}
+		if c.maxTime == t && ms.lastValue != v {
+			return ErrAmendSample
+		}
+	}
+
+	a.samples = append(a.samples, refdSample{
+		ref: ref,
+		t:   t,
+		v:   v,
+	})
+	return nil
+}
+
+func (a *headAppender) createSeries() {
+	if len(a.newSeries) == 0 {
+		return
+	}
+	a.newLabels = make([]labels.Labels, 0, len(a.newSeries))
+	base0 := len(a.series)
+
+	a.mtx.RUnlock()
+	a.mtx.Lock()
+
+	base1 := len(a.series)
+
+	for ref, l := range a.newSeries {
+		// We switched locks and have to re-validate that the series were not
+		// created by another goroutine in the meantime.
+		if base1 > base0 {
+			if ms := a.get(l.hash, l.labels); ms != nil {
+				a.refmap[ref] = uint64(ms.ref)
+				continue
+			}
+		}
+		// Series is still new.
+		a.newLabels = append(a.newLabels, l.labels)
+		a.refmap[ref] = uint64(len(a.series))
+
+		a.create(l.hash, l.labels)
+	}
+
+	a.mtx.Unlock()
+	a.mtx.RLock()
+}
+
+func (a *headAppender) Commit() error {
+	defer atomic.AddUint64(&a.activeWriters, ^uint64(0))
+	defer putHeadAppendBuffer(a.samples)
+
+	a.createSeries()
+
+	for i := range a.samples {
+		s := &a.samples[i]
+
+		if s.ref&(1<<32) > 0 {
+			s.ref = a.refmap[s.ref]
+		}
+	}
+
+	// Write all new series and samples to the WAL and add them to the
+	// in-memory database on success.
+	if err := a.wal.Log(a.newLabels, a.samples); err != nil {
+		a.mtx.RUnlock()
+		return err
+	}
+
+	var (
+		total = uint64(len(a.samples))
+		mint  = int64(math.MaxInt64)
+		maxt  = int64(math.MinInt64)
+	)
+
+	for _, s := range a.samples {
+		if !a.series[s.ref].append(s.t, s.v) {
+			total--
+		}
+
+		if s.t < mint {
+			mint = s.t
+		}
+		if s.t > maxt {
+			maxt = s.t
+		}
+	}
+
+	a.mtx.RUnlock()
+
+	a.metamtx.Lock()
+	defer a.metamtx.Unlock()
+
+	a.meta.Stats.NumSamples += total
+	a.meta.Stats.NumSeries += uint64(len(a.newSeries))
+
+	return nil
+}
+
+func (a *headAppender) Rollback() error {
+	a.mtx.RUnlock()
+	atomic.AddUint64(&a.activeWriters, ^uint64(0))
+	putHeadAppendBuffer(a.samples)
+	return nil
+}
+
+type headSeriesReader struct {
+	*headBlock
+}
+
+// Chunk returns the chunk for the reference number.
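+// The upper bytes of ref select the series; the lowest byte selects the
+// chunk within it.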
+func (h *headSeriesReader) Chunk(ref uint32) (chunks.Chunk, error) { + h.mtx.RLock() + defer h.mtx.RUnlock() + + c := &safeChunk{ + Chunk: h.series[ref>>8].chunks[int((ref<<24)>>24)].chunk, + s: h.series[ref>>8], + i: int((ref << 24) >> 24), + } + return c, nil +} + +type safeChunk struct { + chunks.Chunk + s *memSeries + i int +} + +func (c *safeChunk) Iterator() chunks.Iterator { + c.s.mtx.RLock() + defer c.s.mtx.RUnlock() + return c.s.iterator(c.i) +} + +// func (c *safeChunk) Appender() (chunks.Appender, error) { panic("illegal") } +// func (c *safeChunk) Bytes() []byte { panic("illegal") } +// func (c *safeChunk) Encoding() chunks.Encoding { panic("illegal") } + +type headIndexReader struct { + *headBlock +} + +// LabelValues returns the possible label values +func (h *headIndexReader) LabelValues(names ...string) (StringTuples, error) { + h.mtx.RLock() + defer h.mtx.RUnlock() + + if len(names) != 1 { + return nil, errInvalidSize + } + var sl []string + + for s := range h.values[names[0]] { + sl = append(sl, s) + } + sort.Strings(sl) + + return &stringTuples{l: len(names), s: sl}, nil +} + +// Postings returns the postings list iterator for the label pair. +func (h *headIndexReader) Postings(name, value string) (Postings, error) { + h.mtx.RLock() + defer h.mtx.RUnlock() + + return h.postings.get(term{name: name, value: value}), nil +} + +// Series returns the series for the given reference. +func (h *headIndexReader) Series(ref uint32) (labels.Labels, []ChunkMeta, error) { + h.mtx.RLock() + defer h.mtx.RUnlock() + + if int(ref) >= len(h.series) { + return nil, nil, ErrNotFound + } + s := h.series[ref] + metas := make([]ChunkMeta, 0, len(s.chunks)) + + s.mtx.RLock() + defer s.mtx.RUnlock() + + for i, c := range s.chunks { + metas = append(metas, ChunkMeta{ + MinTime: c.minTime, + MaxTime: c.maxTime, + Ref: (ref << 8) | uint32(i), + }) + } + + return s.lset, metas, nil +} + +func (h *headIndexReader) LabelIndices() ([][]string, error) { + h.mtx.RLock() + defer h.mtx.RUnlock() + + res := [][]string{} + + for s := range h.values { + res = append(res, []string{s}) + } + return res, nil +} + +// get retrieves the chunk with the hash and label set and creates +// a new one if it doesn't exist yet. +func (h *headBlock) get(hash uint64, lset labels.Labels) *memSeries { + series := h.hashes[hash] + + for _, s := range series { + if s.lset.Equals(lset) { + return s + } + } + return nil +} + +func (h *headBlock) create(hash uint64, lset labels.Labels) *memSeries { + s := &memSeries{ + lset: lset, + ref: uint32(len(h.series)), + } + + // Allocate empty space until we can insert at the given index. 
+ h.series = append(h.series, s) + + h.hashes[hash] = append(h.hashes[hash], s) + + for _, l := range lset { + valset, ok := h.values[l.Name] + if !ok { + valset = stringset{} + h.values[l.Name] = valset + } + valset.set(l.Value) + + h.postings.add(s.ref, term{name: l.Name, value: l.Value}) + } + + h.postings.add(s.ref, term{}) + + return s +} + +func (h *headBlock) fullness() float64 { + h.metamtx.RLock() + defer h.metamtx.RUnlock() + + return float64(h.meta.Stats.NumSamples) / float64(h.meta.Stats.NumSeries+1) / 250 +} + +func (h *headBlock) updateMapping() { + h.mtx.RLock() + + if h.mapper.sortable != nil && h.mapper.Len() == len(h.series) { + h.mtx.RUnlock() + return + } + + series := make([]*memSeries, len(h.series)) + copy(series, h.series) + + h.mtx.RUnlock() + + s := slice.SortInterface(series, func(i, j int) bool { + return labels.Compare(series[i].lset, series[j].lset) < 0 + }) + + h.mapper.update(s) +} + +// remapPostings changes the order of the postings from their ID to the ordering +// of the series they reference. +// Returned postings have no longer monotonic IDs and MUST NOT be used for regular +// postings set operations, i.e. intersect and merge. +func (h *headBlock) remapPostings(p Postings) Postings { + list, err := expandPostings(p) + if err != nil { + return errPostings{err: err} + } + + h.mapper.mtx.Lock() + defer h.mapper.mtx.Unlock() + + h.updateMapping() + h.mapper.Sort(list) + + return newListPostings(list) +} + +type memSeries struct { + mtx sync.RWMutex + + ref uint32 + lset labels.Labels + chunks []*memChunk + + lastValue float64 + sampleBuf [4]sample + + app chunks.Appender // Current appender for the chunkdb. +} + +func (s *memSeries) cut() *memChunk { + c := &memChunk{ + chunk: chunks.NewXORChunk(), + maxTime: math.MinInt64, + } + s.chunks = append(s.chunks, c) + + app, err := c.chunk.Appender() + if err != nil { + panic(err) + } + + s.app = app + return c +} + +func (s *memSeries) append(t int64, v float64) bool { + var c *memChunk + + if s.app == nil || s.head().samples > 2000 { + c = s.cut() + c.minTime = t + } else { + c = s.head() + // Skip duplicate samples. + if c.maxTime == t && s.lastValue != v { + return false + } + } + s.app.Append(t, v) + + c.maxTime = t + c.samples++ + + s.lastValue = v + + s.sampleBuf[0] = s.sampleBuf[1] + s.sampleBuf[1] = s.sampleBuf[2] + s.sampleBuf[2] = s.sampleBuf[3] + s.sampleBuf[3] = sample{t: t, v: v} + + return true +} + +func (s *memSeries) iterator(i int) chunks.Iterator { + c := s.chunks[i] + + if i < len(s.chunks)-1 { + return c.chunk.Iterator() + } + + it := &memSafeIterator{ + Iterator: c.chunk.Iterator(), + i: -1, + total: c.samples, + buf: s.sampleBuf, + } + return it +} + +func (s *memSeries) head() *memChunk { + return s.chunks[len(s.chunks)-1] +} + +type memChunk struct { + chunk chunks.Chunk + minTime, maxTime int64 + samples int +} + +type memSafeIterator struct { + chunks.Iterator + + i int + total int + buf [4]sample +} + +func (it *memSafeIterator) Next() bool { + if it.i+1 >= it.total { + return false + } + it.i++ + if it.total-it.i > 4 { + return it.Iterator.Next() + } + return true +} + +func (it *memSafeIterator) At() (int64, float64) { + if it.total-it.i > 4 { + return it.Iterator.At() + } + s := it.buf[4-(it.total-it.i)] + return s.t, s.v +} + +// positionMapper stores a position mapping from unsorted to +// sorted indices of a sortable collection. 
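+// For example, for the unsorted input [c, a, b] (indices 0, 1, 2), update
+// yields iv = [1, 2, 0] (the original index of each element of the sorted
+// order [a, b, c]) and fw = [2, 0, 1] (the sorted position of each
+// original index); Sort then reorders ID lists by those positions.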
+type positionMapper struct { + mtx sync.RWMutex + sortable sort.Interface + iv, fw []int +} + +func newPositionMapper(s sort.Interface) *positionMapper { + m := &positionMapper{} + if s != nil { + m.update(s) + } + return m +} + +func (m *positionMapper) Len() int { return m.sortable.Len() } +func (m *positionMapper) Less(i, j int) bool { return m.sortable.Less(i, j) } + +func (m *positionMapper) Swap(i, j int) { + m.sortable.Swap(i, j) + + m.iv[i], m.iv[j] = m.iv[j], m.iv[i] +} + +func (m *positionMapper) Sort(l []uint32) { + slice.Sort(l, func(i, j int) bool { + return m.fw[l[i]] < m.fw[l[j]] + }) +} + +func (m *positionMapper) update(s sort.Interface) { + m.sortable = s + + m.iv = make([]int, s.Len()) + m.fw = make([]int, s.Len()) + + for i := range m.iv { + m.iv[i] = i + } + sort.Sort(m) + + for i, k := range m.iv { + m.fw[k] = i + } +} diff --git a/vendor/github.com/fabxc/tsdb/labels/labels.go b/vendor/github.com/fabxc/tsdb/labels/labels.go new file mode 100644 index 0000000000..0b79bc2e93 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/labels/labels.go @@ -0,0 +1,143 @@ +package labels + +import ( + "bytes" + "sort" + "strconv" + "strings" + + "github.com/cespare/xxhash" +) + +const sep = '\xff' + +// Label is a key/value pair of strings. +type Label struct { + Name, Value string +} + +// Labels is a sorted set of labels. Order has to be guaranteed upon +// instantiation. +type Labels []Label + +func (ls Labels) Len() int { return len(ls) } +func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } +func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } + +func (ls Labels) String() string { + var b bytes.Buffer + + b.WriteByte('{') + for i, l := range ls { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(l.Name) + b.WriteByte('=') + b.WriteString(strconv.Quote(l.Value)) + } + b.WriteByte('}') + + return b.String() +} + +// Hash returns a hash value for the label set. +func (ls Labels) Hash() uint64 { + b := make([]byte, 0, 1024) + + for _, v := range ls { + b = append(b, v.Name...) + b = append(b, sep) + b = append(b, v.Value...) + b = append(b, sep) + } + return xxhash.Sum64(b) +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + for _, l := range ls { + if l.Name == name { + return l.Value + } + } + return "" +} + +// Equals returns whether the two label sets are equal. +func (ls Labels) Equals(o Labels) bool { + if len(ls) != len(o) { + return false + } + for i, l := range ls { + if l.Name != o[i].Name || l.Value != o[i].Value { + return false + } + } + return true +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string, len(ls)) + for _, l := range ls { + m[l.Name] = l.Value + } + return m +} + +// New returns a sorted Labels from the given labels. +// The caller has to guarantee that all label names are unique. +func New(ls ...Label) Labels { + set := make(Labels, 0, len(ls)) + for _, l := range ls { + set = append(set, l) + } + sort.Sort(set) + + return set +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// FromStrings creates new labels from pairs of strings. 
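+// For example, FromStrings("job", "api", "instance", "localhost:9090")
+// yields the sorted label set {instance="localhost:9090", job="api"}.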
+func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + var res Labels + for i := 0; i < len(ss); i += 2 { + res = append(res, Label{Name: ss[i], Value: ss[i+1]}) + } + + sort.Sort(res) + return res +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +func Compare(a, b Labels) int { + l := len(a) + if len(b) < l { + l = len(b) + } + + for i := 0; i < l; i++ { + if d := strings.Compare(a[i].Name, b[i].Name); d != 0 { + return d + } + if d := strings.Compare(a[i].Value, b[i].Value); d != 0 { + return d + } + } + // If all labels so far were in common, the set with fewer labels comes first. + return len(a) - len(b) +} diff --git a/vendor/github.com/fabxc/tsdb/labels/selector.go b/vendor/github.com/fabxc/tsdb/labels/selector.go new file mode 100644 index 0000000000..0bcf5380c9 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/labels/selector.go @@ -0,0 +1,66 @@ +package labels + +import "regexp" + +// Selector holds constraints for matching against a label set. +type Selector []Matcher + +// Matches returns whether the labels satisfy all matchers. +func (s Selector) Matches(labels Labels) bool { + for _, m := range s { + if v := labels.Get(m.Name()); !m.Matches(v) { + return false + } + } + return true +} + +// Matcher specifies a constraint for the value of a label. +type Matcher interface { + // Name returns the label name the matcher should apply to. + Name() string + // Matches checks whether a value fulfills the constraints. + Matches(v string) bool +} + +type EqualMatcher struct { + name, value string +} + +func (m *EqualMatcher) Name() string { return m.name } +func (m *EqualMatcher) Matches(v string) bool { return v == m.value } + +// NewEqualMatcher returns a new matcher matching an exact label value. +func NewEqualMatcher(name, value string) Matcher { + return &EqualMatcher{name: name, value: value} +} + +type regexpMatcher struct { + name string + re *regexp.Regexp +} + +func (m *regexpMatcher) Name() string { return m.name } +func (m *regexpMatcher) Matches(v string) bool { return m.re.MatchString(v) } + +// NewRegexpMatcher returns a new matcher verifying that a value matches +// the regular expression pattern. +func NewRegexpMatcher(name, pattern string) (Matcher, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + return ®expMatcher{name: name, re: re}, nil +} + +// notMatcher inverts the matching result for a matcher. +type notMatcher struct { + Matcher +} + +func (m *notMatcher) Matches(v string) bool { return !m.Matcher.Matches(v) } + +// Not inverts the matcher's matching result. +func Not(m Matcher) Matcher { + return ¬Matcher{m} +} diff --git a/vendor/github.com/fabxc/tsdb/postings.go b/vendor/github.com/fabxc/tsdb/postings.go new file mode 100644 index 0000000000..b3afe831af --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/postings.go @@ -0,0 +1,265 @@ +package tsdb + +import ( + "sort" + "strings" +) + +type memPostings struct { + m map[term][]uint32 +} + +type term struct { + name, value string +} + +// Postings returns an iterator over the postings list for s. +func (p *memPostings) get(t term) Postings { + l := p.m[t] + if l == nil { + return emptyPostings + } + return &listPostings{list: l, idx: -1} +} + +// add adds a document to the index. The caller has to ensure that no +// term argument appears twice. 
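+// A small sketch of building and querying the in-memory index
+// (IDs and the label pair are hypothetical):
+//
+//    p := &memPostings{m: map[term][]uint32{}}
+//    p.add(1, term{name: "job", value: "api"})
+//    p.add(2, term{name: "job", value: "api"})
+//    it := p.get(term{name: "job", value: "api"}) // yields 1, then 2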
+func (p *memPostings) add(id uint32, terms ...term) { + for _, t := range terms { + p.m[t] = append(p.m[t], id) + } +} + +// Postings provides iterative access over a postings list. +type Postings interface { + // Next advances the iterator and returns true if another value was found. + Next() bool + + // Seek advances the iterator to value v or greater and returns + // true if a value was found. + Seek(v uint32) bool + + // At returns the value at the current iterator position. + At() uint32 + + // Err returns the last error of the iterator. + Err() error +} + +// errPostings is an empty iterator that always errors. +type errPostings struct { + err error +} + +func (e errPostings) Next() bool { return false } +func (e errPostings) Seek(uint32) bool { return false } +func (e errPostings) At() uint32 { return 0 } +func (e errPostings) Err() error { return e.err } + +func expandPostings(p Postings) (res []uint32, err error) { + for p.Next() { + res = append(res, p.At()) + } + return res, p.Err() +} + +// Intersect returns a new postings list over the intersection of the +// input postings. +func Intersect(its ...Postings) Postings { + if len(its) == 0 { + return errPostings{err: nil} + } + a := its[0] + + for _, b := range its[1:] { + a = newIntersectPostings(a, b) + } + return a +} + +var emptyPostings = errPostings{} + +type intersectPostings struct { + a, b Postings + aok, bok bool + cur uint32 +} + +func newIntersectPostings(a, b Postings) *intersectPostings { + it := &intersectPostings{a: a, b: b} + it.aok = it.a.Next() + it.bok = it.b.Next() + + return it +} + +func (it *intersectPostings) At() uint32 { + return it.cur +} + +func (it *intersectPostings) Next() bool { + for { + if !it.aok || !it.bok { + return false + } + av, bv := it.a.At(), it.b.At() + + if av < bv { + it.aok = it.a.Seek(bv) + } else if bv < av { + it.bok = it.b.Seek(av) + } else { + it.cur = av + it.aok = it.a.Next() + it.bok = it.b.Next() + return true + } + } +} + +func (it *intersectPostings) Seek(id uint32) bool { + it.aok = it.a.Seek(id) + it.bok = it.b.Seek(id) + return it.Next() +} + +func (it *intersectPostings) Err() error { + if it.a.Err() != nil { + return it.a.Err() + } + return it.b.Err() +} + +// Merge returns a new iterator over the union of the input iterators. 
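+// IDs present in several inputs are emitted only once. A usage sketch
+// (values are hypothetical):
+//
+//    a := newListPostings([]uint32{1, 2, 5})
+//    b := newListPostings([]uint32{2, 5, 8})
+//    it := Merge(a, b) // yields 1, 2, 5, 8
+//    for it.Next() {
+//        // use it.At()
+//    }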
+func Merge(its ...Postings) Postings { + if len(its) == 0 { + return nil + } + a := its[0] + + for _, b := range its[1:] { + a = newMergePostings(a, b) + } + return a +} + +type mergePostings struct { + a, b Postings + aok, bok bool + cur uint32 +} + +func newMergePostings(a, b Postings) *mergePostings { + it := &mergePostings{a: a, b: b} + it.aok = it.a.Next() + it.bok = it.b.Next() + + return it +} + +func (it *mergePostings) At() uint32 { + return it.cur +} + +func (it *mergePostings) Next() bool { + if !it.aok && !it.bok { + return false + } + + if !it.aok { + it.cur = it.b.At() + it.bok = it.b.Next() + return true + } + if !it.bok { + it.cur = it.a.At() + it.aok = it.a.Next() + return true + } + + acur, bcur := it.a.At(), it.b.At() + + if acur < bcur { + it.cur = acur + it.aok = it.a.Next() + return true + } + if bcur < acur { + it.cur = bcur + it.bok = it.b.Next() + return true + } + it.cur = acur + it.aok = it.a.Next() + it.bok = it.b.Next() + + return true +} + +func (it *mergePostings) Seek(id uint32) bool { + it.aok = it.a.Seek(id) + it.bok = it.b.Seek(id) + return it.Next() +} + +func (it *mergePostings) Err() error { + if it.a.Err() != nil { + return it.a.Err() + } + return it.b.Err() +} + +// listPostings implements the Postings interface over a plain list. +type listPostings struct { + list []uint32 + idx int +} + +func newListPostings(list []uint32) *listPostings { + return &listPostings{list: list, idx: -1} +} + +func (it *listPostings) At() uint32 { + return it.list[it.idx] +} + +func (it *listPostings) Next() bool { + it.idx++ + return it.idx < len(it.list) +} + +func (it *listPostings) Seek(x uint32) bool { + // Do binary search between current position and end. + it.idx += sort.Search(len(it.list)-it.idx, func(i int) bool { + return it.list[i+it.idx] >= x + }) + return it.idx < len(it.list) +} + +func (it *listPostings) Err() error { + return nil +} + +type stringset map[string]struct{} + +func (ss stringset) set(s string) { + ss[s] = struct{}{} +} + +func (ss stringset) has(s string) bool { + _, ok := ss[s] + return ok +} + +func (ss stringset) String() string { + return strings.Join(ss.slice(), ",") +} + +func (ss stringset) slice() []string { + slice := make([]string, 0, len(ss)) + for k := range ss { + slice = append(slice, k) + } + sort.Strings(slice) + return slice +} diff --git a/vendor/github.com/fabxc/tsdb/querier.go b/vendor/github.com/fabxc/tsdb/querier.go new file mode 100644 index 0000000000..df17ad270c --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/querier.go @@ -0,0 +1,900 @@ +package tsdb + +import ( + "fmt" + "math" + "sort" + "strings" + + "github.com/fabxc/tsdb/chunks" + "github.com/fabxc/tsdb/labels" +) + +// Querier provides querying access over time series data of a fixed +// time range. +type Querier interface { + // Select returns a set of series that matches the given label matchers. + Select(...labels.Matcher) SeriesSet + + // LabelValues returns all potential values for a label name. + LabelValues(string) ([]string, error) + // LabelValuesFor returns all potential values for a label name. + // under the constraint of another label. + LabelValuesFor(string, labels.Label) ([]string, error) + + // Close releases the resources of the Querier. + Close() error +} + +// Series represents a single time series. +type Series interface { + // Labels returns the complete set of labels identifying the series. + Labels() labels.Labels + + // Iterator returns a new iterator of the data of the series. 
+ Iterator() SeriesIterator +} + +// querier aggregates querying results from time blocks within +// a single partition. +type querier struct { + db *DB + blocks []Querier +} + +// Querier returns a new querier over the data partition for the given +// time range. +func (s *DB) Querier(mint, maxt int64) Querier { + s.mtx.RLock() + + blocks := s.blocksForInterval(mint, maxt) + + sq := &querier{ + blocks: make([]Querier, 0, len(blocks)), + db: s, + } + + for _, b := range blocks { + q := &blockQuerier{ + mint: mint, + maxt: maxt, + index: b.Index(), + series: b.Series(), + } + + // TODO(fabxc): find nicer solution. + if hb, ok := b.(*headBlock); ok { + q.postingsMapper = hb.remapPostings + } + + sq.blocks = append(sq.blocks, q) + } + + return sq +} + +func (q *querier) LabelValues(n string) ([]string, error) { + res, err := q.blocks[0].LabelValues(n) + if err != nil { + return nil, err + } + for _, bq := range q.blocks[1:] { + pr, err := bq.LabelValues(n) + if err != nil { + return nil, err + } + // Merge new values into deduplicated result. + res = mergeStrings(res, pr) + } + return res, nil +} + +func (q *querier) LabelValuesFor(string, labels.Label) ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func (q *querier) Select(ms ...labels.Matcher) SeriesSet { + // Sets from different blocks have no time overlap. The reference numbers + // they emit point to series sorted in lexicographic order. + // We can fully connect partial series by simply comparing with the previous + // label set. + if len(q.blocks) == 0 { + return nopSeriesSet{} + } + r := q.blocks[0].Select(ms...) + + for _, s := range q.blocks[1:] { + r = newPartitionSeriesSet(r, s.Select(ms...)) + } + return r +} + +func (q *querier) Close() error { + var merr MultiError + + for _, bq := range q.blocks { + merr.Add(bq.Close()) + } + q.db.mtx.RUnlock() + + return merr.Err() +} + +// blockQuerier provides querying access to a single block database. +type blockQuerier struct { + index IndexReader + series SeriesReader + + postingsMapper func(Postings) Postings + + mint, maxt int64 +} + +func newBlockQuerier(ix IndexReader, s SeriesReader, mint, maxt int64) *blockQuerier { + return &blockQuerier{ + mint: mint, + maxt: maxt, + index: ix, + series: s, + } +} + +func (q *blockQuerier) Select(ms ...labels.Matcher) SeriesSet { + var ( + its []Postings + absent []string + ) + for _, m := range ms { + // If the matcher checks absence of a label, don't select them + // but propagate the check into the series set. + if _, ok := m.(*labels.EqualMatcher); ok && m.Matches("") { + absent = append(absent, m.Name()) + continue + } + its = append(its, q.selectSingle(m)) + } + + p := Intersect(its...) + + if q.postingsMapper != nil { + p = q.postingsMapper(p) + } + + return &blockSeriesSet{ + index: q.index, + chunks: q.series, + it: p, + absent: absent, + mint: q.mint, + maxt: q.maxt, + } +} + +func (q *blockQuerier) selectSingle(m labels.Matcher) Postings { + tpls, err := q.index.LabelValues(m.Name()) + if err != nil { + return errPostings{err: err} + } + // TODO(fabxc): use interface upgrading to provide fast solution + // for equality and prefix matches. Tuples are lexicographically sorted. 
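+ // For now, every known value of the label is tested against the
+ // matcher and the postings lists of all matching values are merged
+ // below.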
+ var res []string + + for i := 0; i < tpls.Len(); i++ { + vals, err := tpls.At(i) + if err != nil { + return errPostings{err: err} + } + if m.Matches(vals[0]) { + res = append(res, vals[0]) + } + } + + if len(res) == 0 { + return emptyPostings + } + + var rit []Postings + + for _, v := range res { + it, err := q.index.Postings(m.Name(), v) + if err != nil { + return errPostings{err: err} + } + rit = append(rit, it) + } + + return Merge(rit...) +} + +func (q *blockQuerier) LabelValues(name string) ([]string, error) { + tpls, err := q.index.LabelValues(name) + if err != nil { + return nil, err + } + res := make([]string, 0, tpls.Len()) + + for i := 0; i < tpls.Len(); i++ { + vals, err := tpls.At(i) + if err != nil { + return nil, err + } + res = append(res, vals[0]) + } + return res, nil +} + +func (q *blockQuerier) LabelValuesFor(string, labels.Label) ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func (q *blockQuerier) Close() error { + return nil +} + +// partitionedQuerier merges query results from a set of partition querieres. +type partitionedQuerier struct { + mint, maxt int64 + partitions []Querier +} + +// Querier returns a new querier over the database for the given +// time range. +func (db *PartitionedDB) Querier(mint, maxt int64) Querier { + q := &partitionedQuerier{ + mint: mint, + maxt: maxt, + } + for _, s := range db.Partitions { + q.partitions = append(q.partitions, s.Querier(mint, maxt)) + } + + return q +} + +func (q *partitionedQuerier) Select(ms ...labels.Matcher) SeriesSet { + // We gather the non-overlapping series from every partition and simply + // return their union. + r := &mergedSeriesSet{} + + for _, s := range q.partitions { + r.sets = append(r.sets, s.Select(ms...)) + } + if len(r.sets) == 0 { + return nopSeriesSet{} + } + return r +} + +func (q *partitionedQuerier) LabelValues(n string) ([]string, error) { + res, err := q.partitions[0].LabelValues(n) + if err != nil { + return nil, err + } + for _, sq := range q.partitions[1:] { + pr, err := sq.LabelValues(n) + if err != nil { + return nil, err + } + // Merge new values into deduplicated result. + res = mergeStrings(res, pr) + } + return res, nil +} + +func (q *partitionedQuerier) LabelValuesFor(string, labels.Label) ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func (q *partitionedQuerier) Close() error { + var merr MultiError + + for _, sq := range q.partitions { + merr.Add(sq.Close()) + } + return merr.Err() +} + +func mergeStrings(a, b []string) []string { + maxl := len(a) + if len(b) > len(a) { + maxl = len(b) + } + res := make([]string, 0, maxl*10/9) + + for len(a) > 0 && len(b) > 0 { + d := strings.Compare(a[0], b[0]) + + if d == 0 { + res = append(res, a[0]) + a, b = a[1:], b[1:] + } else if d < 0 { + res = append(res, a[0]) + a = a[1:] + } else if d > 0 { + res = append(res, b[0]) + b = b[1:] + } + } + + // Append all remaining elements. + res = append(res, a...) + res = append(res, b...) + return res +} + +// SeriesSet contains a set of series. 
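+// It follows the same iteration contract as Postings: call Next until it
+// returns false, reading At after every successful step, then check Err
+// (sketch):
+//
+//    for set.Next() {
+//        s := set.At()
+//        // use s.Labels() or s.Iterator()
+//    }
+//    err := set.Err()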
+type SeriesSet interface { + Next() bool + At() Series + Err() error +} + +type nopSeriesSet struct{} + +func (nopSeriesSet) Next() bool { return false } +func (nopSeriesSet) At() Series { return nil } +func (nopSeriesSet) Err() error { return nil } + +type mergedSeriesSet struct { + sets []SeriesSet + + cur int + err error +} + +func (s *mergedSeriesSet) At() Series { return s.sets[s.cur].At() } +func (s *mergedSeriesSet) Err() error { return s.sets[s.cur].Err() } + +func (s *mergedSeriesSet) Next() bool { + // TODO(fabxc): We just emit the sets one after one. They are each + // lexicographically sorted. Should we emit their union sorted too? + if s.sets[s.cur].Next() { + return true + } + + if s.cur == len(s.sets)-1 { + return false + } + s.cur++ + + return s.Next() +} + +type partitionSeriesSet struct { + a, b SeriesSet + + cur Series + adone, bdone bool +} + +func newPartitionSeriesSet(a, b SeriesSet) *partitionSeriesSet { + s := &partitionSeriesSet{a: a, b: b} + // Initialize first elements of both sets as Next() needs + // one element look-ahead. + s.adone = !s.a.Next() + s.bdone = !s.b.Next() + + return s +} + +func (s *partitionSeriesSet) At() Series { + return s.cur +} + +func (s *partitionSeriesSet) Err() error { + if s.a.Err() != nil { + return s.a.Err() + } + return s.b.Err() +} + +func (s *partitionSeriesSet) compare() int { + if s.adone { + return 1 + } + if s.bdone { + return -1 + } + return labels.Compare(s.a.At().Labels(), s.b.At().Labels()) +} + +func (s *partitionSeriesSet) Next() bool { + if s.adone && s.bdone || s.Err() != nil { + return false + } + + d := s.compare() + + // Both sets contain the current series. Chain them into a single one. + if d > 0 { + s.cur = s.b.At() + s.bdone = !s.b.Next() + + } else if d < 0 { + s.cur = s.a.At() + s.adone = !s.a.Next() + + } else { + s.cur = &chainedSeries{series: []Series{s.a.At(), s.b.At()}} + s.adone = !s.a.Next() + s.bdone = !s.b.Next() + } + return true +} + +// blockSeriesSet is a set of series from an inverted index query. +type blockSeriesSet struct { + index IndexReader + chunks SeriesReader + it Postings // postings list referencing series + absent []string // labels that must not be set for result series + mint, maxt int64 // considered time range + + err error + cur Series +} + +func (s *blockSeriesSet) Next() bool { + // Step through the postings iterator to find potential series. +outer: + for s.it.Next() { + lset, chunks, err := s.index.Series(s.it.At()) + if err != nil { + s.err = err + return false + } + + // If a series contains a label that must be absent, it is skipped as well. + for _, abs := range s.absent { + if lset.Get(abs) != "" { + continue outer + } + } + + ser := &chunkSeries{ + labels: lset, + chunks: make([]ChunkMeta, 0, len(chunks)), + chunk: s.chunks.Chunk, + } + // Only use chunks that fit the time range. + for _, c := range chunks { + if c.MaxTime < s.mint { + continue + } + if c.MinTime > s.maxt { + break + } + ser.chunks = append(ser.chunks, c) + } + // If no chunks of the series apply to the time range, skip it. + if len(ser.chunks) == 0 { + continue + } + + s.cur = ser + return true + } + if s.it.Err() != nil { + s.err = s.it.Err() + } + return false +} + +func (s *blockSeriesSet) At() Series { return s.cur } +func (s *blockSeriesSet) Err() error { return s.err } + +// chunkSeries is a series that is backed by a sequence of chunks holding +// time series data. 
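+// The chunk retrieval function is injected (e.g. a SeriesReader's Chunk
+// method), so the same type serves both persisted and in-memory blocks.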
+type chunkSeries struct {
+ labels labels.Labels
+ chunks []ChunkMeta // in-order chunk refs
+
+ // chunk is a function that retrieves chunks based on a reference
+ // number contained in the chunk meta information.
+ chunk func(ref uint32) (chunks.Chunk, error)
+}
+
+func (s *chunkSeries) Labels() labels.Labels {
+ return s.labels
+}
+
+func (s *chunkSeries) Iterator() SeriesIterator {
+ var cs []chunks.Chunk
+ var mints []int64
+
+ for _, co := range s.chunks {
+ c, err := s.chunk(co.Ref)
+ if err != nil {
+ panic(err) // TODO(fabxc): add error series iterator.
+ }
+ cs = append(cs, c)
+ mints = append(mints, co.MinTime)
+ }
+
+ // TODO(fabxc): consider pushing chunk retrieval further down. In practice, we
+ // probably have to touch all chunks anyway and it doesn't matter.
+ return newChunkSeriesIterator(mints, cs)
+}
+
+// SeriesIterator iterates over the data of a time series.
+type SeriesIterator interface {
+ // Seek advances the iterator forward to the given timestamp.
+ // If there's no value exactly at t, it advances to the first
+ // value at or after t.
+ Seek(t int64) bool
+ // At returns the current timestamp/value pair.
+ At() (t int64, v float64)
+ // Next advances the iterator by one.
+ Next() bool
+ // Err returns the current error.
+ Err() error
+}
+
+// chainedSeries implements a series for a list of time-sorted series.
+// They all must have the same labels.
+type chainedSeries struct {
+ series []Series
+}
+
+func (s *chainedSeries) Labels() labels.Labels {
+ return s.series[0].Labels()
+}
+
+func (s *chainedSeries) Iterator() SeriesIterator {
+ return &chainedSeriesIterator{series: s.series}
+}
+
+// chainedSeriesIterator implements a series iterator over a list
+// of time-sorted, non-overlapping iterators.
+type chainedSeriesIterator struct {
+ series []Series // series in time order
+
+ i int
+ cur SeriesIterator
+}
+
+func (it *chainedSeriesIterator) Seek(t int64) bool {
+ // We just scan the chained series sequentially as they are already
+ // pre-selected by relevant time and should be accessed sequentially anyway.
+ for i, s := range it.series[it.i:] {
+ cur := s.Iterator()
+ if !cur.Seek(t) {
+ continue
+ }
+ it.cur = cur
+ it.i += i
+ return true
+ }
+ return false
+}
+
+func (it *chainedSeriesIterator) Next() bool {
+ if it.cur == nil {
+ it.cur = it.series[it.i].Iterator()
+ }
+ if it.cur.Next() {
+ return true
+ }
+ if err := it.cur.Err(); err != nil {
+ return false
+ }
+ if it.i == len(it.series)-1 {
+ return false
+ }
+
+ it.i++
+ it.cur = it.series[it.i].Iterator()
+
+ return it.Next()
+}
+
+func (it *chainedSeriesIterator) At() (t int64, v float64) {
+ return it.cur.At()
+}
+
+func (it *chainedSeriesIterator) Err() error {
+ return it.cur.Err()
+}
+
+// chunkSeriesIterator implements a series iterator on top
+// of a list of time-sorted, non-overlapping chunks.
+type chunkSeriesIterator struct {
+ mints []int64 // minimum timestamps for each iterator
+ chunks []chunks.Chunk
+
+ i int
+ cur chunks.Iterator
+}
+
+func newChunkSeriesIterator(mints []int64, cs []chunks.Chunk) *chunkSeriesIterator {
+ if len(mints) != len(cs) {
+ panic("chunk references and chunks length don't match")
+ }
+ return &chunkSeriesIterator{
+ mints: mints,
+ chunks: cs,
+ i: 0,
+ cur: cs[0].Iterator(),
+ }
+}
+
+func (it *chunkSeriesIterator) Seek(t int64) (ok bool) {
+ // Only do binary search forward to stay in line with other iterators
+ // that can only move forward.
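+ // For example, with chunk start times mints = [0, 100, 200] and
+ // t = 150, the search lands on the chunk starting at 200 and the
+ // step back below selects the chunk starting at 100, which may
+ // still contain t.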
+ x := sort.Search(len(it.mints[it.i:]), func(i int) bool { return it.mints[i+it.i] >= t })
+ x += it.i
+
+ // If the timestamp was not found, it might be in the last chunk.
+ if x == len(it.mints) {
+ x--
+ }
+ // Go to previous chunk if the chunk doesn't exactly start with t.
+ // If we are already at the first chunk, we use it as it's the best we have.
+ if x > 0 && it.mints[x] > t {
+ x--
+ }
+
+ it.i = x
+ it.cur = it.chunks[x].Iterator()
+
+ for it.cur.Next() {
+ t0, _ := it.cur.At()
+ if t0 >= t {
+ return true
+ }
+ }
+ return false
+}
+
+func (it *chunkSeriesIterator) At() (t int64, v float64) {
+ return it.cur.At()
+}
+
+func (it *chunkSeriesIterator) Next() bool {
+ if it.cur.Next() {
+ return true
+ }
+ if err := it.cur.Err(); err != nil {
+ return false
+ }
+ if it.i == len(it.chunks)-1 {
+ return false
+ }
+
+ it.i++
+ it.cur = it.chunks[it.i].Iterator()
+
+ return it.Next()
+}
+
+func (it *chunkSeriesIterator) Err() error {
+ return it.cur.Err()
+}
+
+// BufferedSeriesIterator wraps an iterator with a look-back buffer.
+type BufferedSeriesIterator struct {
+ it SeriesIterator
+ buf *sampleRing
+
+ lastTime int64
+}
+
+// NewBuffer returns a new iterator that buffers the values within the time range
+// of the current element and the duration of delta before.
+func NewBuffer(it SeriesIterator, delta int64) *BufferedSeriesIterator {
+ return &BufferedSeriesIterator{
+ it: it,
+ buf: newSampleRing(delta, 16),
+ lastTime: math.MinInt64,
+ }
+}
+
+// PeekBack returns the previous element of the iterator. If there is none buffered,
+// ok is false.
+func (b *BufferedSeriesIterator) PeekBack() (t int64, v float64, ok bool) {
+ return b.buf.last()
+}
+
+// Buffer returns an iterator over the buffered data.
+func (b *BufferedSeriesIterator) Buffer() SeriesIterator {
+ return b.buf.iterator()
+}
+
+// Seek advances the iterator to the element at time t or greater.
+func (b *BufferedSeriesIterator) Seek(t int64) bool {
+ t0 := t - b.buf.delta
+
+ // If the delta would cause us to seek backwards, preserve the buffer
+ // and just continue regular advancement while filling the buffer on the way.
+ if t0 > b.lastTime {
+ b.buf.reset()
+
+ ok := b.it.Seek(t0)
+ if !ok {
+ return false
+ }
+ b.lastTime, _ = b.At()
+ }
+
+ if b.lastTime >= t {
+ return true
+ }
+ for b.Next() {
+ if b.lastTime >= t {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Next advances the iterator to the next element.
+func (b *BufferedSeriesIterator) Next() bool {
+ // Add current element to buffer before advancing.
+ b.buf.add(b.it.At())
+
+ ok := b.it.Next()
+ if ok {
+ b.lastTime, _ = b.At()
+ }
+ return ok
+}
+
+// At returns the current element of the iterator.
+func (b *BufferedSeriesIterator) At() (int64, float64) {
+ return b.it.At()
+}
+
+// Err returns the last encountered error.
+func (b *BufferedSeriesIterator) Err() error { + return b.it.Err() +} + +type sample struct { + t int64 + v float64 +} + +type sampleRing struct { + delta int64 + + buf []sample // lookback buffer + i int // position of most recent element in ring buffer + f int // position of first element in ring buffer + l int // number of elements in buffer +} + +func newSampleRing(delta int64, sz int) *sampleRing { + r := &sampleRing{delta: delta, buf: make([]sample, sz)} + r.reset() + + return r +} + +func (r *sampleRing) reset() { + r.l = 0 + r.i = -1 + r.f = 0 +} + +func (r *sampleRing) iterator() SeriesIterator { + return &sampleRingIterator{r: r, i: -1} +} + +type sampleRingIterator struct { + r *sampleRing + i int +} + +func (it *sampleRingIterator) Next() bool { + it.i++ + return it.i < it.r.l +} + +func (it *sampleRingIterator) Seek(int64) bool { + return false +} + +func (it *sampleRingIterator) Err() error { + return nil +} + +func (it *sampleRingIterator) At() (int64, float64) { + return it.r.at(it.i) +} + +func (r *sampleRing) at(i int) (int64, float64) { + j := (r.f + i) % len(r.buf) + s := r.buf[j] + return s.t, s.v +} + +// add adds a sample to the ring buffer and frees all samples that fall +// out of the delta range. +func (r *sampleRing) add(t int64, v float64) { + l := len(r.buf) + // Grow the ring buffer if it fits no more elements. + if l == r.l { + buf := make([]sample, 2*l) + copy(buf[l+r.f:], r.buf[r.f:]) + copy(buf, r.buf[:r.f]) + + r.buf = buf + r.i = r.f + r.f += l + } else { + r.i++ + if r.i >= l { + r.i -= l + } + } + + r.buf[r.i] = sample{t: t, v: v} + r.l++ + + // Free head of the buffer of samples that just fell out of the range. + for r.buf[r.f].t < t-r.delta { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } +} + +// last returns the most recent element added to the ring. +func (r *sampleRing) last() (int64, float64, bool) { + if r.l == 0 { + return 0, 0, false + } + s := r.buf[r.i] + return s.t, s.v, true +} + +func (r *sampleRing) samples() []sample { + res := make([]sample, r.l) + + var k = r.f + r.l + var j int + if k > len(r.buf) { + k = len(r.buf) + j = r.l - k + r.f + } + + n := copy(res, r.buf[r.f:k]) + copy(res[n:], r.buf[:j]) + + return res +} + +type mockSeriesSet struct { + next func() bool + series func() Series + err func() error +} + +func (m *mockSeriesSet) Next() bool { return m.next() } +func (m *mockSeriesSet) At() Series { return m.series() } +func (m *mockSeriesSet) Err() error { return m.err() } + +func newListSeriesSet(list []Series) *mockSeriesSet { + i := -1 + return &mockSeriesSet{ + next: func() bool { + i++ + return i < len(list) + }, + series: func() Series { + return list[i] + }, + err: func() error { return nil }, + } +} + +type errSeriesSet struct { + err error +} + +func (s errSeriesSet) Next() bool { return false } +func (s errSeriesSet) At() Series { return nil } +func (s errSeriesSet) Err() error { return s.err } diff --git a/vendor/github.com/fabxc/tsdb/reader.go b/vendor/github.com/fabxc/tsdb/reader.go new file mode 100644 index 0000000000..a49fb794bb --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/reader.go @@ -0,0 +1,411 @@ +package tsdb + +import ( + "encoding/binary" + "fmt" + "strings" + + "github.com/fabxc/tsdb/chunks" + "github.com/fabxc/tsdb/labels" + "github.com/pkg/errors" +) + +// SeriesReader provides reading access of serialized time series data. +type SeriesReader interface { + // Chunk returns the series data chunk with the given reference. 
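+ // For persisted blocks the reference is the chunk's byte offset in
+ // the series file; the head block instead packs a series and chunk
+ // index (see headSeriesReader.Chunk).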
+ Chunk(ref uint32) (chunks.Chunk, error) +} + +// seriesReader implements a SeriesReader for a serialized byte stream +// of series data. +type seriesReader struct { + // The underlying byte slice holding the encoded series data. + b []byte +} + +func newSeriesReader(b []byte) (*seriesReader, error) { + if len(b) < 4 { + return nil, errors.Wrap(errInvalidSize, "index header") + } + // Verify magic number. + if m := binary.BigEndian.Uint32(b[:4]); m != MagicSeries { + return nil, fmt.Errorf("invalid magic number %x", m) + } + return &seriesReader{b: b}, nil +} + +func (s *seriesReader) Chunk(offset uint32) (chunks.Chunk, error) { + if int(offset) > len(s.b) { + return nil, errors.Errorf("offset %d beyond data size %d", offset, len(s.b)) + } + b := s.b[offset:] + + l, n := binary.Uvarint(b) + if n < 0 { + return nil, fmt.Errorf("reading chunk length failed") + } + b = b[n:] + enc := chunks.Encoding(b[0]) + + c, err := chunks.FromData(enc, b[1:1+l]) + if err != nil { + return nil, err + } + return c, nil +} + +// IndexReader provides reading access of serialized index data. +type IndexReader interface { + // LabelValues returns the possible label values + LabelValues(names ...string) (StringTuples, error) + + // Postings returns the postings list iterator for the label pair. + Postings(name, value string) (Postings, error) + + // Series returns the series for the given reference. + Series(ref uint32) (labels.Labels, []ChunkMeta, error) + + // LabelIndices returns the label pairs for which indices exist. + LabelIndices() ([][]string, error) +} + +// StringTuples provides access to a sorted list of string tuples. +type StringTuples interface { + // Total number of tuples in the list. + Len() int + // At returns the tuple at position i. + At(i int) ([]string, error) +} + +type indexReader struct { + series SeriesReader + + // The underlying byte slice holding the encoded series data. + b []byte + + // Cached hashmaps of section offsets. + labels map[string]uint32 + postings map[string]uint32 +} + +var ( + errInvalidSize = fmt.Errorf("invalid size") + errInvalidFlag = fmt.Errorf("invalid flag") +) + +func newIndexReader(s SeriesReader, b []byte) (*indexReader, error) { + if len(b) < 4 { + return nil, errors.Wrap(errInvalidSize, "index header") + } + r := &indexReader{ + series: s, + b: b, + } + + // Verify magic number. + if m := binary.BigEndian.Uint32(b[:4]); m != MagicIndex { + return nil, fmt.Errorf("invalid magic number %x", m) + } + + var err error + // The last two 4 bytes hold the pointers to the hashmaps. 
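+ // That is, the trailing 8 bytes are two big-endian uint32 section
+ // offsets: the label index hashmap first, then the postings hashmap.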
+ loff := binary.BigEndian.Uint32(b[len(b)-8 : len(b)-4])
+ poff := binary.BigEndian.Uint32(b[len(b)-4:])
+
+ f, b, err := r.section(loff)
+ if err != nil {
+ return nil, errors.Wrapf(err, "label index hashmap section at %d", loff)
+ }
+ if r.labels, err = readHashmap(f, b); err != nil {
+ return nil, errors.Wrap(err, "read label index hashmap")
+ }
+ f, b, err = r.section(poff)
+ if err != nil {
+ return nil, errors.Wrapf(err, "postings hashmap section at %d", poff)
+ }
+ if r.postings, err = readHashmap(f, b); err != nil {
+ return nil, errors.Wrap(err, "read postings hashmap")
+ }
+
+ return r, nil
+}
+
+func readHashmap(flag byte, b []byte) (map[string]uint32, error) {
+ if flag != flagStd {
+ return nil, errInvalidFlag
+ }
+ h := make(map[string]uint32, 512)
+
+ for len(b) > 0 {
+ l, n := binary.Uvarint(b)
+ if n < 1 {
+ return nil, errors.Wrap(errInvalidSize, "read key length")
+ }
+ b = b[n:]
+
+ if len(b) < int(l) {
+ return nil, errors.Wrap(errInvalidSize, "read key")
+ }
+ s := string(b[:l])
+ b = b[l:]
+
+ o, n := binary.Uvarint(b)
+ if n < 1 {
+ return nil, errors.Wrap(errInvalidSize, "read offset value")
+ }
+ b = b[n:]
+
+ h[s] = uint32(o)
+ }
+
+ return h, nil
+}
+
+func (r *indexReader) section(o uint32) (byte, []byte, error) {
+ b := r.b[o:]
+
+ if len(b) < 5 {
+ return 0, nil, errors.Wrap(errInvalidSize, "read header")
+ }
+
+ flag := b[0]
+ l := binary.BigEndian.Uint32(b[1:5])
+
+ b = b[5:]
+
+ // b must have the given length plus 4 bytes for the CRC32 checksum.
+ if len(b) < int(l)+4 {
+ return 0, nil, errors.Wrap(errInvalidSize, "section content")
+ }
+ return flag, b[:l], nil
+}
+
+func (r *indexReader) lookupSymbol(o uint32) (string, error) {
+ if int(o) > len(r.b) {
+ return "", errors.Errorf("invalid symbol offset %d", o)
+ }
+ l, n := binary.Uvarint(r.b[o:])
+ if n < 0 {
+ return "", errors.New("reading symbol length failed")
+ }
+
+ end := int(o) + n + int(l)
+ if end > len(r.b) {
+ return "", errors.New("invalid length")
+ }
+ b := r.b[int(o)+n : end]
+
+ return yoloString(b), nil
+}
+
+func (r *indexReader) LabelValues(names ...string) (StringTuples, error) {
+ key := strings.Join(names, string(sep))
+ off, ok := r.labels[key]
+ if !ok {
+ return nil, fmt.Errorf("label index doesn't exist")
+ }
+
+ flag, b, err := r.section(off)
+ if err != nil {
+ return nil, errors.Wrapf(err, "section at %d", off)
+ }
+ if flag != flagStd {
+ return nil, errInvalidFlag
+ }
+ l, n := binary.Uvarint(b)
+ if n < 1 {
+ return nil, errors.Wrap(errInvalidSize, "read label index size")
+ }
+
+ st := &serializedStringTuples{
+ l: int(l),
+ b: b[n:],
+ lookup: r.lookupSymbol,
+ }
+ return st, nil
+}
+
+func (r *indexReader) LabelIndices() ([][]string, error) {
+ res := [][]string{}
+
+ for s := range r.labels {
+ res = append(res, strings.Split(s, string(sep)))
+ }
+ return res, nil
+}
+
+func (r *indexReader) Series(ref uint32) (labels.Labels, []ChunkMeta, error) {
+ k, n := binary.Uvarint(r.b[ref:])
+ if n < 1 {
+ return nil, nil, errors.Wrap(errInvalidSize, "number of labels")
+ }
+
+ b := r.b[int(ref)+n:]
+ lbls := make(labels.Labels, 0, k)
+
+ for i := 0; i < 2*int(k); i += 2 {
+ o, m := binary.Uvarint(b)
+ if m < 1 {
+ return nil, nil, errors.Wrap(errInvalidSize, "symbol offset")
+ }
+ n, err := r.lookupSymbol(uint32(o))
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "symbol lookup")
+ }
+ b = b[m:]
+
+ o, m = binary.Uvarint(b)
+ if m < 1 {
+ return nil, nil, errors.Wrap(errInvalidSize, "symbol offset")
+ }
+ v, err := r.lookupSymbol(uint32(o))
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "symbol lookup")
+ }
+ b = b[m:]
+
+ lbls = append(lbls, labels.Label{
+ Name: n,
+ Value: v,
+ })
+ }
+
+ // Read the chunks meta data.
+ k, n = binary.Uvarint(b)
+ if n < 1 {
+ return nil, nil, errors.Wrap(errInvalidSize, "number of chunks")
+ }
+
+ b = b[n:]
+ chunks := make([]ChunkMeta, 0, k)
+
+ for i := 0; i < int(k); i++ {
+ firstTime, n := binary.Varint(b)
+ if n < 1 {
+ return nil, nil, errors.Wrap(errInvalidSize, "first time")
+ }
+ b = b[n:]
+
+ lastTime, n := binary.Varint(b)
+ if n < 1 {
+ return nil, nil, errors.Wrap(errInvalidSize, "last time")
+ }
+ b = b[n:]
+
+ o, n := binary.Uvarint(b)
+ if n < 1 {
+ return nil, nil, errors.Wrap(errInvalidSize, "chunk offset")
+ }
+ b = b[n:]
+
+ chunks = append(chunks, ChunkMeta{
+ Ref: uint32(o),
+ MinTime: firstTime,
+ MaxTime: lastTime,
+ })
+ }
+
+ return lbls, chunks, nil
+}
+
+func (r *indexReader) Postings(name, value string) (Postings, error) {
+ key := name + string(sep) + value
+
+ off, ok := r.postings[key]
+ if !ok {
+ return nil, ErrNotFound
+ }
+
+ flag, b, err := r.section(off)
+ if err != nil {
+ return nil, errors.Wrapf(err, "section at %d", off)
+ }
+
+ if flag != flagStd {
+ return nil, errors.Wrapf(errInvalidFlag, "section at %d", off)
+ }
+
+ // TODO(fabxc): just read into memory as an intermediate solution.
+ // Add iterator over serialized data.
+ var l []uint32
+
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, errors.Wrap(errInvalidSize, "plain postings entry")
+ }
+ l = append(l, binary.BigEndian.Uint32(b[:4]))
+
+ b = b[4:]
+ }
+
+ return &listPostings{list: l, idx: -1}, nil
+}
+
+type stringTuples struct {
+ l int // tuple length
+ s []string // flattened tuple entries
+}
+
+func newStringTuples(s []string, l int) (*stringTuples, error) {
+ if len(s)%l != 0 {
+ return nil, errors.Wrap(errInvalidSize, "string tuple list")
+ }
+ return &stringTuples{s: s, l: l}, nil
+}
+
+func (t *stringTuples) Len() int { return len(t.s) / t.l }
+func (t *stringTuples) At(i int) ([]string, error) { return t.s[i : i+t.l], nil }
+
+func (t *stringTuples) Swap(i, j int) {
+ c := make([]string, t.l)
+ copy(c, t.s[i:i+t.l])
+
+ for k := 0; k < t.l; k++ {
+ t.s[i+k] = t.s[j+k]
+ t.s[j+k] = c[k]
+ }
+}
+
+func (t *stringTuples) Less(i, j int) bool {
+ for k := 0; k < t.l; k++ {
+ d := strings.Compare(t.s[i+k], t.s[j+k])
+
+ if d < 0 {
+ return true
+ }
+ if d > 0 {
+ return false
+ }
+ }
+ return false
+}
+
+type serializedStringTuples struct {
+ l int
+ b []byte
+ lookup func(uint32) (string, error)
+}
+
+func (t *serializedStringTuples) Len() int {
+ // TODO(fabxc): Cache this?
+ return len(t.b) / (4 * t.l) +} + +func (t *serializedStringTuples) At(i int) ([]string, error) { + if len(t.b) < (i+t.l)*4 { + return nil, errInvalidSize + } + res := make([]string, 0, t.l) + + for k := 0; k < t.l; k++ { + offset := binary.BigEndian.Uint32(t.b[(i+k)*4:]) + + s, err := t.lookup(offset) + if err != nil { + return nil, errors.Wrap(err, "symbol lookup") + } + res = append(res, s) + } + + return res, nil +} diff --git a/vendor/github.com/fabxc/tsdb/wal.go b/vendor/github.com/fabxc/tsdb/wal.go new file mode 100644 index 0000000000..5731bba142 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/wal.go @@ -0,0 +1,428 @@ +package tsdb + +import ( + "bufio" + "encoding/binary" + "hash/crc32" + "io" + "math" + "os" + "path/filepath" + "sync" + "time" + + "github.com/coreos/etcd/pkg/fileutil" + "github.com/fabxc/tsdb/labels" + "github.com/go-kit/kit/log" + "github.com/pkg/errors" +) + +// WALEntryType indicates what data a WAL entry contains. +type WALEntryType byte + +// The valid WAL entry types. +const ( + WALEntrySymbols = 1 + WALEntrySeries = 2 + WALEntrySamples = 3 +) + +// WAL is a write ahead log for series data. It can only be written to. +// Use WALReader to read back from a write ahead log. +type WAL struct { + mtx sync.Mutex + + f *fileutil.LockedFile + enc *walEncoder + logger log.Logger + flushInterval time.Duration + + stopc chan struct{} + donec chan struct{} + + symbols map[string]uint32 +} + +const walFileName = "wal-000" + +// OpenWAL opens or creates a write ahead log in the given directory. +// The WAL must be read completely before new data is written. +func OpenWAL(dir string, l log.Logger, flushInterval time.Duration) (*WAL, error) { + if err := os.MkdirAll(dir, 0777); err != nil { + return nil, err + } + + p := filepath.Join(dir, walFileName) + + f, err := fileutil.TryLockFile(p, os.O_RDWR, 0666) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + f, err = fileutil.LockFile(p, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + if _, err = f.Seek(0, os.SEEK_END); err != nil { + return nil, err + } + } + enc, err := newWALEncoder(f.File) + if err != nil { + return nil, err + } + + w := &WAL{ + f: f, + logger: l, + enc: enc, + flushInterval: flushInterval, + symbols: map[string]uint32{}, + donec: make(chan struct{}), + stopc: make(chan struct{}), + } + go w.run(flushInterval) + + return w, nil +} + +type walHandler struct { + sample func(refdSample) error + series func(labels.Labels) error +} + +// ReadAll consumes all entries in the WAL and triggers the registered handlers. +func (w *WAL) ReadAll(h *walHandler) error { + dec := &walDecoder{ + r: w.f, + handler: h, + } + + for { + if err := dec.entry(); err != nil { + if err == io.EOF { + return nil + } + return err + } + } +} + +// Log writes a batch of new series labels and samples to the log. 
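+// A batch becomes at most two entries (series, then samples), each framed
+// by walEncoder.entry as
+//
+//    | type (1B) | flag (1B) | length (4B, big-endian) | payload | CRC32 (4B) |
+//
+// The checksum is written for later verification; the decoder currently
+// reads past it without checking (see the TODO in walDecoder.entry).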
+func (w *WAL) Log(series []labels.Labels, samples []refdSample) error { + if err := w.enc.encodeSeries(series); err != nil { + return err + } + if err := w.enc.encodeSamples(samples); err != nil { + return err + } + if w.flushInterval <= 0 { + return w.sync() + } + return nil +} + +func (w *WAL) sync() error { + if err := w.enc.flush(); err != nil { + return err + } + return fileutil.Fdatasync(w.f.File) +} + +func (w *WAL) run(interval time.Duration) { + var tick <-chan time.Time + + if interval > 0 { + ticker := time.NewTicker(interval) + defer ticker.Stop() + tick = ticker.C + } + defer close(w.donec) + + for { + select { + case <-w.stopc: + return + case <-tick: + if err := w.sync(); err != nil { + w.logger.Log("msg", "sync failed", "err", err) + } + } + } +} + +// Close sync all data and closes the underlying resources. +func (w *WAL) Close() error { + close(w.stopc) + <-w.donec + + if err := w.sync(); err != nil { + return err + } + return w.f.Close() +} + +type walEncoder struct { + mtx sync.Mutex + // w *ioutil.PageWriter + w *bufio.Writer +} + +const ( + minSectorSize = 512 + + // walPageBytes is the alignment for flushing records to the backing Writer. + // It should be a multiple of the minimum sector size so that WAL can safely + // distinguish between torn writes and ordinary data corruption. + walPageBytes = 16 * minSectorSize +) + +func newWALEncoder(f *os.File) (*walEncoder, error) { + // offset, err := f.Seek(0, os.SEEK_CUR) + // if err != nil { + // return nil, err + // } + enc := &walEncoder{ + // w: ioutil.NewPageWriter(f, walPageBytes, int(offset)), + w: bufio.NewWriterSize(f, 4*1024*1024), + } + return enc, nil +} + +func (e *walEncoder) flush() error { + e.mtx.Lock() + defer e.mtx.Unlock() + + return e.w.Flush() +} + +func (e *walEncoder) entry(et WALEntryType, flag byte, buf []byte) error { + e.mtx.Lock() + defer e.mtx.Unlock() + + h := crc32.NewIEEE() + w := io.MultiWriter(h, e.w) + + b := make([]byte, 6) + b[0] = byte(et) + b[1] = flag + + binary.BigEndian.PutUint32(b[2:], uint32(len(buf))) + + if _, err := w.Write(b); err != nil { + return err + } + if _, err := w.Write(buf); err != nil { + return err + } + if _, err := e.w.Write(h.Sum(nil)); err != nil { + return err + } + + putWALBuffer(buf) + return nil +} + +const ( + walSeriesSimple = 1 + walSamplesSimple = 1 +) + +var walBuffers = sync.Pool{} + +func getWALBuffer() []byte { + b := walBuffers.Get() + if b == nil { + return make([]byte, 0, 64*1024) + } + return b.([]byte) +} + +func putWALBuffer(b []byte) { + b = b[:0] + walBuffers.Put(b) +} + +func (e *walEncoder) encodeSeries(series []labels.Labels) error { + if len(series) == 0 { + return nil + } + + b := make([]byte, binary.MaxVarintLen32) + buf := getWALBuffer() + + for _, lset := range series { + n := binary.PutUvarint(b, uint64(len(lset))) + buf = append(buf, b[:n]...) + + for _, l := range lset { + n = binary.PutUvarint(b, uint64(len(l.Name))) + buf = append(buf, b[:n]...) + buf = append(buf, l.Name...) + + n = binary.PutUvarint(b, uint64(len(l.Value))) + buf = append(buf, b[:n]...) + buf = append(buf, l.Value...) + } + } + + return e.entry(WALEntrySeries, walSeriesSimple, buf) +} + +func (e *walEncoder) encodeSamples(samples []refdSample) error { + if len(samples) == 0 { + return nil + } + + b := make([]byte, binary.MaxVarintLen64) + buf := getWALBuffer() + + // Store base timestamp and base reference number of first sample. + // All samples encode their timestamp and ref as delta to those. 
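+ // For example, samples (ref=1000, t=5000) and (ref=1001, t=5030)
+ // are stored as the 16-byte base (1000, 5000) followed by the varint
+ // delta pairs (0, 0) and (1, 30); each value is 8 raw bytes.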
+ // + // TODO(fabxc): optimize for all samples having the same timestamp. + first := samples[0] + + binary.BigEndian.PutUint64(b, first.ref) + buf = append(buf, b[:8]...) + binary.BigEndian.PutUint64(b, uint64(first.t)) + buf = append(buf, b[:8]...) + + for _, s := range samples { + n := binary.PutVarint(b, int64(s.ref)-int64(first.ref)) + buf = append(buf, b[:n]...) + + n = binary.PutVarint(b, s.t-first.t) + buf = append(buf, b[:n]...) + + binary.BigEndian.PutUint64(b, math.Float64bits(s.v)) + buf = append(buf, b[:8]...) + } + + return e.entry(WALEntrySamples, walSamplesSimple, buf) +} + +type walDecoder struct { + r io.Reader + handler *walHandler + + buf []byte +} + +func newWALDecoer(r io.Reader, h *walHandler) *walDecoder { + return &walDecoder{ + r: r, + handler: h, + buf: make([]byte, 0, 1024*1024), + } +} + +func (d *walDecoder) decodeSeries(flag byte, b []byte) error { + for len(b) > 0 { + l, n := binary.Uvarint(b) + if n < 1 { + return errors.Wrap(errInvalidSize, "number of labels") + } + b = b[n:] + lset := make(labels.Labels, l) + + for i := 0; i < int(l); i++ { + nl, n := binary.Uvarint(b) + if n < 1 || len(b) < n+int(nl) { + return errors.Wrap(errInvalidSize, "label name") + } + lset[i].Name = string(b[n : n+int(nl)]) + b = b[n+int(nl):] + + vl, n := binary.Uvarint(b) + if n < 1 || len(b) < n+int(vl) { + return errors.Wrap(errInvalidSize, "label value") + } + lset[i].Value = string(b[n : n+int(vl)]) + b = b[n+int(vl):] + } + + if err := d.handler.series(lset); err != nil { + return err + } + } + return nil +} + +func (d *walDecoder) decodeSamples(flag byte, b []byte) error { + if len(b) < 16 { + return errors.Wrap(errInvalidSize, "header length") + } + var ( + baseRef = binary.BigEndian.Uint64(b) + baseTime = int64(binary.BigEndian.Uint64(b[8:])) + ) + b = b[16:] + + for len(b) > 0 { + var smpl refdSample + + dref, n := binary.Varint(b) + if n < 1 { + return errors.Wrap(errInvalidSize, "sample ref delta") + } + b = b[n:] + + smpl.ref = uint64(int64(baseRef) + dref) + + dtime, n := binary.Varint(b) + if n < 1 { + return errors.Wrap(errInvalidSize, "sample timestamp delta") + } + b = b[n:] + smpl.t = baseTime + dtime + + if len(b) < 8 { + return errors.Wrapf(errInvalidSize, "sample value bits %d", len(b)) + } + smpl.v = float64(math.Float64frombits(binary.BigEndian.Uint64(b))) + b = b[8:] + + if err := d.handler.sample(smpl); err != nil { + return err + } + } + return nil +} + +func (d *walDecoder) entry() error { + b := make([]byte, 6) + if _, err := d.r.Read(b); err != nil { + return err + } + + var ( + etype = WALEntryType(b[0]) + flag = b[1] + length = int(binary.BigEndian.Uint32(b[2:])) + ) + + if length > len(d.buf) { + d.buf = make([]byte, length) + } + buf := d.buf[:length] + + if _, err := d.r.Read(buf); err != nil { + return err + } + // Read away checksum. 
+ // TODO(fabxc): verify it + if _, err := d.r.Read(b[:4]); err != nil { + return err + } + + switch etype { + case WALEntrySeries: + return d.decodeSeries(flag, buf) + case WALEntrySamples: + return d.decodeSamples(flag, buf) + } + + return errors.Errorf("unknown WAL entry type %q", etype) +} diff --git a/vendor/github.com/fabxc/tsdb/writer.go b/vendor/github.com/fabxc/tsdb/writer.go new file mode 100644 index 0000000000..a73f2bd116 --- /dev/null +++ b/vendor/github.com/fabxc/tsdb/writer.go @@ -0,0 +1,506 @@ +package tsdb + +import ( + "bufio" + "encoding/binary" + "hash/crc32" + "io" + "sort" + "strings" + + "github.com/bradfitz/slice" + "github.com/fabxc/tsdb/chunks" + "github.com/fabxc/tsdb/labels" + "github.com/pkg/errors" +) + +const ( + // MagicSeries 4 bytes at the head of series file. + MagicSeries = 0x85BD40DD + + // MagicIndex 4 bytes at the head of an index file. + MagicIndex = 0xBAAAD700 +) + +const compactionPageBytes = minSectorSize * 64 + +// SeriesWriter serializes a time block of chunked series data. +type SeriesWriter interface { + // WriteSeries writes the time series data chunks for a single series. + // The reference is used to resolve the correct series in the written index. + // It only has to be valid for the duration of the write. + WriteSeries(ref uint32, l labels.Labels, chunks []ChunkMeta) error + + // Size returns the size of the data written so far. + Size() int64 + + // Close writes any required finalization and closes the resources + // associated with the underlying writer. + Close() error +} + +// seriesWriter implements the SeriesWriter interface for the standard +// serialization format. +type seriesWriter struct { + ow io.Writer + w *bufio.Writer + n int64 + c int + + index IndexWriter +} + +func newSeriesWriter(w io.Writer, index IndexWriter) *seriesWriter { + return &seriesWriter{ + ow: w, + w: bufio.NewWriterSize(w, 1*1024*1024), + n: 0, + index: index, + } +} + +func (w *seriesWriter) write(wr io.Writer, b []byte) error { + n, err := wr.Write(b) + w.n += int64(n) + return err +} + +func (w *seriesWriter) writeMeta() error { + b := [8]byte{} + + binary.BigEndian.PutUint32(b[:4], MagicSeries) + b[4] = flagStd + + return w.write(w.w, b[:]) +} + +func (w *seriesWriter) WriteSeries(ref uint32, lset labels.Labels, chks []ChunkMeta) error { + // Initialize with meta data. + if w.n == 0 { + if err := w.writeMeta(); err != nil { + return err + } + } + + // TODO(fabxc): is crc32 enough for chunks of one series? + h := crc32.NewIEEE() + wr := io.MultiWriter(h, w.w) + + // For normal reads we don't need the number of the chunk section but + // it allows us to verify checksums without reading the index file. + // The offsets are also technically enough to calculate chunk size. but + // holding the length of each chunk could later allow for adding padding + // between chunks. + b := [binary.MaxVarintLen32]byte{} + n := binary.PutUvarint(b[:], uint64(len(chks))) + + if err := w.write(wr, b[:n]); err != nil { + return err + } + + for i := range chks { + chk := &chks[i] + + chk.Ref = uint32(w.n) + + n = binary.PutUvarint(b[:], uint64(len(chk.Chunk.Bytes()))) + + if err := w.write(wr, b[:n]); err != nil { + return err + } + if err := w.write(wr, []byte{byte(chk.Chunk.Encoding())}); err != nil { + return err + } + if err := w.write(wr, chk.Chunk.Bytes()); err != nil { + return err + } + chk.Chunk = nil + } + + if err := w.write(w.w, h.Sum(nil)); err != nil { + return err + } + + if w.index != nil { + w.index.AddSeries(ref, lset, chks...) 
+    }
+    return nil
+}
+
+func (w *seriesWriter) Size() int64 {
+    return w.n
+}
+
+func (w *seriesWriter) Close() error {
+    // Initialize block in case no data was written to it.
+    if w.n == 0 {
+        if err := w.writeMeta(); err != nil {
+            return err
+        }
+    }
+    return w.w.Flush()
+}
+
+// ChunkMeta holds information about a chunk of data.
+type ChunkMeta struct {
+    // Ref and Chunk hold either a reference that can be used to retrieve
+    // chunk data or the data itself.
+    // Generally, only one of them is set.
+    Ref   uint32
+    Chunk chunks.Chunk
+
+    MinTime, MaxTime int64 // time range the data covers
+}
+
+// IndexWriter serializes the index for a block of series data.
+// The methods must generally be called in the order they are specified.
+type IndexWriter interface {
+    // AddSeries populates the index writer with a series and its offsets
+    // of chunks that the index can reference.
+    // The reference number is used to resolve a series against the postings
+    // list iterator. It only has to be available during the write process.
+    AddSeries(ref uint32, l labels.Labels, chunks ...ChunkMeta)
+
+    // WriteLabelIndex serializes an index from label names to values.
+    // The passed-in values are chained tuples of strings of the same length
+    // as names.
+    WriteLabelIndex(names []string, values []string) error
+
+    // WritePostings writes a postings list for a single label pair.
+    WritePostings(name, value string, it Postings) error
+
+    // Size returns the size of the data written so far.
+    Size() int64
+
+    // Close writes any finalization and closes the resources associated with
+    // the underlying writer.
+    Close() error
+}
+
+type indexWriterSeries struct {
+    labels labels.Labels
+    chunks []ChunkMeta // series file offset of chunks
+    offset uint32      // index file offset of series reference
+}
+
+// indexWriter implements the IndexWriter interface for the standard
+// serialization format.
+type indexWriter struct {
+    ow      io.Writer
+    w       *bufio.Writer
+    n       int64
+    started bool
+
+    series map[uint32]*indexWriterSeries
+
+    symbols      map[string]uint32 // symbol offsets
+    labelIndexes []hashEntry       // label index offsets
+    postings     []hashEntry       // postings lists offsets
+}
+
+func newIndexWriter(w io.Writer) *indexWriter {
+    return &indexWriter{
+        w:       bufio.NewWriterSize(w, 1*1024*1024),
+        ow:      w,
+        n:       0,
+        symbols: make(map[string]uint32, 4096),
+        series:  make(map[uint32]*indexWriterSeries, 4096),
+    }
+}
+
+func (w *indexWriter) write(wr io.Writer, b []byte) error {
+    n, err := wr.Write(b)
+    w.n += int64(n)
+    return err
+}
+
+// section writes a CRC32-checksummed section of length l, guarded by flag.
+func (w *indexWriter) section(l uint32, flag byte, f func(w io.Writer) error) error {
+    h := crc32.NewIEEE()
+    wr := io.MultiWriter(h, w.w)
+
+    b := [5]byte{flag, 0, 0, 0, 0}
+    binary.BigEndian.PutUint32(b[1:], l)
+
+    if err := w.write(wr, b[:]); err != nil {
+        return errors.Wrap(err, "writing header")
+    }
+
+    if err := f(wr); err != nil {
+        return errors.Wrap(err, "contents write func")
+    }
+    if err := w.write(w.w, h.Sum(nil)); err != nil {
+        return errors.Wrap(err, "writing checksum")
+    }
+    return nil
+}
+
+func (w *indexWriter) writeMeta() error {
+    b := [8]byte{}
+
+    binary.BigEndian.PutUint32(b[:4], MagicIndex)
+    b[4] = flagStd
+
+    return w.write(w.w, b[:])
+}
+
+func (w *indexWriter) AddSeries(ref uint32, lset labels.Labels, chunks ...ChunkMeta) {
+    // Populate the symbol table from all label sets we have to reference.
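+    // The offsets stored as values are left at zero here; writeSymbols
+    // assigns the final positions once all series have been added.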
+    for _, l := range lset {
+        w.symbols[l.Name] = 0
+        w.symbols[l.Value] = 0
+    }
+
+    w.series[ref] = &indexWriterSeries{
+        labels: lset,
+        chunks: chunks,
+    }
+}
+
+func (w *indexWriter) writeSymbols() error {
+    // Generate a sorted list of the strings we will store as the reference table.
+    symbols := make([]string, 0, len(w.symbols))
+    for s := range w.symbols {
+        symbols = append(symbols, s)
+    }
+    sort.Strings(symbols)
+
+    // The start of the section plus a 5 byte section header are our base.
+    // TODO(fabxc): switch to relative offsets and hold sections in a TOC.
+    base := uint32(w.n) + 5
+
+    buf := [binary.MaxVarintLen32]byte{}
+    b := append(make([]byte, 0, 4096), flagStd)
+
+    for _, s := range symbols {
+        w.symbols[s] = base + uint32(len(b))
+
+        n := binary.PutUvarint(buf[:], uint64(len(s)))
+        b = append(b, buf[:n]...)
+        b = append(b, s...)
+    }
+
+    l := uint32(len(b))
+
+    return w.section(l, flagStd, func(wr io.Writer) error {
+        return w.write(wr, b)
+    })
+}
+
+func (w *indexWriter) writeSeries() error {
+    // Series must be stored sorted by their label sets.
+    series := make([]*indexWriterSeries, 0, len(w.series))
+
+    for _, s := range w.series {
+        series = append(series, s)
+    }
+    slice.Sort(series, func(i, j int) bool {
+        return labels.Compare(series[i].labels, series[j].labels) < 0
+    })
+
+    // Current end of file plus 5 bytes for section header.
+    // TODO(fabxc): switch to relative offsets.
+    base := uint32(w.n) + 5
+
+    b := make([]byte, 0, 1<<20) // 1MiB
+    buf := make([]byte, binary.MaxVarintLen64)
+
+    for _, s := range series {
+        // Write label set symbol references.
+        s.offset = base + uint32(len(b))
+
+        n := binary.PutUvarint(buf, uint64(len(s.labels)))
+        b = append(b, buf[:n]...)
+
+        for _, l := range s.labels {
+            n = binary.PutUvarint(buf, uint64(w.symbols[l.Name]))
+            b = append(b, buf[:n]...)
+            n = binary.PutUvarint(buf, uint64(w.symbols[l.Value]))
+            b = append(b, buf[:n]...)
+        }
+
+        // Write chunk metadata, including the reference into the chunk file.
+        n = binary.PutUvarint(buf, uint64(len(s.chunks)))
+        b = append(b, buf[:n]...)
+
+        for _, c := range s.chunks {
+            n = binary.PutVarint(buf, c.MinTime)
+            b = append(b, buf[:n]...)
+            n = binary.PutVarint(buf, c.MaxTime)
+            b = append(b, buf[:n]...)
+
+            n = binary.PutUvarint(buf, uint64(c.Ref))
+            b = append(b, buf[:n]...)
+        }
+    }
+
+    l := uint32(len(b))
+
+    return w.section(l, flagStd, func(wr io.Writer) error {
+        return w.write(wr, b)
+    })
+}
+
+func (w *indexWriter) init() error {
+    if err := w.writeMeta(); err != nil {
+        return err
+    }
+    if err := w.writeSymbols(); err != nil {
+        return err
+    }
+    if err := w.writeSeries(); err != nil {
+        return err
+    }
+    w.started = true
+
+    return nil
+}
+
+func (w *indexWriter) WriteLabelIndex(names []string, values []string) error {
+    if !w.started {
+        if err := w.init(); err != nil {
+            return err
+        }
+    }
+
+    valt, err := newStringTuples(values, len(names))
+    if err != nil {
+        return err
+    }
+    sort.Sort(valt)
+
+    w.labelIndexes = append(w.labelIndexes, hashEntry{
+        name:   strings.Join(names, string(sep)),
+        offset: uint32(w.n),
+    })
+
+    buf := make([]byte, binary.MaxVarintLen32)
+    n := binary.PutUvarint(buf, uint64(len(names)))
+
+    l := uint32(n) + uint32(len(values)*4)
+
+    return w.section(l, flagStd, func(wr io.Writer) error {
+        // First byte indicates tuple size for index.
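+        // It is followed by one big-endian uint32 symbol reference per
+        // value, which is where the len(values)*4 in the section length
+        // above comes from.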
+        if err := w.write(wr, buf[:n]); err != nil {
+            return err
+        }
+
+        for _, v := range valt.s {
+            binary.BigEndian.PutUint32(buf, w.symbols[v])
+
+            if err := w.write(wr, buf[:4]); err != nil {
+                return err
+            }
+        }
+        return nil
+    })
+}
+
+func (w *indexWriter) WritePostings(name, value string, it Postings) error {
+    if !w.started {
+        if err := w.init(); err != nil {
+            return err
+        }
+    }
+
+    key := name + string(sep) + value
+
+    w.postings = append(w.postings, hashEntry{
+        name:   key,
+        offset: uint32(w.n),
+    })
+
+    b := make([]byte, 0, 4096)
+    buf := [4]byte{}
+
+    // Order of the references in the postings list does not imply order
+    // of the series references within the persisted block they are mapped to.
+    // We have to sort the new references again.
+    var refs []uint32
+
+    for it.Next() {
+        s, ok := w.series[it.At()]
+        if !ok {
+            return errors.Errorf("series for reference %d not found", it.At())
+        }
+        refs = append(refs, s.offset)
+    }
+    if err := it.Err(); err != nil {
+        return err
+    }
+
+    slice.Sort(refs, func(i, j int) bool { return refs[i] < refs[j] })
+
+    for _, r := range refs {
+        binary.BigEndian.PutUint32(buf[:], r)
+        b = append(b, buf[:]...)
+    }
+
+    return w.section(uint32(len(b)), flagStd, func(wr io.Writer) error {
+        return w.write(wr, b)
+    })
+}
+
+func (w *indexWriter) Size() int64 {
+    return w.n
+}
+
+type hashEntry struct {
+    name   string
+    offset uint32
+}
+
+func (w *indexWriter) writeHashmap(h []hashEntry) error {
+    b := make([]byte, 0, 4096)
+    buf := [binary.MaxVarintLen32]byte{}
+
+    for _, e := range h {
+        n := binary.PutUvarint(buf[:], uint64(len(e.name)))
+        b = append(b, buf[:n]...)
+        b = append(b, e.name...)
+
+        n = binary.PutUvarint(buf[:], uint64(e.offset))
+        b = append(b, buf[:n]...)
+    }
+
+    return w.section(uint32(len(b)), flagStd, func(wr io.Writer) error {
+        return w.write(wr, b)
+    })
+}
+
+func (w *indexWriter) finalize() error {
+    // Write out hash maps to jump to the correct label index and postings sections.
+    lo := uint32(w.n)
+    if err := w.writeHashmap(w.labelIndexes); err != nil {
+        return err
+    }
+
+    po := uint32(w.n)
+    if err := w.writeHashmap(w.postings); err != nil {
+        return err
+    }
+
+    // Terminate index file with offsets to hashmaps. This is the entry point
+    // for any index query.
+    // TODO(fabxc): also store offset to series section to allow plain
+    // iteration over all existing series?
+    // TODO(fabxc): store references like these that are not resolved via direct
+    // mmap using explicit endianness?
+    b := [8]byte{}
+    binary.BigEndian.PutUint32(b[:4], lo)
+    binary.BigEndian.PutUint32(b[4:], po)
+
+    return w.write(w.w, b[:])
+}
+
+func (w *indexWriter) Close() error {
+    // Handle blocks without any data.
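+    // init still writes the meta, symbol, and series sections, so even an
+    // empty block produces a structurally complete index file.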
+ if !w.started { + if err := w.init(); err != nil { + return err + } + } + if err := w.finalize(); err != nil { + return err + } + return w.w.Flush() +} diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE new file mode 100644 index 0000000000..9d83342acd --- /dev/null +++ b/vendor/github.com/go-kit/kit/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md new file mode 100644 index 0000000000..2763f7f148 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/README.md @@ -0,0 +1,148 @@ +# package log + +`package log` provides a minimal interface for structured logging in services. +It may be wrapped to encode conventions, enforce type-safety, provide leveled logging, and so on. +It can be used for both typical application log events, and log-structured data streams. + +## Structured logging + +Structured logging is, basically, conceding to the reality that logs are _data_, + and warrant some level of schematic rigor. +Using a stricter, key/value-oriented message format for our logs, + containing contextual and semantic information, + makes it much easier to get insight into the operational activity of the systems we build. +Consequently, `package log` is of the strong belief that + "[the benefits of structured logging outweigh the minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". + +Migrating from unstructured to structured logging is probably a lot easier than you'd expect. + +```go +// Unstructured +log.Printf("HTTP server listening on %s", addr) + +// Structured +logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") +``` + +## Usage + +### Typical application logging + +```go +w := log.NewSyncWriter(os.Stderr) +logger := log.NewLogfmtLogger(w) +logger.Log("question", "what is the meaning of life?", "answer", 42) + +// Output: +// question="what is the meaning of life?" 
answer=42 +``` + +### Log contexts + +```go +func main() { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = log.NewContext(logger).With("instance_id", 123) + + logger.Log("msg", "starting") + NewWorker(log.NewContext(logger).With("component", "worker")).Run() + NewSlacker(log.NewContext(logger).With("component", "slacker")).Run() +} + +// Output: +// instance_id=123 msg=starting +// instance_id=123 component=worker msg=running +// instance_id=123 component=slacker msg=running +``` + +### Interact with stdlib logger + +Redirect stdlib logger to Go kit logger. + +```go +import ( + "os" + stdlog "log" + kitlog "github.com/go-kit/kit/log" +) + +func main() { + logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) + stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) + stdlog.Print("I sure like pie") +} + +// Output: +// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} +``` + +Or, if, for legacy reasons, + you need to pipe all of your logging through the stdlib log package, + you can redirect Go kit logger to the stdlib logger. + +```go +logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) +logger.Log("legacy", true, "msg", "at least it's something") + +// Output: +// 2016/01/01 12:34:56 legacy=true msg="at least it's something" +``` + +### Timestamps and callers + +```go +var logger log.Logger +logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + +logger.Log("msg", "hello") + +// Output: +// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello +``` + +## Supported output formats + +- [Logfmt](https://brandur.org/logfmt) +- JSON + +## Enhancements + +`package log` is centered on the one-method Logger interface. + +```go +type Logger interface { + Log(keyvals ...interface{}) error +} +``` + +This interface, and its supporting code like [log.Context](https://godoc.org/github.com/go-kit/kit/log#Context), + is the product of much iteration and evaluation. +For more details on the evolution of the Logger interface, + see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), + a talk by [Chris Hines](https://github.com/ChrisHines). +Also, please see + [#63](https://github.com/go-kit/kit/issues/63), + [#76](https://github.com/go-kit/kit/pull/76), + [#131](https://github.com/go-kit/kit/issues/131), + [#157](https://github.com/go-kit/kit/pull/157), + [#164](https://github.com/go-kit/kit/issues/164), and + [#252](https://github.com/go-kit/kit/pull/252) + to review historical conversations about package log and the Logger interface. + +Value-add packages and suggestions, + like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/levels), + are of course welcome. +Good proposals should + +- Be composable with [log.Context](https://godoc.org/github.com/go-kit/kit/log#Context), +- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped context, and +- Be friendly to packages that accept only an unadorned log.Logger. + +## Benchmarks & comparisons + +There are a few Go logging benchmarks and comparisons that include Go kit's package log. 
+ +- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log +- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go new file mode 100644 index 0000000000..49d3f1810b --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/doc.go @@ -0,0 +1,93 @@ +// Package log provides a structured logger. +// +// Structured logging produces logs easily consumed later by humans or +// machines. Humans might be interested in debugging errors, or tracing +// specific requests. Machines might be interested in counting interesting +// events, or aggregating information for off-line processing. In both cases, +// it is important that the log messages are structured and actionable. +// Package log is designed to encourage both of these best practices. +// +// Basic Usage +// +// The fundamental interface is Logger. Loggers create log events from +// key/value data. The Logger interface has a single method, Log, which +// accepts a sequence of alternating key/value pairs, which this package names +// keyvals. +// +// type Logger interface { +// Log(keyvals ...interface{}) error +// } +// +// Here is an example of a function using a Logger to create log events. +// +// func RunTask(task Task, logger log.Logger) string { +// logger.Log("taskID", task.ID, "event", "starting task") +// ... +// logger.Log("taskID", task.ID, "event", "task complete") +// } +// +// The keys in the above example are "taskID" and "event". The values are +// task.ID, "starting task", and "task complete". Every key is followed +// immediately by its value. +// +// Keys are usually plain strings. Values may be any type that has a sensible +// encoding in the chosen log format. With structured logging it is a good +// idea to log simple values without formatting them. This practice allows +// the chosen logger to encode values in the most appropriate way. +// +// Log Context +// +// A log context stores keyvals that it includes in all log events. Building +// appropriate log contexts reduces repetition and aids consistency in the +// resulting log output. We can use a context to improve the RunTask example. +// +// func RunTask(task Task, logger log.Logger) string { +// logger = log.NewContext(logger).With("taskID", task.ID) +// logger.Log("event", "starting task") +// ... +// taskHelper(task.Cmd, logger) +// ... +// logger.Log("event", "task complete") +// } +// +// The improved version emits the same log events as the original for the +// first and last calls to Log. The call to taskHelper highlights that a +// context may be passed as a logger to other functions. Each log event +// created by the called function will include the task.ID even though the +// function does not have access to that value. Using log contexts this way +// simplifies producing log output that enables tracing the life cycle of +// individual tasks. (See the Context example for the full code of the +// above snippet.) +// +// Dynamic Context Values +// +// A Valuer function stored in a log context generates a new value each time +// the context logs an event. The Valuer example demonstrates how this +// feature works. +// +// Valuers provide the basis for consistently logging timestamps and source +// code location. The log package defines several valuers for that purpose. +// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and +// DefaultCaller. 
A common logger initialization sequence that ensures all log
+// entries contain a timestamp and source location looks like this:
+//
+//    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
+//    logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+//
+// Concurrent Safety
+//
+// Applications with multiple goroutines want each log event written to the
+// same logger to remain separate from other log events. Package log provides
+// two simple solutions for concurrent safe logging.
+//
+// NewSyncWriter wraps an io.Writer and serializes each call to its Write
+// method. Using a SyncWriter has the benefit that the smallest practical
+// portion of the logging logic is performed within a mutex, but it requires
+// the formatting Logger to make only one call to Write per log event.
+//
+// NewSyncLogger wraps any Logger and serializes each call to its Log method.
+// Using a SyncLogger has the benefit that it guarantees each log event is
+// handled atomically within the wrapped logger, but it typically serializes
+// both the formatting and output logic. Use a SyncLogger if the formatting
+// logger may perform multiple writes per log event.
+package log
diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go
new file mode 100644
index 0000000000..231e099553
--- /dev/null
+++ b/vendor/github.com/go-kit/kit/log/json_logger.go
@@ -0,0 +1,92 @@
+package log
+
+import (
+    "encoding"
+    "encoding/json"
+    "fmt"
+    "io"
+    "reflect"
+)
+
+type jsonLogger struct {
+    io.Writer
+}
+
+// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
+// single JSON object. Each log event produces no more than one call to
+// w.Write. The passed Writer must be safe for concurrent use by multiple
+// goroutines if the returned Logger will be used concurrently.
+func NewJSONLogger(w io.Writer) Logger {
+    return &jsonLogger{w}
+}
+
+func (l *jsonLogger) Log(keyvals ...interface{}) error {
+    n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd
+    m := make(map[string]interface{}, n)
+    for i := 0; i < len(keyvals); i += 2 {
+        k := keyvals[i]
+        var v interface{} = ErrMissingValue
+        if i+1 < len(keyvals) {
+            v = keyvals[i+1]
+        }
+        merge(m, k, v)
+    }
+    return json.NewEncoder(l.Writer).Encode(m)
+}
+
+func merge(dst map[string]interface{}, k, v interface{}) {
+    var key string
+    switch x := k.(type) {
+    case string:
+        key = x
+    case fmt.Stringer:
+        key = safeString(x)
+    default:
+        key = fmt.Sprint(x)
+    }
+    if x, ok := v.(error); ok {
+        v = safeError(x)
+    }
+
+    // We want json.Marshaler and encoding.TextMarshaler to take priority over
+    // err.Error() and v.String(). But json.Marshal (called later) does that by
+    // default, so we force a no-op if it's one of those two cases.
+ switch x := v.(type) { + case json.Marshaler: + case encoding.TextMarshaler: + case error: + v = safeError(x) + case fmt.Stringer: + v = safeString(x) + } + + dst[key] = v +} + +func safeString(str fmt.Stringer) (s string) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + s = "NULL" + } else { + panic(panicVal) + } + } + }() + s = str.String() + return +} + +func safeError(err error) (s interface{}) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + s = nil + } else { + panic(panicVal) + } + } + }() + s = err.Error() + return +} diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go new file mode 100644 index 0000000000..97990feffa --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/log.go @@ -0,0 +1,144 @@ +package log + +import "errors" + +// Logger is the fundamental interface for all log operations. Log creates a +// log event from keyvals, a variadic sequence of alternating keys and values. +// Implementations must be safe for concurrent use by multiple goroutines. In +// particular, any implementation of Logger that appends to keyvals or +// modifies any of its elements must make a copy first. +type Logger interface { + Log(keyvals ...interface{}) error +} + +// ErrMissingValue is appended to keyvals slices with odd length to substitute +// the missing value. +var ErrMissingValue = errors.New("(MISSING)") + +// NewContext returns a new Context that logs to logger. +func NewContext(logger Logger) *Context { + if c, ok := logger.(*Context); ok { + return c + } + return &Context{logger: logger} +} + +// Context must always have the same number of stack frames between calls to +// its Log method and the eventual binding of Valuers to their value. This +// requirement comes from the functional requirement to allow a context to +// resolve application call site information for a log.Caller stored in the +// context. To do this we must be able to predict the number of logging +// functions on the stack when bindValues is called. +// +// Three implementation details provide the needed stack depth consistency. +// The first two of these details also result in better amortized performance, +// and thus make sense even without the requirements regarding stack depth. +// The third detail, however, is subtle and tied to the implementation of the +// Go compiler. +// +// 1. NewContext avoids introducing an additional layer when asked to +// wrap another Context. +// 2. With avoids introducing an additional layer by returning a newly +// constructed Context with a merged keyvals rather than simply +// wrapping the existing Context. +// 3. All of Context's methods take pointer receivers even though they +// do not mutate the Context. +// +// Before explaining the last detail, first some background. The Go compiler +// generates wrapper methods to implement the auto dereferencing behavior when +// calling a value method through a pointer variable. These wrapper methods +// are also used when calling a value method through an interface variable +// because interfaces store a pointer to the underlying concrete value. +// Calling a pointer receiver through an interface does not require generating +// an additional function. 
+// +// If Context had value methods then calling Context.Log through a variable +// with type Logger would have an extra stack frame compared to calling +// Context.Log through a variable with type Context. Using pointer receivers +// avoids this problem. + +// A Context wraps a Logger and holds keyvals that it includes in all log +// events. When logging, a Context replaces all value elements (odd indexes) +// containing a Valuer with their generated value for each call to its Log +// method. +type Context struct { + logger Logger + keyvals []interface{} + hasValuer bool +} + +// Log replaces all value elements (odd indexes) containing a Valuer in the +// stored context with their generated value, appends keyvals, and passes the +// result to the wrapped Logger. +func (l *Context) Log(keyvals ...interface{}) error { + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + if l.hasValuer { + // If no keyvals were appended above then we must copy l.keyvals so + // that future log events will reevaluate the stored Valuers. + if len(keyvals) == 0 { + kvs = append([]interface{}{}, l.keyvals...) + } + bindValues(kvs[:len(l.keyvals)]) + } + return l.logger.Log(kvs...) +} + +// With returns a new Context with keyvals appended to those of the receiver. +func (l *Context) With(keyvals ...interface{}) *Context { + if len(keyvals) == 0 { + return l + } + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + return &Context{ + logger: l.logger, + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + keyvals: kvs[:len(kvs):len(kvs)], + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// WithPrefix returns a new Context with keyvals prepended to those of the +// receiver. +func (l *Context) WithPrefix(keyvals ...interface{}) *Context { + if len(keyvals) == 0 { + return l + } + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + n := len(l.keyvals) + len(keyvals) + if len(keyvals)%2 != 0 { + n++ + } + kvs := make([]interface{}, 0, n) + kvs = append(kvs, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + kvs = append(kvs, l.keyvals...) + return &Context{ + logger: l.logger, + keyvals: kvs, + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If +// f is a function with the appropriate signature, LoggerFunc(f) is a Logger +// object that calls f. +type LoggerFunc func(...interface{}) error + +// Log implements Logger by calling f(keyvals...). +func (f LoggerFunc) Log(keyvals ...interface{}) error { + return f(keyvals...) 
+} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go new file mode 100644 index 0000000000..a00305298b --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/logfmt_logger.go @@ -0,0 +1,62 @@ +package log + +import ( + "bytes" + "io" + "sync" + + "github.com/go-logfmt/logfmt" +) + +type logfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *logfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var logfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc logfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type logfmtLogger struct { + w io.Writer +} + +// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in +// logfmt format. Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. +func NewLogfmtLogger(w io.Writer) Logger { + return &logfmtLogger{w} +} + +func (l logfmtLogger) Log(keyvals ...interface{}) error { + enc := logfmtEncoderPool.Get().(*logfmtEncoder) + enc.Reset() + defer logfmtEncoderPool.Put(enc) + + if err := enc.EncodeKeyvals(keyvals...); err != nil { + return err + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. + if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go new file mode 100644 index 0000000000..1047d626c4 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/nop_logger.go @@ -0,0 +1,8 @@ +package log + +type nopLogger struct{} + +// NewNopLogger returns a logger that doesn't do anything. +func NewNopLogger() Logger { return nopLogger{} } + +func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go new file mode 100644 index 0000000000..7ffd1ca177 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/stdlib.go @@ -0,0 +1,116 @@ +package log + +import ( + "io" + "log" + "regexp" + "strings" +) + +// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's +// designed to be passed to a Go kit logger as the writer, for cases where +// it's necessary to redirect all Go kit log output to the stdlib logger. +// +// If you have any choice in the matter, you shouldn't use this. Prefer to +// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. +type StdlibWriter struct{} + +// Write implements io.Writer. +func (w StdlibWriter) Write(p []byte) (int, error) { + log.Print(strings.TrimSpace(string(p))) + return len(p), nil +} + +// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib +// logger's SetOutput. It will extract date/timestamps, filenames, and +// messages, and place them under relevant keys. +type StdlibAdapter struct { + Logger + timestampKey string + fileKey string + messageKey string +} + +// StdlibAdapterOption sets a parameter for the StdlibAdapter. +type StdlibAdapterOption func(*StdlibAdapter) + +// TimestampKey sets the key for the timestamp field. By default, it's "ts". 
+func TimestampKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.timestampKey = key } +} + +// FileKey sets the key for the file and line field. By default, it's "file". +func FileKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.fileKey = key } +} + +// MessageKey sets the key for the actual log message. By default, it's "msg". +func MessageKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.messageKey = key } +} + +// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed +// logger. It's designed to be passed to log.SetOutput. +func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { + a := StdlibAdapter{ + Logger: logger, + timestampKey: "ts", + fileKey: "file", + messageKey: "msg", + } + for _, option := range options { + option(&a) + } + return a +} + +func (a StdlibAdapter) Write(p []byte) (int, error) { + result := subexps(p) + keyvals := []interface{}{} + var timestamp string + if date, ok := result["date"]; ok && date != "" { + timestamp = date + } + if time, ok := result["time"]; ok && time != "" { + if timestamp != "" { + timestamp += " " + } + timestamp += time + } + if timestamp != "" { + keyvals = append(keyvals, a.timestampKey, timestamp) + } + if file, ok := result["file"]; ok && file != "" { + keyvals = append(keyvals, a.fileKey, file) + } + if msg, ok := result["msg"]; ok { + keyvals = append(keyvals, a.messageKey, msg) + } + if err := a.Logger.Log(keyvals...); err != nil { + return 0, err + } + return len(p), nil +} + +const ( + logRegexpDate = `(?P[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` + logRegexpTime = `(?P