Merge pull request #2133 from prometheus/beorn7/vendoring

Cut v1.2.2
Fabian Reinartz 2016-10-30 14:34:07 +01:00 committed by GitHub
commit 1c3531af4e
178 changed files with 23933 additions and 4647 deletions


@ -1,3 +1,11 @@
## 1.2.2 / 2016-10-28
* [BUGFIX] UI: Deal properly with aborted requests.
* [BUGFIX] UI: Decode URL query parameters properly.
* [BUGFIX] Storage: Deal better with data corruption (non-monotonic timestamps).
* [BUGFIX] Remote storage: Re-add accidentally removed timeout flag.
* [BUGFIX] Updated a number of vendored packages to pick up upstream bug fixes.
## 1.2.1 / 2016-10-10
* [BUGFIX] Count chunk evictions properly so that the server doesn't


@ -1 +1 @@
1.2.1
1.2.2


@ -163,7 +163,7 @@ func (node *BinaryExpr) String() string {
matching := ""
vm := node.VectorMatching
if vm != nil && len(vm.MatchingLabels) > 0 {
if vm != nil && (len(vm.MatchingLabels) > 0 || vm.On) {
if vm.On {
matching = fmt.Sprintf(" ON(%s)", vm.MatchingLabels)
} else {


@ -59,6 +59,10 @@ func TestExprString(t *testing.T) {
inputs := []struct {
in, out string
}{
{
in: `sum(task:errors:rate10s{job="s"}) BY ()`,
out: `sum(task:errors:rate10s{job="s"})`,
},
{
in: `sum(task:errors:rate10s{job="s"}) BY (code)`,
},
@ -77,6 +81,9 @@ func TestExprString(t *testing.T) {
{
in: `count_values("value", task:errors:rate10s{job="s"})`,
},
{
in: `a - ON() c`,
},
{
in: `a - ON(b) c`,
},
@ -92,6 +99,10 @@ func TestExprString(t *testing.T) {
{
in: `a - IGNORING(b) c`,
},
{
in: `a - IGNORING() c`,
out: `a - c`,
},
{
in: `up > BOOL 0`,
},


@ -30,7 +30,15 @@ const (
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
DNSName string = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62})*$`
URL string = `^((ftp|https?):\/\/)?(\S+(:\S*)?@)?((([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(([a-zA-Z0-9]([a-zA-Z0-9-]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|((www\.)?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))(:(\d{1,5}))?((\/|\?|#)[^\s]*)?$`
IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
URLUsername string = `(\S+(:\S*)?@)`
Hostname string = ``
URLPath string = `((\/|\?|#)[^\s]*)`
URLPort string = `(:(\d{1,5}))`
URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
URLSubdomain string = `((www\.)|([a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*))`
URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))` + URLPort + `?` + URLPath + `?$`
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
UnixPath string = `^((?:\/[a-zA-Z0-9\.\:]+(?:_[a-zA-Z0-9\:\.]+)*(?:\-[\:a-zA-Z0-9\.]+)*)+\/?)$`
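This refactor splits the old monolithic URL regexp into named fragments (URLSchema, URLUsername, URLIP, URLSubdomain, URLPort, URLPath) and recomposes the exported URL constant from them. A minimal sketch of exercising the composed pattern, assuming these constants belong to the vendored github.com/asaskevich/govalidator package (the diff does not name the file):

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/asaskevich/govalidator"
)

func main() {
	// URL is assembled from the smaller fragments above, so one
	// compile covers scheme, credentials, host/IP, port and path.
	re := regexp.MustCompile(govalidator.URL)
	fmt.Println(re.MatchString("https://user:pass@example.com:8080/p?q=1")) // true
	fmt.Println(re.MatchString("http://"))                                  // false: no host
}
```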


@ -496,6 +496,12 @@ func IsIPv6(str string) bool {
return ip != nil && strings.Contains(str, ":")
}
// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
func IsCIDR(str string) bool {
_, _, err := net.ParseCIDR(str)
return err == nil
}
// IsMAC checks if a string is a valid MAC address.
// Possible MAC formats:
// 01:23:45:67:89:ab


@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
return 0, 0
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
i := p.index
l := len(p.buf)
@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
return
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf
if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}
var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++
b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7
b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14
b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21
b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28
b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35
b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42
b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49
b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56
b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow
done:
p.index = i
return x, nil
}
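For context, a short sketch of the exported package-level helpers this goto-unrolled fast path sits behind; EncodeVarint and DecodeVarint are part of the proto package's public API:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b10_0101100: low 7 bits first, high bit marks continuation.
	buf := proto.EncodeVarint(300) // [0xAC, 0x02]
	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2
}
```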
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
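A hedged sketch of the semantic difference the new doc comment calls out, with pb.Example standing in as a hypothetical generated message type and `encoded` as wire bytes produced elsewhere:

```go
// proto.Unmarshal resets the message before decoding;
// (*Buffer).Unmarshal merges into fields that are already set.
msg := &pb.Example{Name: "preset"} // pb.Example is hypothetical
if err := proto.NewBuffer(encoded).Unmarshal(msg); err != nil {
	log.Fatal(err)
}
// msg.Name stays "preset" unless encoded also carries that field.
```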


@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) {
}
p := NewBuffer(nil)
err := p.Marshal(pb)
var state errorState
if err != nil && !state.shouldContinue(err, nil) {
return nil, err
}
if p.buf == nil && err == nil {
// Return a non-nil slice on success.
return []byte{}, nil
@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error {
// Can the object marshal itself?
if m, ok := pb.(Marshaler); ok {
data, err := m.Marshal()
if err != nil {
return err
}
p.buf = append(p.buf, data...)
return nil
return err
}
t, base, err := getbase(pb)
@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error {
}
if collectStats {
stats.Encode++
(stats).Encode++ // Parens are to work around a goimports bug.
}
if len(p.buf) > maxMarshalSize {
@ -309,7 +302,7 @@ func Size(pb Message) (n int) {
}
if collectStats {
stats.Size++
(stats).Size++ // Parens are to work around a goimports bug.
}
return
@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
if p.isMarshaler {
m := structPointer_Interface(structp, p.stype).(Marshaler)
data, _ := m.Marshal()
n += len(p.tagcode)
n += sizeRawBytes(data)
continue
}


@ -54,13 +54,17 @@ Equality is defined in this way:
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field)
and their corresponding elements are equal. Note a "bytes" field,
although represented by []byte, is not a repeated field and the
rule for the scalar fields described above applies.
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Two map fields are equal iff their lengths are the same,
and they contain the same set of elements. Zero-length map
fields are equal.
- Every other combination of things are not equal.
The return value is undefined if a and b are not protocol buffers.
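A sketch of the proto3 bytes rule above, using a hypothetical generated message pb.Msg with a bytes field Data:

```go
a := &pb.Msg{Data: nil}        // field not set
b := &pb.Msg{Data: []byte{}}   // zero-length bytes
fmt.Println(proto.Equal(a, b)) // true: nil == {} for proto3 bytes
```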


@ -1,484 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"math"
"reflect"
)
// A structPointer is a pointer to a struct.
type structPointer struct {
v reflect.Value
}
// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
// Special case: an extension map entry with a value of type T
// passes a *T to the struct-handling code with a zero field,
// expecting that it will be treated as equivalent to *struct{ X T },
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
v reflect.Value
}
func (p structPointerSlice) Len() int { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
p.v.Set(reflect.Append(p.v, q.v))
}
var (
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
float32Type = reflect.TypeOf(float32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
}
// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
return p.v.IsNil()
}
// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
t := p.v.Type().Elem()
switch t {
case int32Type:
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int32:
elem.SetInt(int64(int32(x)))
case reflect.Uint32:
elem.SetUint(uint64(x))
case reflect.Float32:
elem.SetFloat(float64(math.Float32frombits(x)))
}
}
func (p word32Slice) Len() int {
return p.v.Len()
}
func (p word32Slice) Index(i int) uint32 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
return word32Slice{structPointer_field(p, f)}
}
// word64 is like word32 but for 64-bit values.
type word64 struct {
v reflect.Value
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
return
}
panic("unreachable")
}
func word64_IsNil(p word64) bool {
return p.v.IsNil()
}
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64{structPointer_field(p, f)}
}
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
v reflect.Value
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
switch p.v.Type() {
case int64Type:
p.v.SetInt(int64(x))
return
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
return
}
panic("unreachable")
}
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val{structPointer_field(p, f)}
}
type word64Slice struct {
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(int64(x)))
case reflect.Uint64:
elem.SetUint(uint64(x))
case reflect.Float64:
elem.SetFloat(float64(math.Float64frombits(x)))
}
}
func (p word64Slice) Len() int {
return p.v.Len()
}
func (p word64Slice) Index(i int) uint64 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return uint64(elem.Uint())
case reflect.Float64:
return math.Float64bits(float64(elem.Float()))
}
panic("unreachable")
}
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
return word64Slice{structPointer_field(p, f)}
}


@ -844,7 +844,15 @@ func RegisterType(x Message, name string) {
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
func MessageName(x Message) string {
type xname interface {
XXX_MessageName() string
}
if m, ok := x.(xname); ok {
return m.XXX_MessageName()
}
return revProtoTypes[reflect.TypeOf(x)]
}
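A sketch of the new lookup order: a message implementing the XXX_MessageName hook answers directly, everything else falls back to the RegisterType registry. pb.Example is a hypothetical generated type:

```go
// Returns the fully-qualified name from the hook or the registry.
fmt.Println(proto.MessageName(&pb.Example{})) // e.g. "mypkg.Example"
```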
// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }


@ -792,12 +792,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// Either "true", "false", 1 or 0.
// true/1/t/True or false/f/0/False.
switch tok.value {
case "true", "1":
case "true", "1", "t", "True":
fv.SetBool(true)
return nil
case "false", "0":
case "false", "0", "f", "False":
fv.SetBool(false)
return nil
}
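A sketch of the widened boolean syntax through the package's text-format entry point; pb.Flag is a hypothetical generated type with a bool field `enabled`:

```go
var f pb.Flag // hypothetical generated type
// "t" and "True" now parse as true, in addition to "true" and "1".
if err := proto.UnmarshalText(`enabled: True`, &f); err != nil {
	log.Fatal(err)
}
fmt.Println(f.GetEnabled()) // true
```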


@ -4,12 +4,12 @@ Consul API client
This package provides the `api` package which attempts to
provide programmatic access to the full Consul API.
Currently, all of the Consul APIs included in version 0.3 are supported.
Currently, all of the Consul APIs included in version 0.6.0 are supported.
Documentation
=============
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api)
The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
Usage
=====
@ -17,13 +17,18 @@ Usage
Below is an example of using the Consul client:
```go
// Get a new client, with KV endpoints
client, _ := api.NewClient(api.DefaultConfig())
// Get a new client
client, err := api.NewClient(api.DefaultConfig())
if err != nil {
panic(err)
}
// Get a handle to the KV API
kv := client.KV()
// PUT a new KV pair
p := &api.KVPair{Key: "foo", Value: []byte("test")}
_, err := kv.Put(p, nil)
_, err = kv.Put(p, nil)
if err != nil {
panic(err)
}
@ -36,4 +41,3 @@ if err != nil {
fmt.Printf("KV: %v", pair)
```


@ -18,11 +18,12 @@ type AgentCheck struct {
// AgentService represents a service known to the agent
type AgentService struct {
ID string
Service string
Tags []string
Port int
Address string
ID string
Service string
Tags []string
Port int
Address string
EnableTagOverride bool
}
// AgentMember represents a cluster member known to the agent
@ -42,13 +43,14 @@ type AgentMember struct {
// AgentServiceRegistration is used to register a new service
type AgentServiceRegistration struct {
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Tags []string `json:",omitempty"`
Port int `json:",omitempty"`
Address string `json:",omitempty"`
Check *AgentServiceCheck
Checks AgentServiceChecks
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Tags []string `json:",omitempty"`
Port int `json:",omitempty"`
Address string `json:",omitempty"`
EnableTagOverride bool `json:",omitempty"`
Check *AgentServiceCheck
Checks AgentServiceChecks
}
// AgentCheckRegistration is used to register a new check
@ -60,16 +62,25 @@ type AgentCheckRegistration struct {
AgentServiceCheck
}
// AgentServiceCheck is used to create an associated
// check for a service
// AgentServiceCheck is used to define a node or service level check
type AgentServiceCheck struct {
Script string `json:",omitempty"`
Interval string `json:",omitempty"`
Timeout string `json:",omitempty"`
TTL string `json:",omitempty"`
HTTP string `json:",omitempty"`
TCP string `json:",omitempty"`
Status string `json:",omitempty"`
Script string `json:",omitempty"`
DockerContainerID string `json:",omitempty"`
Shell string `json:",omitempty"` // Only supported for Docker.
Interval string `json:",omitempty"`
Timeout string `json:",omitempty"`
TTL string `json:",omitempty"`
HTTP string `json:",omitempty"`
TCP string `json:",omitempty"`
Status string `json:",omitempty"`
// In Consul 0.7 and later, checks that are associated with a service
// may also contain this optional DeregisterCriticalServiceAfter field,
// which is a timeout in the same Go time format as Interval and TTL. If
// a check is in the critical state for more than this configured value,
// then its associated service (and all of its associated checks) will
// automatically be deregistered.
DeregisterCriticalServiceAfter string `json:",omitempty"`
}
type AgentServiceChecks []*AgentServiceCheck
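A sketch of wiring up the new DeregisterCriticalServiceAfter field when registering a service, assuming an *api.Client named client and a Consul 0.7+ server; the name, port and URL are placeholders:

```go
reg := &api.AgentServiceRegistration{
	Name: "web",
	Port: 8080,
	Check: &api.AgentServiceCheck{
		HTTP:     "http://127.0.0.1:8080/health",
		Interval: "10s",
		// Deregister the service if the check stays critical for 30m.
		DeregisterCriticalServiceAfter: "30m",
	},
}
if err := client.Agent().ServiceRegister(reg); err != nil {
	log.Fatal(err)
}
```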
@ -194,23 +205,43 @@ func (a *Agent) ServiceDeregister(serviceID string) error {
return nil
}
// PassTTL is used to set a TTL check to the passing state
// PassTTL is used to set a TTL check to the passing state.
//
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
// The client interface will be removed in 0.8 or changed to use
// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
func (a *Agent) PassTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "pass")
return a.updateTTL(checkID, note, "pass")
}
// WarnTTL is used to set a TTL check to the warning state
// WarnTTL is used to set a TTL check to the warning state.
//
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
// The client interface will be removed in 0.8 or changed to use
// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
func (a *Agent) WarnTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "warn")
return a.updateTTL(checkID, note, "warn")
}
// FailTTL is used to set a TTL check to the failing state
// FailTTL is used to set a TTL check to the failing state.
//
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
// The client interface will be removed in 0.8 or changed to use
// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
func (a *Agent) FailTTL(checkID, note string) error {
return a.UpdateTTL(checkID, note, "fail")
return a.updateTTL(checkID, note, "fail")
}
// UpdateTTL is used to update the TTL of a check
func (a *Agent) UpdateTTL(checkID, note, status string) error {
// updateTTL is used to update the TTL of a check. This is the internal
// method that uses the old API that's present in Consul versions prior to
// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed
// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below,
// but keep the old Pass/Warn/Fail methods using the old API under the hood.
//
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
// The client interface will be removed in 0.8 and the server endpoints will
// be removed in 0.9.
func (a *Agent) updateTTL(checkID, note, status string) error {
switch status {
case "pass":
case "warn":
@ -229,6 +260,51 @@ func (a *Agent) UpdateTTL(checkID, note, status string) error {
return nil
}
// checkUpdate is the payload for a PUT for a check update.
type checkUpdate struct {
// Status is one of the api.Health* states: HealthPassing
// ("passing"), HealthWarning ("warning"), or HealthCritical
// ("critical").
Status string
// Output is the information to post to the UI for operators as the
// output of the process that decided to hit the TTL check. This is
// different from the note field that's associated with the check
// itself.
Output string
}
// UpdateTTL is used to update the TTL of a check. This uses the newer API
// that was introduced in Consul 0.6.4 and later. We translate the old status
// strings for compatibility (though a newer version of Consul will still be
// required to use this API).
func (a *Agent) UpdateTTL(checkID, output, status string) error {
switch status {
case "pass", HealthPassing:
status = HealthPassing
case "warn", HealthWarning:
status = HealthWarning
case "fail", HealthCritical:
status = HealthCritical
default:
return fmt.Errorf("Invalid status: %s", status)
}
endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID)
r := a.c.newRequest("PUT", endpoint)
r.obj = &checkUpdate{
Status: status,
Output: output,
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
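A sketch of reporting through the new endpoint, assuming an *api.Client named client; the check ID is illustrative. The Health* constants are preferred, though the legacy "pass"/"warn"/"fail" strings are still translated:

```go
if err := client.Agent().UpdateTTL("service:web", "all good", api.HealthPassing); err != nil {
	log.Fatal(err)
}
```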
// CheckRegister is used to register a new check with
// the local agent
func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {


@ -3,9 +3,11 @@ package api
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
@ -14,6 +16,8 @@ import (
"strconv"
"strings"
"time"
"github.com/hashicorp/go-cleanhttp"
)
// QueryOptions are used to parameterize a query
@ -36,12 +40,18 @@ type QueryOptions struct {
WaitIndex uint64
// WaitTime is used to bound the duration of a wait.
// Defaults to that of the Config, but can be overriden.
// Defaults to that of the Config, but can be overridden.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// Near is used to provide a node name that will sort the results
// in ascending order based on the estimated round trip time from
// that node. Setting this to "_agent" will use the agent's node
// for the sort.
Near string
}
// WriteOptions are used to parameterize a write
@ -70,6 +80,9 @@ type QueryMeta struct {
// How long did the request take
RequestTime time.Duration
// Is address translation enabled for HTTP responses on this agent
AddressTranslationEnabled bool
}
// WriteMeta is used to return meta data about a write
@ -114,12 +127,58 @@ type Config struct {
Token string
}
// DefaultConfig returns a default configuration for the client
// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
// Consul using TLS.
type TLSConfig struct {
// Address is the optional address of the Consul server. The port, if any,
// will be removed from here and this will be set to the ServerName of the
// resulting config.
Address string
// CAFile is the optional path to the CA certificate used for Consul
// communication, defaults to the system bundle if not specified.
CAFile string
// CertFile is the optional path to the certificate for Consul
// communication. If this is set then you need to also set KeyFile.
CertFile string
// KeyFile is the optional path to the private key for Consul communication.
// If this is set then you need to also set CertFile.
KeyFile string
// InsecureSkipVerify if set to true will disable TLS host verification.
InsecureSkipVerify bool
}
// DefaultConfig returns a default configuration for the client. By default this
// will pool and reuse idle connections to Consul. If you have a long-lived
// client object, this is the desired behavior and should make the most efficient
use of the connections to Consul. If you don't reuse a client object, which
// is not recommended, then you may notice idle connections building up over
// time. To avoid this, use the DefaultNonPooledConfig() instead.
func DefaultConfig() *Config {
return defaultConfig(cleanhttp.DefaultPooledTransport)
}
// DefaultNonPooledConfig returns a default configuration for the client which
// does not pool connections. This isn't a recommended configuration because it
// will reconnect to Consul on every request, but this is useful to avoid the
// accumulation of idle connections if you make many client objects during the
// lifetime of your application.
func DefaultNonPooledConfig() *Config {
return defaultConfig(cleanhttp.DefaultTransport)
}
// defaultConfig returns the default configuration for the client, using the
// given function to make the transport.
func defaultConfig(transportFn func() *http.Transport) *Config {
config := &Config{
Address: "127.0.0.1:8500",
Scheme: "http",
HttpClient: http.DefaultClient,
Address: "127.0.0.1:8500",
Scheme: "http",
HttpClient: &http.Client{
Transport: transportFn(),
},
}
if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" {
@ -164,17 +223,70 @@ func DefaultConfig() *Config {
}
if !doVerify {
config.HttpClient.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
tlsClientConfig, err := SetupTLSConfig(&TLSConfig{
InsecureSkipVerify: true,
})
// We don't expect this to fail given that we aren't
// parsing any of the input, but we panic just in case
// since this doesn't have an error return.
if err != nil {
panic(err)
}
transport := transportFn()
transport.TLSClientConfig = tlsClientConfig
config.HttpClient.Transport = transport
}
}
return config
}
// SetupTLSConfig is used to generate a *tls.Config from the given TLSConfig,
// useful for talking to Consul using TLS.
func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
tlsClientConfig := &tls.Config{
InsecureSkipVerify: tlsConfig.InsecureSkipVerify,
}
if tlsConfig.Address != "" {
server := tlsConfig.Address
hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]")
if hasPort {
var err error
server, _, err = net.SplitHostPort(server)
if err != nil {
return nil, err
}
}
tlsClientConfig.ServerName = server
}
if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
if err != nil {
return nil, err
}
tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
}
if tlsConfig.CAFile != "" {
data, err := ioutil.ReadFile(tlsConfig.CAFile)
if err != nil {
return nil, fmt.Errorf("failed to read CA file: %v", err)
}
caPool := x509.NewCertPool()
if !caPool.AppendCertsFromPEM(data) {
return nil, fmt.Errorf("failed to parse CA certificate")
}
tlsClientConfig.RootCAs = caPool
}
return tlsClientConfig, nil
}
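A sketch of building a TLS-enabled client from the helper above, assuming the cleanhttp and net/http imports shown earlier in this diff; the address and CA path are placeholders:

```go
tlsCfg, err := api.SetupTLSConfig(&api.TLSConfig{
	Address: "consul.example.com:8501", // placeholder
	CAFile:  "/etc/consul/ca.pem",      // placeholder
})
if err != nil {
	log.Fatal(err)
}
transport := cleanhttp.DefaultPooledTransport()
transport.TLSClientConfig = tlsCfg

cfg := api.DefaultConfig()
cfg.Address = "consul.example.com:8501"
cfg.Scheme = "https"
cfg.HttpClient = &http.Client{Transport: transport}

client, err := api.NewClient(cfg)
if err != nil {
	log.Fatal(err)
}
```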
// Client provides a client to the Consul API
type Client struct {
config Config
@ -198,12 +310,12 @@ func NewClient(config *Config) (*Client, error) {
}
if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 {
trans := cleanhttp.DefaultTransport()
trans.Dial = func(_, _ string) (net.Conn, error) {
return net.Dial("unix", parts[1])
}
config.HttpClient = &http.Client{
Transport: &http.Transport{
Dial: func(_, _ string) (net.Conn, error) {
return net.Dial("unix", parts[1])
},
},
Transport: trans,
}
config.Address = parts[1]
}
@ -221,6 +333,7 @@ type request struct {
url *url.URL
params url.Values
body io.Reader
header http.Header
obj interface{}
}
@ -246,13 +359,38 @@ func (r *request) setQueryOptions(q *QueryOptions) {
r.params.Set("wait", durToMsec(q.WaitTime))
}
if q.Token != "" {
r.params.Set("token", q.Token)
r.header.Set("X-Consul-Token", q.Token)
}
if q.Near != "" {
r.params.Set("near", q.Near)
}
}
// durToMsec converts a duration to a millisecond specified string
// durToMsec converts a duration to a millisecond specified string. If the
// user selected a positive value that rounds to 0 ms, then we will use 1 ms
// so they get a short delay, otherwise Consul will translate the 0 ms into
// a huge default delay.
func durToMsec(dur time.Duration) string {
return fmt.Sprintf("%dms", dur/time.Millisecond)
ms := dur / time.Millisecond
if dur > 0 && ms == 0 {
ms = 1
}
return fmt.Sprintf("%dms", ms)
}
// serverError is a string we look for to detect 500 errors.
const serverError = "Unexpected response code: 500"
// IsServerError returns true for 500 errors from the Consul servers, these are
// usually retryable at a later time.
func IsServerError(err error) bool {
if err == nil {
return false
}
// TODO (slackpad) - Make a real error type here instead of using
// a string check.
return strings.Contains(err.Error(), serverError)
}
// setWriteOptions is used to annotate the request with
@ -265,7 +403,7 @@ func (r *request) setWriteOptions(q *WriteOptions) {
r.params.Set("dc", q.Datacenter)
}
if q.Token != "" {
r.params.Set("token", q.Token)
r.header.Set("X-Consul-Token", q.Token)
}
}
@ -292,6 +430,7 @@ func (r *request) toHTTP() (*http.Request, error) {
req.URL.Host = r.url.Host
req.URL.Scheme = r.url.Scheme
req.Host = r.url.Host
req.Header = r.header
// Setup auth
if r.config.HttpAuth != nil {
@ -312,6 +451,7 @@ func (c *Client) newRequest(method, path string) *request {
Path: path,
},
params: make(map[string][]string),
header: make(http.Header),
}
if c.config.Datacenter != "" {
r.params.Set("dc", c.config.Datacenter)
@ -320,7 +460,7 @@ func (c *Client) newRequest(method, path string) *request {
r.params.Set("wait", durToMsec(r.config.WaitTime))
}
if c.config.Token != "" {
r.params.Set("token", r.config.Token)
r.header.Set("X-Consul-Token", r.config.Token)
}
return r
}
@ -405,6 +545,15 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
default:
q.KnownLeader = false
}
// Parse X-Consul-Translate-Addresses
switch header.Get("X-Consul-Translate-Addresses") {
case "true":
q.AddressTranslationEnabled = true
default:
q.AddressTranslationEnabled = false
}
return nil
}


@ -1,18 +1,21 @@
package api
type Node struct {
Node string
Address string
Node string
Address string
TaggedAddresses map[string]string
}
type CatalogService struct {
Node string
Address string
ServiceID string
ServiceName string
ServiceAddress string
ServiceTags []string
ServicePort int
Node string
Address string
TaggedAddresses map[string]string
ServiceID string
ServiceName string
ServiceAddress string
ServiceTags []string
ServicePort int
ServiceEnableTagOverride bool
}
type CatalogNode struct {
@ -21,11 +24,12 @@ type CatalogNode struct {
}
type CatalogRegistration struct {
Node string
Address string
Datacenter string
Service *AgentService
Check *AgentCheck
Node string
Address string
TaggedAddresses map[string]string
Datacenter string
Service *AgentService
Check *AgentCheck
}
type CatalogDeregistration struct {

vendor/github.com/hashicorp/consul/api/coordinate.go generated vendored Normal file

@ -0,0 +1,66 @@
package api
import (
"github.com/hashicorp/serf/coordinate"
)
// CoordinateEntry represents a node and its associated network coordinate.
type CoordinateEntry struct {
Node string
Coord *coordinate.Coordinate
}
// CoordinateDatacenterMap represents a datacenter and its associated WAN
// nodes and their associated coordinates.
type CoordinateDatacenterMap struct {
Datacenter string
Coordinates []CoordinateEntry
}
// Coordinate can be used to query the coordinate endpoints
type Coordinate struct {
c *Client
}
// Coordinate returns a handle to the coordinate endpoints
func (c *Client) Coordinate() *Coordinate {
return &Coordinate{c}
}
// Datacenters is used to return the coordinates of all the servers in the WAN
// pool.
func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
_, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*CoordinateDatacenterMap
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Nodes is used to return the coordinates of all the nodes in the LAN pool.
func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/coordinate/nodes")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CoordinateEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
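A sketch of querying the new endpoints, assuming an *api.Client named client:

```go
entries, _, err := client.Coordinate().Nodes(nil)
if err != nil {
	log.Fatal(err)
}
for _, e := range entries {
	fmt.Println(e.Node, e.Coord.Vec) // Vec comes from serf's coordinate package
}
```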


@ -4,6 +4,15 @@ import (
"fmt"
)
const (
// HealthAny is special, and is used as a wild card,
// not as a specific state.
HealthAny = "any"
HealthPassing = "passing"
HealthWarning = "warning"
HealthCritical = "critical"
)
// HealthCheck is used to represent a single check
type HealthCheck struct {
Node string
@ -85,7 +94,7 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions)
r.params.Set("tag", tag)
}
if passingOnly {
r.params.Set("passing", "1")
r.params.Set(HealthPassing, "1")
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
@ -104,15 +113,14 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions)
return out, qm, nil
}
// State is used to retreive all the checks in a given state.
// State is used to retrieve all the checks in a given state.
// The wildcard "any" state can also be used for all checks.
func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
switch state {
case "any":
case "warning":
case "critical":
case "passing":
case "unknown":
case HealthAny:
case HealthWarning:
case HealthCritical:
case HealthPassing:
default:
return nil, nil, fmt.Errorf("Unsupported state: %v", state)
}
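A sketch of using the new Health* constants with State, assuming an *api.Client named client:

```go
// Fetch every check currently in the critical state.
checks, _, err := client.Health().State(api.HealthCritical, nil)
if err != nil {
	log.Fatal(err)
}
for _, c := range checks {
	fmt.Println(c.Node, c.CheckID, c.Status)
}
```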

View file

@ -11,18 +11,77 @@ import (
// KVPair is used to represent a single K/V entry
type KVPair struct {
Key string
// Key is the name of the key. It is also part of the URL path when accessed
// via the API.
Key string
// CreateIndex holds the index corresponding the creation of this KVPair. This
// is a read-only field.
CreateIndex uint64
// ModifyIndex is used for the Check-And-Set operations and can also be fed
// back into the WaitIndex of the QueryOptions in order to perform blocking
// queries.
ModifyIndex uint64
LockIndex uint64
Flags uint64
Value []byte
Session string
// LockIndex holds the index corresponding to a lock on this key, if any. This
// is a read-only field.
LockIndex uint64
// Flags are any user-defined flags on the key. It is up to the implementer
// to check these values, since Consul does not treat them specially.
Flags uint64
// Value is the value for the key. This can be any value, but it will be
// base64 encoded upon transport.
Value []byte
// Session is a string representing the ID of the session. Any other
// interactions with this key over the same session must specify the same
// session ID.
Session string
}
// KVPairs is a list of KVPair objects
type KVPairs []*KVPair
// KVOp constants give possible operations available in a KVTxn.
type KVOp string
const (
KVSet KVOp = "set"
KVDelete = "delete"
KVDeleteCAS = "delete-cas"
KVDeleteTree = "delete-tree"
KVCAS = "cas"
KVLock = "lock"
KVUnlock = "unlock"
KVGet = "get"
KVGetTree = "get-tree"
KVCheckSession = "check-session"
KVCheckIndex = "check-index"
)
// KVTxnOp defines a single operation inside a transaction.
type KVTxnOp struct {
Verb string
Key string
Value []byte
Flags uint64
Index uint64
Session string
}
// KVTxnOps defines a set of operations to be performed inside a single
// transaction.
type KVTxnOps []*KVTxnOp
// KVTxnResponse has the outcome of a transaction.
type KVTxnResponse struct {
Results []*KVPair
Errors TxnErrors
}
// KV is used to manipulate the K/V API
type KV struct {
c *Client
@ -33,7 +92,8 @@ func (c *Client) KV() *KV {
return &KV{c}
}
// Get is used to lookup a single key
// Get is used to lookup a single key. The returned pointer
// to the KVPair will be nil if the key does not exist.
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
resp, qm, err := k.getInternal(key, nil, q)
if err != nil {
@ -143,7 +203,7 @@ func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
return k.put(p.Key, params, p.Value, q)
}
// Acquire is used for a lock acquisiiton operation. The Key,
// Acquire is used for a lock acquisition operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
@ -238,3 +298,122 @@ func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOption
res := strings.Contains(string(buf.Bytes()), "true")
return res, qm, nil
}
// TxnOp is the internal format we send to Consul. It's not specific to KV,
// though currently only KV operations are supported.
type TxnOp struct {
KV *KVTxnOp
}
// TxnOps is a list of transaction operations.
type TxnOps []*TxnOp
// TxnResult is the internal format we receive from Consul.
type TxnResult struct {
KV *KVPair
}
// TxnResults is a list of TxnResult objects.
type TxnResults []*TxnResult
// TxnError is used to return information about an operation in a transaction.
type TxnError struct {
OpIndex int
What string
}
// TxnErrors is a list of TxnError objects.
type TxnErrors []*TxnError
// TxnResponse is the internal format we receive from Consul.
type TxnResponse struct {
Results TxnResults
Errors TxnErrors
}
// Txn is used to apply multiple KV operations in a single, atomic transaction.
//
// Note that Go will perform the required base64 encoding on the values
// automatically because the type is a byte slice. Transactions are defined as a
// list of operations to perform, using the KVOp constants and KVTxnOp structure
// to define operations. If any operation fails, none of the changes are applied
// to the state store. Note that this hides the internal raw transaction interface
// and munges the input and output types into KV-specific ones for ease of use.
// If there are more non-KV operations in the future we may break out a new
// transaction API client, but it will be easy to keep this KV-specific variant
// supported.
//
// Even though this is generally a write operation, we take a QueryOptions input
// and return a QueryMeta output. If the transaction contains only read ops, then
// Consul will fast-path it to a different endpoint internally which supports
// consistency controls, but not blocking. If there are write operations then
// the request will always be routed through raft and any consistency settings
// will be ignored.
//
// Here's an example:
//
// ops := KVTxnOps{
// &KVTxnOp{
// Verb: KVLock,
// Key: "test/lock",
// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
// Value: []byte("hello"),
// },
// &KVTxnOp{
// Verb: KVGet,
// Key: "another/key",
// },
// }
// ok, response, _, err := kv.Txn(&ops, nil)
//
// If there is a problem making the transaction request then an error will be
// returned. Otherwise, the ok value will be true if the transaction succeeded
// or false if it was rolled back. The response is a structured return value which
// will have the outcome of the transaction. Its Results member will have entries
for each operation. Deleted keys will have a nil entry in the results, and to save
// space, the Value of each key in the Results will be nil unless the operation
// is a KVGet. If the transaction was rolled back, the Errors member will have
// entries referencing the index of the operation that failed along with an error
// message.
func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
r := k.c.newRequest("PUT", "/v1/txn")
r.setQueryOptions(q)
// Convert into the internal format since this is an all-KV txn.
ops := make(TxnOps, 0, len(txn))
for _, kvOp := range txn {
ops = append(ops, &TxnOp{KV: kvOp})
}
r.obj = ops
rtt, resp, err := k.c.doRequest(r)
if err != nil {
return false, nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
var txnResp TxnResponse
if err := decodeBody(resp, &txnResp); err != nil {
return false, nil, nil, err
}
// Convert from the internal format.
kvResp := KVTxnResponse{
Errors: txnResp.Errors,
}
for _, result := range txnResp.Results {
kvResp.Results = append(kvResp.Results, result.KV)
}
return resp.StatusCode == http.StatusOK, &kvResp, qm, nil
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
}
return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
}

View file

@ -22,9 +22,16 @@ const (
// DefaultLockRetryTime is how long we wait after a failed lock acquisition
// before attempting to do the lock again. This is so that once a lock-delay
// is in affect, we do not hot loop retrying the acquisition.
// is in effect, we do not hot loop retrying the acquisition.
DefaultLockRetryTime = 5 * time.Second
// DefaultMonitorRetryTime is how long we wait after a failed monitor check
// of a lock (500 response code). This allows the monitor to ride out brief
// periods of unavailability, subject to the MonitorRetries setting in the
// lock options which is by default set to 0, disabling this feature. This
// affects locks and semaphores.
DefaultMonitorRetryTime = 2 * time.Second
// LockFlagValue is a magic flag we set to indicate a key
// is being used for a lock. It is used to detect a potential
// conflict with a semaphore.
@ -49,7 +56,7 @@ var (
)
// Lock is used to implement client-side leader election. It is follows the
// algorithm as described here: https://consul.io/docs/guides/leader-election.html.
// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
type Lock struct {
c *Client
opts *LockOptions
@ -62,11 +69,16 @@ type Lock struct {
// LockOptions is used to parameterize the Lock behavior.
type LockOptions struct {
Key string // Must be set and have write permissions
Value []byte // Optional, value to associate with the lock
Session string // Optional, created if not specified
SessionName string // Optional, defaults to DefaultLockSessionName
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
Key string // Must be set and have write permissions
Value []byte // Optional, value to associate with the lock
Session string // Optional, created if not specified
SessionOpts *SessionEntry // Optional, options to use when creating a session
SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given)
SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given)
MonitorRetries int // Optional, defaults to 0 which means no retries
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime
LockTryOnce bool // Optional, defaults to false which means try forever
}
// LockKey returns a handle to a lock struct which can be used
@ -96,6 +108,12 @@ func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
}
}
if opts.MonitorRetryTime == 0 {
opts.MonitorRetryTime = DefaultMonitorRetryTime
}
if opts.LockWaitTime == 0 {
opts.LockWaitTime = DefaultLockWaitTime
}
l := &Lock{
c: c,
opts: opts,
@ -146,9 +164,11 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Setup the query options
kv := l.c.KV()
qOpts := &QueryOptions{
WaitTime: DefaultLockWaitTime,
WaitTime: l.opts.LockWaitTime,
}
start := time.Now()
attempts := 0
WAIT:
// Check if we should quit
select {
@ -157,6 +177,17 @@ WAIT:
default:
}
// Handle the one-shot mode.
if l.opts.LockTryOnce && attempts > 0 {
elapsed := time.Now().Sub(start)
if elapsed > qOpts.WaitTime {
return nil, nil
}
qOpts.WaitTime -= elapsed
}
attempts++
// Look for an existing lock, blocking until not taken
pair, meta, err := kv.Get(l.opts.Key, qOpts)
if err != nil {
@ -299,9 +330,12 @@ func (l *Lock) Destroy() error {
// createSession is used to create a new managed session
func (l *Lock) createSession() (string, error) {
session := l.c.Session()
se := &SessionEntry{
Name: l.opts.SessionName,
TTL: l.opts.SessionTTL,
se := l.opts.SessionOpts
if se == nil {
se = &SessionEntry{
Name: l.opts.SessionName,
TTL: l.opts.SessionTTL,
}
}
id, _, err := session.Create(se, nil)
if err != nil {
@ -327,8 +361,20 @@ func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
kv := l.c.KV()
opts := &QueryOptions{RequireConsistent: true}
WAIT:
retries := l.opts.MonitorRetries
RETRY:
pair, meta, err := kv.Get(l.opts.Key, opts)
if err != nil {
// If configured we can try to ride out a brief Consul unavailability
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && IsServerError(err) {
time.Sleep(l.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0
goto RETRY
}
return
}
if pair != nil && pair.Session == session {
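A sketch of the new one-shot and monitor-retry options together, assuming an *api.Client named client and the usual imports; the key name is illustrative:

```go
lock, err := client.LockOpts(&api.LockOptions{
	Key:            "service/web/leader", // illustrative key
	LockTryOnce:    true,                 // give up after one wait window
	LockWaitTime:   15 * time.Second,
	MonitorRetries: 3, // ride out brief 500s from Consul
})
if err != nil {
	log.Fatal(err)
}
leaderCh, err := lock.Lock(nil)
if err != nil {
	log.Fatal(err)
}
if leaderCh == nil {
	return // held elsewhere; LockTryOnce gave up after LockWaitTime
}
<-leaderCh // channel closes if leadership is lost
```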

vendor/github.com/hashicorp/consul/api/operator.go generated vendored Normal file

@ -0,0 +1,81 @@
package api
// Operator can be used to perform low-level operator tasks for Consul.
type Operator struct {
c *Client
}
// Operator returns a handle to the operator endpoints.
func (c *Client) Operator() *Operator {
return &Operator{c}
}
// RaftServer has information about a server in the Raft configuration.
type RaftServer struct {
// ID is the unique ID for the server. These are currently the same
// as the address, but they will be changed to a real GUID in a future
// release of Consul.
ID string
// Node is the node name of the server, as known by Consul, or this
// will be set to "(unknown)" otherwise.
Node string
// Address is the IP:port of the server, used for Raft communications.
Address string
// Leader is true if this server is the current cluster leader.
Leader bool
// Voter is true if this server has a vote in the cluster. This might
// be false if the server is staging and still coming online, or if
// it's a non-voting server, which will be added in a future release of
// Consul.
Voter bool
}
// RaftConfiguration is returned when querying for the current Raft configuration.
type RaftConfiguration struct {
// Servers has the list of servers in the Raft configuration.
Servers []*RaftServer
// Index has the Raft index of this configuration.
Index uint64
}
// RaftGetConfiguration is used to query the current Raft peer set.
func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out RaftConfiguration
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
// quorum but no longer known to Serf or the catalog) by address in the form of
// "IP:port".
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
r.setWriteOptions(q)
// TODO (slackpad) Currently we made address a query parameter. Once
// IDs are in place this will be DELETE /v1/operator/raft/peer/<id>.
r.params.Set("address", string(address))
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}

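The two endpoints above are enough for a small health check of the Raft peer set; a hedged sketch, assuming an *api.Client constructed elsewhere:

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// dumpRaftPeers prints every server in the current Raft configuration.
func dumpRaftPeers(client *api.Client) error {
	cfg, err := client.Operator().RaftGetConfiguration(nil)
	if err != nil {
		return err
	}
	fmt.Printf("raft index %d\n", cfg.Index)
	for _, s := range cfg.Servers {
		fmt.Printf("  %s at %s (leader=%v, voter=%v)\n", s.Node, s.Address, s.Leader, s.Voter)
	}
	return nil
}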
194
vendor/github.com/hashicorp/consul/api/prepared_query.go generated vendored Normal file
View file

@ -0,0 +1,194 @@
package api
// QueryDatacenterOptions sets options about how we fail over if there are no
// healthy nodes in the local datacenter.
type QueryDatacenterOptions struct {
// NearestN is set to the number of remote datacenters to try, based on
// network coordinates.
NearestN int
// Datacenters is a fixed list of datacenters to try after NearestN. We
// never try a datacenter multiple times, so those are subtracted from
// this list before proceeding.
Datacenters []string
}
// QueryDNSOptions controls settings when query results are served over DNS.
type QueryDNSOptions struct {
// TTL is the time to live for the served DNS results.
TTL string
}
// ServiceQuery is used to query for a set of healthy nodes offering a specific
// service.
type ServiceQuery struct {
// Service is the service to query.
Service string
// Near allows baking in the name of a node to automatically distance-
// sort from. The magic "_agent" value is supported, which sorts near
// the agent that initiated the request by default.
Near string
// Failover controls what we do if there are no healthy nodes in the
// local datacenter.
Failover QueryDatacenterOptions
// If OnlyPassing is true then we will only include nodes with passing
// health checks (critical AND warning checks will cause a node to be
// discarded).
OnlyPassing bool
// Tags are a set of required and/or disallowed tags. If a tag is in
// this list it must be present. If the tag is preceded with "!" then
// it is disallowed.
Tags []string
}
// QueryTemplate carries the arguments for creating a templated query.
type QueryTemplate struct {
// Type specifies the type of the query template. Currently only
// "name_prefix_match" is supported. This field is required.
Type string
// Regexp allows specifying a regex pattern to match against the name
// of the query being executed.
Regexp string
}
// PreparedQueryDefinition defines a complete prepared query.
type PreparedQueryDefinition struct {
// ID is this UUID-based ID for the query, always generated by Consul.
ID string
// Name is an optional friendly name for the query supplied by the
// user. NOTE - if this feature is used then it will reduce the security
// of any read ACL associated with this query/service since this name
// can be used to locate nodes without supplying any ACL.
Name string
// Session is an optional session to tie this query's lifetime to. If
// this is omitted then the query will not expire.
Session string
// Token is the ACL token used when the query was created, and it is
// used when a query is subsequently executed. This token, or a token
// with management privileges, must be used to change the query later.
Token string
// Service defines a service query (leaving things open for other types
// later).
Service ServiceQuery
// DNS has options that control how the results of this query are
// served over DNS.
DNS QueryDNSOptions
// Template is used to pass through the arguments for creating a
// prepared query with an attached template. If a template is given,
// interpolations are possible in other struct fields.
Template QueryTemplate
}
// PreparedQueryExecuteResponse has the results of executing a query.
type PreparedQueryExecuteResponse struct {
// Service is the service that was queried.
Service string
// Nodes has the nodes that were output by the query.
Nodes []ServiceEntry
// DNS has the options for serving these results over DNS.
DNS QueryDNSOptions
// Datacenter is the datacenter that these results came from.
Datacenter string
// Failovers is a count of how many times we had to query a remote
// datacenter.
Failovers int
}
// PreparedQuery can be used to query the prepared query endpoints.
type PreparedQuery struct {
c *Client
}
// PreparedQuery returns a handle to the prepared query endpoints.
func (c *Client) PreparedQuery() *PreparedQuery {
return &PreparedQuery{c}
}
// Create makes a new prepared query. The ID of the new query is returned.
func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
r := c.c.newRequest("POST", "/v1/query")
r.setWriteOptions(q)
r.obj = query
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Update makes updates to an existing prepared query.
func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
return c.c.write("/v1/query/"+query.ID, query, nil, q)
}
// List is used to fetch all the prepared queries (always requires a management
// token).
func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
var out []*PreparedQueryDefinition
qm, err := c.c.query("/v1/query", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Get is used to fetch a specific prepared query.
func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
var out []*PreparedQueryDefinition
qm, err := c.c.query("/v1/query/"+queryID, &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Delete is used to delete a specific prepared query.
func (c *PreparedQuery) Delete(queryID string, q *QueryOptions) (*QueryMeta, error) {
r := c.c.newRequest("DELETE", "/v1/query/"+queryID)
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
return qm, nil
}
// Execute is used to execute a specific prepared query. You can execute using
// a query ID or name.
func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
var out *PreparedQueryExecuteResponse
qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}

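A typical round trip through these endpoints, as a sketch: the service and query names are invented, and the *api.Client is assumed to exist.

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// createAndRun registers a prepared query and executes it once.
func createAndRun(client *api.Client) error {
	pq := client.PreparedQuery()
	id, _, err := pq.Create(&api.PreparedQueryDefinition{
		Name: "nearest-web", // hypothetical query name
		Service: api.ServiceQuery{
			Service:     "web", // hypothetical service
			Near:        "_agent",
			OnlyPassing: true,
			Failover:    api.QueryDatacenterOptions{NearestN: 2},
		},
	}, nil)
	if err != nil {
		return err
	}
	resp, _, err := pq.Execute(id, nil)
	if err != nil {
		return err
	}
	fmt.Printf("%d nodes from %s after %d failover(s)\n",
		len(resp.Nodes), resp.Datacenter, resp.Failovers)
	return nil
}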
View file

@ -63,12 +63,16 @@ type Semaphore struct {
// SemaphoreOptions is used to parameterize the Semaphore
type SemaphoreOptions struct {
Prefix string // Must be set and have write permissions
Limit int // Must be set, and be positive
Value []byte // Optional, value to associate with the contender entry
Session string // OPtional, created if not specified
SessionName string // Optional, defaults to DefaultLockSessionName
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
Prefix string // Must be set and have write permissions
Limit int // Must be set, and be positive
Value []byte // Optional, value to associate with the contender entry
Session string // Optional, created if not specified
SessionName string // Optional, defaults to DefaultLockSessionName
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
MonitorRetries int // Optional, defaults to 0 which means no retries
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
SemaphoreTryOnce bool // Optional, defaults to false which means try forever
}
// semaphoreLock is written under the DefaultSemaphoreKey and
@ -115,6 +119,12 @@ func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
}
}
if opts.MonitorRetryTime == 0 {
opts.MonitorRetryTime = DefaultMonitorRetryTime
}
if opts.SemaphoreWaitTime == 0 {
opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
}
s := &Semaphore{
c: c,
opts: opts,
@ -123,7 +133,7 @@ func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
}
// Acquire attempts to reserve a slot in the semaphore, blocking until
// success, interrupted via the stopCh or an error is encounted.
// success, interrupted via the stopCh or an error is encountered.
// Providing a non-nil stopCh can be used to abort the attempt.
// On success, a channel is returned that represents our slot.
// This channel could be closed at any time due to session invalidation,
@ -172,9 +182,11 @@ func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Setup the query options
qOpts := &QueryOptions{
WaitTime: DefaultSemaphoreWaitTime,
WaitTime: s.opts.SemaphoreWaitTime,
}
start := time.Now()
attempts := 0
WAIT:
// Check if we should quit
select {
@ -183,6 +195,17 @@ WAIT:
default:
}
// Handle the one-shot mode.
if s.opts.SemaphoreTryOnce && attempts > 0 {
elapsed := time.Now().Sub(start)
if elapsed > qOpts.WaitTime {
return nil, nil
}
qOpts.WaitTime -= elapsed
}
attempts++
// Read the prefix
pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
if err != nil {
@ -460,8 +483,20 @@ func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
kv := s.c.KV()
opts := &QueryOptions{RequireConsistent: true}
WAIT:
retries := s.opts.MonitorRetries
RETRY:
pairs, meta, err := kv.List(s.opts.Prefix, opts)
if err != nil {
// If configured we can try to ride out a brief Consul unavailability
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && IsServerError(err) {
time.Sleep(s.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0
goto RETRY
}
return
}
lockPair := s.findLock(pairs)

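The one-shot and monitor-retry semaphore options combine as follows — a minimal sketch with an invented prefix, assuming the Acquire/Release API of this package:

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

// acquireSlot tries once, within SemaphoreWaitTime, to grab one of two slots.
func acquireSlot(client *api.Client) error {
	sema, err := client.SemaphoreOpts(&api.SemaphoreOptions{
		Prefix:            "service/demo/sema", // hypothetical prefix
		Limit:             2,
		MonitorRetries:    3,
		MonitorRetryTime:  2 * time.Second,
		SemaphoreTryOnce:  true,
		SemaphoreWaitTime: 10 * time.Second,
	})
	if err != nil {
		return err
	}
	slotCh, err := sema.Acquire(nil)
	if err != nil {
		return err
	}
	if slotCh == nil {
		log.Print("no slot free within SemaphoreWaitTime")
		return nil
	}
	defer sema.Release()
	// ... do work; slotCh is closed if the slot is lost.
	return nil
}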
View file

@ -1,6 +1,7 @@
package api
import (
"errors"
"fmt"
"time"
)
@ -16,6 +17,8 @@ const (
SessionBehaviorDelete = "delete"
)
var ErrSessionExpired = errors.New("session expired")
// SessionEntry represents a session in consul
type SessionEntry struct {
CreateIndex uint64
@ -102,7 +105,7 @@ func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta,
return out.ID, wm, nil
}
// Destroy invalides a given session
// Destroy invalidates a given session
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
if err != nil {
@ -113,11 +116,26 @@ func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
// Renew renews the TTL on a given session
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
var entries []*SessionEntry
wm, err := s.c.write("/v1/session/renew/"+id, nil, &entries, q)
r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
r.setWriteOptions(q)
rtt, resp, err := s.c.doRequest(r)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
if resp.StatusCode == 404 {
return nil, wm, nil
} else if resp.StatusCode != 200 {
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
}
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, fmt.Errorf("Failed to read response: %v", err)
}
if len(entries) > 0 {
return entries[0], wm, nil
}
@ -149,9 +167,7 @@ func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, d
continue
}
if entry == nil {
waitDur = time.Second
lastErr = fmt.Errorf("No SessionEntry returned")
continue
return ErrSessionExpired
}
// Handle the server updating the TTL

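With Renew now surfacing expiry as a nil entry and RenewPeriodic returning ErrSessionExpired, the calling pattern looks roughly like this (the TTL, the session name, and the final done-channel parameter of RenewPeriodic, truncated above, are assumptions):

import (
	"log"

	"github.com/hashicorp/consul/api"
)

// keepAlive renews a session until doneCh is closed or the session expires.
func keepAlive(client *api.Client, doneCh chan struct{}) {
	session := client.Session()
	id, _, err := session.Create(&api.SessionEntry{
		Name:     "worker", // hypothetical name
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	err = session.RenewPeriodic("15s", id, nil, doneCh)
	if err == api.ErrSessionExpired {
		log.Print("session expired; recreate it before reacquiring any locks")
	}
}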
47
vendor/github.com/hashicorp/consul/api/snapshot.go generated vendored Normal file
View file

@ -0,0 +1,47 @@
package api
import (
"io"
)
// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
// Consul's internal state and restore snapshots for disaster recovery.
type Snapshot struct {
c *Client
}
// Snapshot returns a handle that exposes the snapshot endpoints.
func (c *Client) Snapshot() *Snapshot {
return &Snapshot{c}
}
// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
// data to save. If this doesn't return an error, then it's the responsibility
// of the caller to close it. Only a subset of the QueryOptions are supported:
// Datacenter, AllowStale, and Token.
func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/snapshot")
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
return resp.Body, qm, nil
}
// Restore streams in an existing snapshot and attempts to restore it.
func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
r := s.c.newRequest("PUT", "/v1/snapshot")
r.body = in
r.setWriteOptions(q)
_, _, err := requireOK(s.c.doRequest(r))
if err != nil {
return err
}
return nil
}

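Save hands back a stream the caller must close; Restore accepts one. A small backup-then-restore sketch with a placeholder file path:

import (
	"io"
	"os"

	"github.com/hashicorp/consul/api"
)

// backupAndRestore writes a snapshot to disk and immediately restores it.
func backupAndRestore(client *api.Client) error {
	snap := client.Snapshot()

	rc, _, err := snap.Save(nil)
	if err != nil {
		return err
	}
	defer rc.Close() // on success, closing the body is the caller's job

	f, err := os.Create("consul.snap") // placeholder path
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, rc); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}

	in, err := os.Open("consul.snap")
	if err != nil {
		return err
	}
	defer in.Close()
	return snap.Restore(nil, in)
}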
363
vendor/github.com/hashicorp/go-cleanhttp/LICENSE generated vendored Normal file
View file

@ -0,0 +1,363 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

30
vendor/github.com/hashicorp/go-cleanhttp/README.md generated vendored Normal file
View file

@ -0,0 +1,30 @@
# cleanhttp
Functions for accessing "clean" Go http.Client values
-------------
The Go standard library contains a default `http.Client` called
`http.DefaultClient`. It is a common idiom in Go code to start with
`http.DefaultClient` and tweak it as necessary, and in fact, this is
encouraged; from the `http` package documentation:
> The Client's Transport typically has internal state (cached TCP connections),
so Clients should be reused instead of created as needed. Clients are safe for
concurrent use by multiple goroutines.
Unfortunately, this is a shared value, and it is not uncommon for libraries to
assume that they are free to modify it at will. With enough dependencies, it
can be very easy to encounter strange problems and race conditions due to
manipulation of this shared value across libraries and goroutines (clients are
safe for concurrent use, but writing values to the client struct itself is not
protected).
Making things worse is the fact that a bare `http.Client` will use a default
`http.Transport` called `http.DefaultTransport`, which is another global value
that behaves the same way. So it is not enough to simply replace
`http.DefaultClient` with `&http.Client{}`.
This repository provides some simple functions to get a "clean" `http.Client`
-- one that uses the same default values as the Go standard library, but
returns a client that does not share any state with other clients.

53
vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go generated vendored Normal file
View file

@ -0,0 +1,53 @@
package cleanhttp
import (
"net"
"net/http"
"time"
)
// DefaultTransport returns a new http.Transport with the same default values
// as http.DefaultTransport, but with idle connections and keepalives disabled.
func DefaultTransport() *http.Transport {
transport := DefaultPooledTransport()
transport.DisableKeepAlives = true
transport.MaxIdleConnsPerHost = -1
return transport
}
// DefaultPooledTransport returns a new http.Transport with similar default
// values to http.DefaultTransport. Do not use this for transient transports as
// it can leak file descriptors over time. Only use this for transports that
// will be re-used for the same host(s).
func DefaultPooledTransport() *http.Transport {
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
DisableKeepAlives: false,
MaxIdleConnsPerHost: 1,
}
return transport
}
// DefaultClient returns a new http.Client with similar default values to
// http.Client, but with a non-shared Transport, idle connections disabled, and
// keepalives disabled.
func DefaultClient() *http.Client {
return &http.Client{
Transport: DefaultTransport(),
}
}
// DefaultPooledClient returns a new http.Client with the same default values
// as http.Client, but with a shared Transport. Do not use this function
// for transient clients as it can leak file descriptors over time. Only use
// this for clients that will be re-used for the same host(s).
func DefaultPooledClient() *http.Client {
return &http.Client{
Transport: DefaultPooledTransport(),
}
}

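The split between the transient and pooled variants matters in practice; a short sketch (the URL is a placeholder):

import "github.com/hashicorp/go-cleanhttp"

// fetchOnce uses the keepalive-free client for a one-off request, then a
// pooled client for repeated calls to the same host.
func fetchOnce() error {
	c := cleanhttp.DefaultClient()
	resp, err := c.Get("https://example.com/") // placeholder URL
	if err != nil {
		return err
	}
	resp.Body.Close()

	// For many requests to the same host(s), build one pooled client and
	// keep reusing it; per the comments above, don't create these per call.
	pooled := cleanhttp.DefaultPooledClient()
	resp, err = pooled.Get("https://example.com/")
	if err != nil {
		return err
	}
	return resp.Body.Close()
}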
20
vendor/github.com/hashicorp/go-cleanhttp/doc.go generated vendored Normal file
View file

@ -0,0 +1,20 @@
// Package cleanhttp offers convenience utilities for acquiring "clean"
// http.Transport and http.Client structs.
//
// Values set on http.DefaultClient and http.DefaultTransport affect all
// callers. This can have detrimental effects, especially in TLS contexts,
// where client or root certificates set to talk to multiple endpoints can end
// up displacing each other, leading to hard-to-debug issues. This package
// provides non-shared http.Client and http.Transport structs to ensure that
// the configuration will not be overwritten by other parts of the application
// or dependencies.
//
// The DefaultClient and DefaultTransport functions disable idle connections
// and keepalives. Without ensuring that idle connections are closed before
// garbage collection, short-term clients/transports can leak file descriptors,
// eventually leading to "too many open files" errors. If you will be
// connecting to the same hosts repeatedly from the same client, you can use
// DefaultPooledClient to receive a client that has connection pooling
// semantics similar to http.DefaultClient.
//
package cleanhttp

354
vendor/github.com/hashicorp/serf/LICENSE generated vendored Normal file
View file

@ -0,0 +1,354 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

180
vendor/github.com/hashicorp/serf/coordinate/client.go generated vendored Normal file
View file

@ -0,0 +1,180 @@
package coordinate
import (
"fmt"
"math"
"sort"
"sync"
"time"
)
// Client manages the estimated network coordinate for a given node, and adjusts
// it as the node observes round trip times and estimated coordinates from other
// nodes. The core algorithm is based on Vivaldi, see the documentation for Config
// for more details.
type Client struct {
// coord is the current estimate of the client's network coordinate.
coord *Coordinate
// origin is a coordinate sitting at the origin.
origin *Coordinate
// config contains the tuning parameters that govern the performance of
// the algorithm.
config *Config
// adjustmentIndex is the current index into the adjustmentSamples slice.
adjustmentIndex uint
// adjustment is used to store samples for the adjustment calculation.
adjustmentSamples []float64
// latencyFilterSamples is used to store the last several RTT samples,
// keyed by node name. We will use the config's LatencyFilterSamples
// value to determine how many samples we keep, per node.
latencyFilterSamples map[string][]float64
// mutex enables safe concurrent access to the client.
mutex sync.RWMutex
}
// NewClient creates a new Client and verifies the configuration is valid.
func NewClient(config *Config) (*Client, error) {
if !(config.Dimensionality > 0) {
return nil, fmt.Errorf("dimensionality must be >0")
}
return &Client{
coord: NewCoordinate(config),
origin: NewCoordinate(config),
config: config,
adjustmentIndex: 0,
adjustmentSamples: make([]float64, config.AdjustmentWindowSize),
latencyFilterSamples: make(map[string][]float64),
}, nil
}
// GetCoordinate returns a copy of the coordinate for this client.
func (c *Client) GetCoordinate() *Coordinate {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.coord.Clone()
}
// SetCoordinate forces the client's coordinate to a known state.
func (c *Client) SetCoordinate(coord *Coordinate) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.coord = coord.Clone()
}
// ForgetNode removes any client state for the given node.
func (c *Client) ForgetNode(node string) {
c.mutex.Lock()
defer c.mutex.Unlock()
delete(c.latencyFilterSamples, node)
}
// latencyFilter applies a simple moving median filter with a new sample for
// a node. This assumes that the mutex has been locked already.
func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
samples, ok := c.latencyFilterSamples[node]
if !ok {
samples = make([]float64, 0, c.config.LatencyFilterSize)
}
// Add the new sample and trim the list, if needed.
samples = append(samples, rttSeconds)
if len(samples) > int(c.config.LatencyFilterSize) {
samples = samples[1:]
}
c.latencyFilterSamples[node] = samples
// Sort a copy of the samples and return the median.
sorted := make([]float64, len(samples))
copy(sorted, samples)
sort.Float64s(sorted)
return sorted[len(sorted)/2]
}
// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
// assumes that the mutex has been locked already.
func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
const zeroThreshold = 1.0e-6
dist := c.coord.DistanceTo(other).Seconds()
if rttSeconds < zeroThreshold {
rttSeconds = zeroThreshold
}
wrongness := math.Abs(dist-rttSeconds) / rttSeconds
totalError := c.coord.Error + other.Error
if totalError < zeroThreshold {
totalError = zeroThreshold
}
weight := c.coord.Error / totalError
c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
if c.coord.Error > c.config.VivaldiErrorMax {
c.coord.Error = c.config.VivaldiErrorMax
}
delta := c.config.VivaldiCC * weight
force := delta * (rttSeconds - dist)
c.coord = c.coord.ApplyForce(c.config, force, other)
}
// updateAdjustment updates the adjustment portion of the client's coordinate, if
// the feature is enabled. This assumes that the mutex has been locked already.
func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
if c.config.AdjustmentWindowSize == 0 {
return
}
// Note that the existing adjustment factors don't figure in to this
// calculation so we use the raw distance here.
dist := c.coord.rawDistanceTo(other)
c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
sum := 0.0
for _, sample := range c.adjustmentSamples {
sum += sample
}
c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
}
// updateGravity applies a small amount of gravity to pull coordinates towards
// the center of the coordinate system to combat drift. This assumes that the
// mutex is locked already.
func (c *Client) updateGravity() {
dist := c.origin.DistanceTo(c.coord).Seconds()
force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
c.coord = c.coord.ApplyForce(c.config, force, c.origin)
}
// Update takes other, a coordinate for another node, and rtt, a round trip
// time observation for a ping to that node, and updates the estimated position of
// the client's coordinate. Returns the updated coordinate.
func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate {
c.mutex.Lock()
defer c.mutex.Unlock()
rttSeconds := c.latencyFilter(node, rtt.Seconds())
c.updateVivaldi(other, rttSeconds)
c.updateAdjustment(other, rttSeconds)
c.updateGravity()
return c.coord.Clone()
}
// DistanceTo returns the estimated RTT from the client's coordinate to other, the
// coordinate for another node.
func (c *Client) DistanceTo(other *Coordinate) time.Duration {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.coord.DistanceTo(other)
}

70
vendor/github.com/hashicorp/serf/coordinate/config.go generated vendored Normal file
View file

@ -0,0 +1,70 @@
package coordinate
// Config is used to set the parameters of the Vivaldi-based coordinate mapping
// algorithm.
//
// The following references are called out at various points in the documentation
// here:
//
// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
// in the Wild." NSDI. Vol. 7. 2007.
// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
// host-based network coordinate systems." Networking, IEEE/ACM Transactions
// on 18.1 (2010): 27-40.
type Config struct {
// The dimensionality of the coordinate system. As discussed in [2], more
// dimensions improves the accuracy of the estimates up to a point. Per [2]
// we chose 8 dimensions plus a non-Euclidean height.
Dimensionality uint
// VivaldiErrorMax is the default error value when a node hasn't yet made
// any observations. It also serves as an upper limit on the error value in
// case observations cause the error value to increase without bound.
VivaldiErrorMax float64
// VivaldiCE is a tuning factor that controls the maximum impact an
// observation can have on a node's confidence. See [1] for more details.
VivaldiCE float64
// VivaldiCC is a tuning factor that controls the maximum impact an
// observation can have on a node's coordinate. See [1] for more details.
VivaldiCC float64
// AdjustmentWindowSize is a tuning factor that determines how many samples
// we retain to calculate the adjustment factor as discussed in [3]. Setting
// this to zero disables this feature.
AdjustmentWindowSize uint
// HeightMin is the minimum value of the height parameter. Since this
// always must be positive, it will introduce a small amount error, so
// the chosen value should be relatively small compared to "normal"
// coordinates.
HeightMin float64
// LatencyFilterSize is the maximum number of samples that are retained
// per node, in order to compute a median. The intent is to ride out blips
// but still keep the delay low, since our time to probe any given node is
// pretty infrequent. See [2] for more details.
LatencyFilterSize uint
// GravityRho is a tuning factor that sets how much gravity has an effect
// to try to re-center coordinates. See [2] for more details.
GravityRho float64
}
// DefaultConfig returns a Config that has some default values suitable for
// basic testing of the algorithm, but not tuned to any particular type of cluster.
func DefaultConfig() *Config {
return &Config{
Dimensionality: 8,
VivaldiErrorMax: 1.5,
VivaldiCE: 0.25,
VivaldiCC: 0.25,
AdjustmentWindowSize: 20,
HeightMin: 10.0e-6,
LatencyFilterSize: 3,
GravityRho: 150.0,
}
}

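Config and Client combine as below — a minimal sketch that feeds a single made-up RTT observation and reads back the resulting estimate:

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/serf/coordinate"
)

func estimateRTT() {
	cfg := coordinate.DefaultConfig()
	me, err := coordinate.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	peer, err := coordinate.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// One observed round trip of 25ms to "node-a" (both values invented);
	// Update runs the Vivaldi step plus the adjustment and gravity terms.
	me.Update("node-a", peer.GetCoordinate(), 25*time.Millisecond)
	fmt.Println("estimated RTT:", me.DistanceTo(peer.GetCoordinate()))
}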
183
vendor/github.com/hashicorp/serf/coordinate/coordinate.go generated vendored Normal file
View file

@ -0,0 +1,183 @@
package coordinate
import (
"math"
"math/rand"
"time"
)
// Coordinate is a specialized structure for holding network coordinates for the
// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
// to enable this to be serialized. All values in here are in units of seconds.
type Coordinate struct {
// Vec is the Euclidean portion of the coordinate. This is used along
// with the other fields to provide an overall distance estimate. The
// units here are seconds.
Vec []float64
// Error reflects the confidence in the given coordinate and is updated
// dynamically by the Vivaldi Client. This is dimensionless.
Error float64
// Adjustment is a distance offset computed based on a calculation over
// observations from all other nodes over a fixed window and is updated
// dynamically by the Vivaldi Client. The units here are seconds.
Adjustment float64
// Height is a distance offset that accounts for non-Euclidean effects
// which model the access links from nodes to the core Internet. The access
// links are usually set by bandwidth and congestion, and the core links
// usually follow distance based on geography.
Height float64
}
const (
// secondsToNanoseconds is used to convert float seconds to nanoseconds.
secondsToNanoseconds = 1.0e9
// zeroThreshold is used to decide if two coordinates are on top of each
// other.
zeroThreshold = 1.0e-6
)
// DimensionalityConflictError is the panic value used when you try to perform
// operations with incompatible dimensions.
type DimensionalityConflictError struct{}
// Error implements the error interface.
func (e DimensionalityConflictError) Error() string {
return "coordinate dimensionality does not match"
}
// NewCoordinate creates a new coordinate at the origin, using the given config
// to supply key initial values.
func NewCoordinate(config *Config) *Coordinate {
return &Coordinate{
Vec: make([]float64, config.Dimensionality),
Error: config.VivaldiErrorMax,
Adjustment: 0.0,
Height: config.HeightMin,
}
}
// Clone creates an independent copy of this coordinate.
func (c *Coordinate) Clone() *Coordinate {
vec := make([]float64, len(c.Vec))
copy(vec, c.Vec)
return &Coordinate{
Vec: vec,
Error: c.Error,
Adjustment: c.Adjustment,
Height: c.Height,
}
}
// IsCompatibleWith checks whether the two coordinates are dimensionally
// compatible. If this returns true then you are guaranteed not to get
// any runtime errors operating on them.
func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
return len(c.Vec) == len(other.Vec)
}
// ApplyForce returns the result of applying the force from the direction of the
// other coordinate.
func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate {
if !c.IsCompatibleWith(other) {
panic(DimensionalityConflictError{})
}
ret := c.Clone()
unit, mag := unitVectorAt(c.Vec, other.Vec)
ret.Vec = add(ret.Vec, mul(unit, force))
if mag > zeroThreshold {
ret.Height = (ret.Height+other.Height)*force/mag + ret.Height
ret.Height = math.Max(ret.Height, config.HeightMin)
}
return ret
}
// DistanceTo returns the distance between this coordinate and the other
// coordinate, including adjustments.
func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration {
if !c.IsCompatibleWith(other) {
panic(DimensionalityConflictError{})
}
dist := c.rawDistanceTo(other)
adjustedDist := dist + c.Adjustment + other.Adjustment
if adjustedDist > 0.0 {
dist = adjustedDist
}
return time.Duration(dist * secondsToNanoseconds)
}
// rawDistanceTo returns the Vivaldi distance between this coordinate and the
// other coordinate in seconds, not including adjustments. This assumes the
// dimensions have already been checked to be compatible.
func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height
}
// add returns the sum of vec1 and vec2. This assumes the dimensions have
// already been checked to be compatible.
func add(vec1 []float64, vec2 []float64) []float64 {
ret := make([]float64, len(vec1))
for i := range ret {
ret[i] = vec1[i] + vec2[i]
}
return ret
}
// diff returns the difference between vec1 and vec2. This assumes the
// dimensions have already been checked to be compatible.
func diff(vec1 []float64, vec2 []float64) []float64 {
ret := make([]float64, len(vec1))
for i := range ret {
ret[i] = vec1[i] - vec2[i]
}
return ret
}
// mul returns vec multiplied by a scalar factor.
func mul(vec []float64, factor float64) []float64 {
ret := make([]float64, len(vec))
for i := range vec {
ret[i] = vec[i] * factor
}
return ret
}
// magnitude computes the magnitude of the vec.
func magnitude(vec []float64) float64 {
sum := 0.0
for i := range vec {
sum += vec[i] * vec[i]
}
return math.Sqrt(sum)
}
// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
// positions are the same then a random unit vector is returned. We also return
// the distance between the points for use in the later height calculation.
func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
ret := diff(vec1, vec2)
// If the coordinates aren't on top of each other we can normalize.
if mag := magnitude(ret); mag > zeroThreshold {
return mul(ret, 1.0/mag), mag
}
// Otherwise, just return a random unit vector.
for i := range ret {
ret[i] = rand.Float64() - 0.5
}
if mag := magnitude(ret); mag > zeroThreshold {
return mul(ret, 1.0/mag), 0.0
}
// And finally just give up and make a unit vector along the first
// dimension. This should be exceedingly rare.
ret = make([]float64, len(ret))
ret[0] = 1.0
return ret, 0.0
}
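To make the units concrete, here is a minimal usage sketch of the API above (assuming this package's import path github.com/hashicorp/serf/coordinate; the 0.05 value is purely illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	config := coordinate.DefaultConfig()
	a := coordinate.NewCoordinate(config)
	b := coordinate.NewCoordinate(config)

	// Vec is in units of seconds, so this places b roughly 50ms from
	// the origin along the first axis.
	b.Vec[0] = 0.05

	// DistanceTo returns a time.Duration: about 50ms here, plus the
	// two HeightMin terms and any adjustments.
	fmt.Println(a.DistanceTo(b))
}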

187
vendor/github.com/hashicorp/serf/coordinate/phantom.go generated vendored Normal file
View file

@ -0,0 +1,187 @@
package coordinate
import (
"fmt"
"math"
"math/rand"
"time"
)
// GenerateClients returns a slice containing the given number of clients, all
// with the given config.
func GenerateClients(nodes int, config *Config) ([]*Client, error) {
clients := make([]*Client, nodes)
for i := range clients {
client, err := NewClient(config)
if err != nil {
return nil, err
}
clients[i] = client
}
return clients, nil
}
// GenerateLine returns a truth matrix as if all the nodes are in a straight line
// with the given spacing between them.
func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
truth := make([][]time.Duration, nodes)
for i := range truth {
truth[i] = make([]time.Duration, nodes)
}
for i := 0; i < nodes; i++ {
for j := i + 1; j < nodes; j++ {
rtt := time.Duration(j-i) * spacing
truth[i][j], truth[j][i] = rtt, rtt
}
}
return truth
}
// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
// grid with the given spacing between them.
func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
truth := make([][]time.Duration, nodes)
for i := range truth {
truth[i] = make([]time.Duration, nodes)
}
n := int(math.Sqrt(float64(nodes)))
for i := 0; i < nodes; i++ {
for j := i + 1; j < nodes; j++ {
x1, y1 := float64(i%n), float64(i/n)
x2, y2 := float64(j%n), float64(j/n)
dx, dy := x2-x1, y2-y1
dist := math.Sqrt(dx*dx + dy*dy)
rtt := time.Duration(dist * float64(spacing))
truth[i][j], truth[j][i] = rtt, rtt
}
}
return truth
}
// GenerateSplit returns a truth matrix as if half the nodes are close together in
// one location and half the nodes are close together in another. The lan factor
// is used to separate the nodes locally and the wan factor represents the split
// between the two sides.
func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
truth := make([][]time.Duration, nodes)
for i := range truth {
truth[i] = make([]time.Duration, nodes)
}
split := nodes / 2
for i := 0; i < nodes; i++ {
for j := i + 1; j < nodes; j++ {
rtt := lan
if (i <= split && j > split) || (i > split && j <= split) {
rtt += wan
}
truth[i][j], truth[j][i] = rtt, rtt
}
}
return truth
}
// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
// around a circle with the given radius. The first node is at the "center" of the
// circle because it's equidistant from all the other nodes, but we place it at
// double the radius, so it should show up above all the other nodes in height.
func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration {
truth := make([][]time.Duration, nodes)
for i := range truth {
truth[i] = make([]time.Duration, nodes)
}
for i := 0; i < nodes; i++ {
for j := i + 1; j < nodes; j++ {
var rtt time.Duration
if i == 0 {
rtt = 2 * radius
} else {
t1 := 2.0 * math.Pi * float64(i) / float64(nodes)
x1, y1 := math.Cos(t1), math.Sin(t1)
t2 := 2.0 * math.Pi * float64(j) / float64(nodes)
x2, y2 := math.Cos(t2), math.Sin(t2)
dx, dy := x2-x1, y2-y1
dist := math.Sqrt(dx*dx + dy*dy)
rtt = time.Duration(dist * float64(radius))
}
truth[i][j], truth[j][i] = rtt, rtt
}
}
return truth
}
// GenerateRandom returns a truth matrix for a set of nodes with normally
// distributed delays, with the given mean and deviation. The RNG is re-seeded
// so you always get the same matrix for a given size.
func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration {
rand.Seed(1)
truth := make([][]time.Duration, nodes)
for i := range truth {
truth[i] = make([]time.Duration, nodes)
}
for i := 0; i < nodes; i++ {
for j := i + 1; j < nodes; j++ {
rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds()
rtt := time.Duration(rttSeconds * secondsToNanoseconds)
truth[i][j], truth[j][i] = rtt, rtt
}
}
return truth
}
// Simulate runs the given number of cycles using the given list of clients and
// truth matrix. On each cycle, each client will pick a random node and observe
// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for
// each simulation run to get deterministic results (for this algorithm and the
// underlying algorithm which will use random numbers for position vectors when
// starting out with everything at the origin).
func Simulate(clients []*Client, truth [][]time.Duration, cycles int) {
rand.Seed(1)
nodes := len(clients)
for cycle := 0; cycle < cycles; cycle++ {
for i := range clients {
if j := rand.Intn(nodes); j != i {
c := clients[j].GetCoordinate()
rtt := truth[i][j]
node := fmt.Sprintf("node_%d", j)
clients[i].Update(node, c, rtt)
}
}
}
}
// Stats is returned from the Evaluate function with a summary of the algorithm
// performance.
type Stats struct {
ErrorMax float64
ErrorAvg float64
}
// Evaluate uses the coordinates of the given clients to calculate estimated
// distances and compares them with the given truth matrix, returning summary
// stats.
func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) {
nodes := len(clients)
count := 0
for i := 0; i < nodes; i++ {
for j := i + 1; j < nodes; j++ {
est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds()
actual := truth[i][j].Seconds()
relErr := math.Abs(est-actual) / actual
stats.ErrorMax = math.Max(stats.ErrorMax, relErr)
stats.ErrorAvg += relErr
count++
}
}
stats.ErrorAvg /= float64(count)
fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
return
}
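As a rough end-to-end sketch of these test helpers (assuming the Client type from this package's client.go, which this diff vendors alongside):

package main

import (
	"time"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	config := coordinate.DefaultConfig()
	clients, err := coordinate.GenerateClients(10, config)
	if err != nil {
		panic(err)
	}

	// Ten nodes in a line, 10ms apart; run 1000 observation cycles,
	// then Evaluate prints the average and maximum relative error.
	truth := coordinate.GenerateLine(10, 10*time.Millisecond)
	coordinate.Simulate(clients, truth, 1000)
	coordinate.Evaluate(clients, truth)
}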

View file

@ -1,4 +1,4 @@
[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) [![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns)
# Alternative (more granular) approach to a DNS library
@ -10,9 +10,9 @@ If there is stuff you should know as a DNS programmer there isn't a convenience
function for it. Server side and client side programming is supported, i.e. you
can build servers and resolvers with it.
If you like this, you may also be interested in:
* https://github.com/miekg/unbound -- Go wrapper for the Unbound resolver.
We try to keep the "master" branch as sane as possible and at the bleeding edge
of standards, avoiding breaking changes wherever reasonable. We support the last
two versions of Go, currently: 1.5 and 1.6.
# Goals
@ -33,6 +33,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/fcambus/rrda
* https://github.com/kenshinx/godns
* https://github.com/skynetservices/skydns
* https://github.com/hashicorp/consul
* https://github.com/DevelopersPL/godnsagent
* https://github.com/duedil-ltd/discodns
* https://github.com/StalkR/dns-reverse-proxy
@ -42,6 +43,16 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://play.google.com/store/apps/details?id=com.turbobytes.dig
* https://github.com/fcambus/statzone
* https://github.com/benschw/dns-clb-go
* https://github.com/corny/dnscheck for http://public-dns.info/
* https://namesmith.io
* https://github.com/miekg/unbound
* https://github.com/miekg/exdns
* https://dnslookup.org
* https://github.com/looterz/grimd
* https://github.com/phamhongviet/serf-dns
* https://github.com/mehrdadrad/mylg
* https://github.com/bamarni/dockness
* https://github.com/fffaraz/microdns
Send pull request if you want to be listed here.
@ -55,9 +66,10 @@ Send pull request if you want to be listed here.
* Server side programming (mimicking the net/http package);
* Client side programming;
* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA;
* EDNS0, NSID;
* EDNS0, NSID, Cookies;
* AXFR/IXFR;
* TSIG, SIG(0);
* DNS over TLS: optional encrypted connection between client and server;
* DNS name compression;
* Depends only on the standard library.
@ -104,7 +116,6 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY
* 3597 - Unknown RRs
* 4025 - IPSECKEY
* 403{3,4,5} - DNSSEC + validation functions
* 4255 - SSHFP record
* 4343 - Case insensitivity
@ -123,6 +134,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 6605 - ECDSA
* 6725 - IANA Registry Update
* 6742 - ILNP DNS
* 6840 - Clarifications and Implementation Notes for DNS Security
* 6844 - CAA record
* 6891 - EDNS0 update
* 6895 - DNS IANA considerations
@ -130,6 +142,8 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 7043 - EUI48/EUI64 records
* 7314 - DNS (EDNS) EXPIRE Option
* 7553 - URI record
* 7858 - DNS over TLS: Initiation and Performance Considerations (draft)
* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies)
* xxxx - EDNS0 DNS Update Lease (draft)
## Loosely based upon
@ -138,11 +152,3 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* `NSD`
* `Net::DNS`
* `GRONG`
## TODO
* privatekey.Precompute() when signing?
* Last remaining RRs: APL, ATMA, A6, NSAP and NXT.
* Missing in parsing: ISDN, UNSPEC, NSAP and ATMA.
* NSEC(3) cover/match/closest enclose.
* Replies with TC bit are not parsed to the end.

161
vendor/github.com/miekg/dns/client.go generated vendored
View file

@ -4,6 +4,8 @@ package dns
import (
"bytes"
"crypto/tls"
"encoding/binary"
"io"
"net"
"time"
@ -24,27 +26,22 @@ type Conn struct {
// A Client defines parameters for a DNS client.
type Client struct {
Net string // if "tcp" a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
UDPSize uint16 // minimum receive buffer for UDP messages
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
TLSConfig *tls.Config // TLS connection configuration
Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight
}
// Exchange performs a synchronous UDP query. It sends the message m to the address
// contained in a and waits for an reply. Exchange does not retry a failed query, nor
// contained in a and waits for a reply. Exchange does not retry a failed query, nor
// will it fall back to TCP in case of truncation.
// If you need to send a DNS message on an already existing connection, you can use the
// following:
//
// co := &dns.Conn{Conn: c} // c is your net.Conn
// co.WriteMsg(m)
// in, err := co.ReadMsg()
// co.Close()
//
// See client.Exchange for more information on setting larger buffer sizes.
func Exchange(m *Msg, a string) (r *Msg, err error) {
var co *Conn
co, err = DialTimeout("udp", a, dnsTimeout)
@ -53,8 +50,6 @@ func Exchange(m *Msg, a string) (r *Msg, err error) {
}
defer co.Close()
co.SetReadDeadline(time.Now().Add(dnsTimeout))
co.SetWriteDeadline(time.Now().Add(dnsTimeout))
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
@ -62,9 +57,12 @@ func Exchange(m *Msg, a string) (r *Msg, err error) {
co.UDPSize = opt.UDPSize()
}
co.SetWriteDeadline(time.Now().Add(dnsTimeout))
if err = co.WriteMsg(m); err != nil {
return nil, err
}
co.SetReadDeadline(time.Now().Add(dnsTimeout))
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
@ -95,14 +93,18 @@ func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
return r, err
}
// Exchange performs an synchronous query. It sends the message m to the address
// contained in a and waits for an reply. Basic use pattern with a *dns.Client:
// Exchange performs a synchronous query. It sends the message m to the address
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
//
// c := new(dns.Client)
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
//
// Exchange does not retry a failed query, nor will it fall back to TCP in
// case of truncation.
// It is up to the caller to create a message that allows for larger responses to be
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
// buffer, see SetEdns0. Messages without an OPT RR will fall back to the historic limit
// of 512 bytes.
func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight {
return c.exchange(m, a)
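A short sketch of the buffer-size advice in the comment above (the resolver address 8.8.8.8:53 is just an example):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	m := new(dns.Msg)
	m.SetQuestion("miek.nl.", dns.TypeDNSKEY)

	// Advertise a 4096-byte buffer (and set the DO bit) so responses
	// larger than the historic 512-byte limit are not truncated.
	m.SetEdns0(4096, true)

	in, rtt, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		panic(err)
	}
	fmt.Println(rtt, in.Answer)
}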
@ -129,6 +131,9 @@ func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
}
func (c *Client) dialTimeout() time.Duration {
if c.Timeout != 0 {
return c.Timeout
}
if c.DialTimeout != 0 {
return c.DialTimeout
}
@ -151,11 +156,36 @@ func (c *Client) writeTimeout() time.Duration {
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
var co *Conn
if c.Net == "" {
co, err = DialTimeout("udp", a, c.dialTimeout())
} else {
co, err = DialTimeout(c.Net, a, c.dialTimeout())
network := "udp"
tls := false
switch c.Net {
case "tcp-tls":
network = "tcp"
tls = true
case "tcp4-tls":
network = "tcp4"
tls = true
case "tcp6-tls":
network = "tcp6"
tls = true
default:
if c.Net != "" {
network = c.Net
}
}
var deadline time.Time
if c.Timeout != 0 {
deadline = time.Now().Add(c.Timeout)
}
if tls {
co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout())
} else {
co, err = DialTimeout(network, a, c.dialTimeout())
}
if err != nil {
return nil, 0, err
}
@ -171,13 +201,13 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
co.UDPSize = c.UDPSize
}
co.SetReadDeadline(time.Now().Add(c.readTimeout()))
co.SetWriteDeadline(time.Now().Add(c.writeTimeout()))
co.TsigSecret = c.TsigSecret
co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout()))
if err = co.WriteMsg(m); err != nil {
return nil, 0, err
}
co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout()))
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
@ -196,6 +226,12 @@ func (co *Conn) ReadMsg() (*Msg, error) {
m := new(Msg)
if err := m.Unpack(p); err != nil {
// If ErrTruncated was returned, we still want to allow the user to use
// the message; callers that don't want a truncated message can simply
// check err.
if err == ErrTruncated {
return m, err
}
return nil, err
}
if t := m.IsTsig(); t != nil {
@ -218,21 +254,26 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
err error
)
if t, ok := co.Conn.(*net.TCPConn); ok {
switch t := co.Conn.(type) {
case *net.TCPConn, *tls.Conn:
r := t.(io.Reader)
// First two bytes specify the length of the entire message.
l, err := tcpMsgLen(t)
l, err := tcpMsgLen(r)
if err != nil {
return nil, err
}
p = make([]byte, l)
n, err = tcpRead(t, p)
} else {
n, err = tcpRead(r, p)
co.rtt = time.Since(co.t)
default:
if co.UDPSize > MinMsgSize {
p = make([]byte, co.UDPSize)
} else {
p = make([]byte, MinMsgSize)
}
n, err = co.Read(p)
co.rtt = time.Since(co.t)
}
if err != nil {
@ -243,15 +284,17 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
p = p[:n]
if hdr != nil {
if _, err = UnpackStruct(hdr, p, 0); err != nil {
dh, _, err := unpackMsgHdr(p, 0)
if err != nil {
return nil, err
}
*hdr = dh
}
return p, err
}
// tcpMsgLen is a helper func to read the first two bytes of the stream as the uint16 packet length.
func tcpMsgLen(t *net.TCPConn) (int, error) {
func tcpMsgLen(t io.Reader) (int, error) {
p := []byte{0, 0}
n, err := t.Read(p)
if err != nil {
@ -260,7 +303,7 @@ func tcpMsgLen(t *net.TCPConn) (int, error) {
if n != 2 {
return 0, ErrShortRead
}
l, _ := unpackUint16(p, 0)
l := binary.BigEndian.Uint16(p)
if l == 0 {
return 0, ErrShortRead
}
@ -268,7 +311,7 @@ func tcpMsgLen(t *net.TCPConn) (int, error) {
}
// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
func tcpRead(t *net.TCPConn, p []byte) (int, error) {
func tcpRead(t io.Reader, p []byte) (int, error) {
n, err := t.Read(p)
if err != nil {
return n, err
@ -291,27 +334,28 @@ func (co *Conn) Read(p []byte) (n int, err error) {
if len(p) < 2 {
return 0, io.ErrShortBuffer
}
if t, ok := co.Conn.(*net.TCPConn); ok {
l, err := tcpMsgLen(t)
switch t := co.Conn.(type) {
case *net.TCPConn, *tls.Conn:
r := t.(io.Reader)
l, err := tcpMsgLen(r)
if err != nil {
return 0, err
}
if l > len(p) {
return int(l), io.ErrShortBuffer
}
return tcpRead(t, p[:l])
return tcpRead(r, p[:l])
}
// UDP connection
n, err = co.Conn.Read(p)
if err != nil {
return n, err
}
co.rtt = time.Since(co.t)
return n, err
}
// WriteMsg sends a message throught the connection co.
// WriteMsg sends a message through the connection co.
// If the message m contains a TSIG record the transaction
// signature is calculated.
func (co *Conn) WriteMsg(m *Msg) (err error) {
@ -322,7 +366,7 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
return ErrSecret
}
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
// Set for the next read, allthough only used in zone transfers
// Set for the next read, although only used in zone transfers
co.tsigRequestMAC = mac
} else {
out, err = m.Pack()
@ -339,7 +383,10 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
// Write implements the net.Conn Write method.
func (co *Conn) Write(p []byte) (n int, err error) {
if t, ok := co.Conn.(*net.TCPConn); ok {
switch t := co.Conn.(type) {
case *net.TCPConn, *tls.Conn:
w := t.(io.Writer)
lp := len(p)
if lp < 2 {
return 0, io.ErrShortBuffer
@ -348,9 +395,9 @@ func (co *Conn) Write(p []byte) (n int, err error) {
return 0, &Error{err: "message too large"}
}
l := make([]byte, 2, lp+2)
l[0], l[1] = packUint16(uint16(lp))
binary.BigEndian.PutUint16(l, uint16(lp))
p = append(l, p...)
n, err := io.Copy(t, bytes.NewReader(p))
n, err := io.Copy(w, bytes.NewReader(p))
return int(n), err
}
n, err = co.Conn.(*net.UDPConn).Write(p)
@ -376,3 +423,33 @@ func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, er
}
return conn, nil
}
// DialWithTLS connects to the address on the named network with TLS.
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
conn = new(Conn)
conn.Conn, err = tls.Dial(network, address, tlsConfig)
if err != nil {
return nil, err
}
return conn, nil
}
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
var dialer net.Dialer
dialer.Timeout = timeout
conn = new(Conn)
conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig)
if err != nil {
return nil, err
}
return conn, nil
}
func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time {
if deadline.IsZero() {
return time.Now().Add(timeout)
}
return deadline
}
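Putting the new "tcp-tls" plumbing together, a minimal DNS-over-TLS query could look like this sketch (the resolver name and port 853 are illustrative assumptions, not anything this diff mandates):

package main

import (
	"crypto/tls"
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	c.Net = "tcp-tls" // routed through DialTimeoutWithTLS above
	c.TLSConfig = &tls.Config{ServerName: "dns.example.net"}

	m := new(dns.Msg)
	m.SetQuestion("miek.nl.", dns.TypeA)

	in, _, err := c.Exchange(m, "dns.example.net:853")
	if err != nil {
		panic(err)
	}
	fmt.Println(in.Answer)
}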

44
vendor/github.com/miekg/dns/dane.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
package dns
import (
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
"encoding/hex"
"errors"
"io"
)
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
switch matchingType {
case 0:
switch selector {
case 0:
return hex.EncodeToString(cert.Raw), nil
case 1:
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
}
case 1:
h := sha256.New()
switch selector {
case 0:
io.WriteString(h, string(cert.Raw))
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
return hex.EncodeToString(h.Sum(nil)), nil
}
case 2:
h := sha512.New()
switch selector {
case 0:
io.WriteString(h, string(cert.Raw))
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
return hex.EncodeToString(h.Sum(nil)), nil
}
}
return "", errors.New("dns: bad MatchingType or Selector")
}
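A sketch of feeding a certificate through this helper, e.g. to fill the certificate-association data of a TLSA record; the PEM input is assumed, and selector 1 / matching type 1 correspond to hashing the SubjectPublicKeyInfo with SHA-256:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	var pemBytes []byte // assumed: a PEM-encoded certificate read from disk or a tls.Conn
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	assoc, err := dns.CertificateToDANE(1, 1, cert)
	if err != nil {
		panic(err)
	}
	fmt.Println(assoc) // hex digest for the TLSA Certificate field
}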

View file

@ -142,26 +142,33 @@ func (dns *Msg) IsTsig() *TSIG {
// record in the additional section will do. It returns the OPT record
// found or nil.
func (dns *Msg) IsEdns0() *OPT {
for _, r := range dns.Extra {
if r.Header().Rrtype == TypeOPT {
return r.(*OPT)
// EDNS0 is at the end of the additional section, start there.
// We might want to change this to *only* look at the last two
// records. So we see TSIG and/or OPT - this a slightly bigger
// change though.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
return dns.Extra[i].(*OPT)
}
}
return nil
}
// IsDomainName checks if s is a valid domainname, it returns
// the number of labels and true, when a domain name is valid.
// Note that non fully qualified domain name is considered valid, in this case the
// last label is counted in the number of labels.
// When false is returned the number of labels is not defined.
// IsDomainName checks if s is a valid domain name; it returns the number of
// labels and true when a domain name is valid. Note that a non-fully-qualified
// domain name is considered valid, in which case the last label is counted in
// the number of labels. When false is returned the number of labels is not
// defined. Also note that this function is extremely liberal; almost any
// string is a valid domain name as DNS is an 8-bit protocol. It checks if each
// label fits in 63 characters, but there is no length check for the entire
// string s. I.e. a domain name longer than 255 characters is considered valid.
func IsDomainName(s string) (labels int, ok bool) {
_, labels, err := packDomainName(s, nil, 0, nil, false)
return labels, err == nil
}
// IsSubDomain checks if child is indeed a child of the parent. Both child and
// parent are *not* downcased before doing the comparison.
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
// are the same domain true is returned as well.
func IsSubDomain(parent, child string) bool {
// Entire child is contained in parent
return CompareDomainName(parent, child) == CountLabel(parent)
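For example, a small sketch of the semantics described in the two comments above:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	labels, ok := dns.IsDomainName("www.miek.nl") // not fully qualified, still valid
	fmt.Println(labels, ok)                       // 3 true

	fmt.Println(dns.IsSubDomain("miek.nl.", "www.miek.nl.")) // true
	fmt.Println(dns.IsSubDomain("miek.nl.", "miek.nl."))     // true: same domain counts
}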

38
vendor/github.com/miekg/dns/dns.go generated vendored
View file

@ -3,17 +3,15 @@ package dns
import "strconv"
const (
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
// DefaultMsgSize is the standard default for messages larger than 512 bytes.
DefaultMsgSize = 4096
// MinMsgSize is the minimal size of a DNS packet.
MinMsgSize = 512
// MaxMsgSize is the largest possible DNS packet.
MaxMsgSize = 65535
defaultTtl = 3600 // Default internal TTL.
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
defaultTtl = 3600 // Default internal TTL.
DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes.
MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet.
MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet.
)
// Error represents a DNS error
// Error represents a DNS error.
type Error struct{ err string }
func (e *Error) Error() string {
@ -30,10 +28,13 @@ type RR interface {
Header() *RR_Header
// String returns the text representation of the resource record.
String() string
// copy returns a copy of the RR
copy() RR
// len returns the length (in octets) of the uncompressed RR in wire format.
len() int
// pack packs an RR into wire format.
pack([]byte, int, map[string]int, bool) (int, error)
}
// RR_Header is the header all DNS resource records share.
@ -42,13 +43,13 @@ type RR_Header struct {
Rrtype uint16
Class uint16
Ttl uint32
Rdlength uint16 // length of data after header
Rdlength uint16 // Length of data after header.
}
// Header returns itself. This is here to make RR_Header implement the RR interface.
// Header returns itself. This is here to make RR_Header implement the RR interface.
func (h *RR_Header) Header() *RR_Header { return h }
// Just to imlement the RR interface.
// Just to implement the RR interface.
func (h *RR_Header) copy() RR { return nil }
func (h *RR_Header) copyHeader() *RR_Header {
@ -82,19 +83,22 @@ func (h *RR_Header) len() int {
return l
}
// ToRFC3597 converts a known RR to the unknown RR representation
// from RFC 3597.
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
buf := make([]byte, r.len()*2)
off, err := PackStruct(r, buf, 0)
off, err := PackRR(r, buf, 0, nil, false)
if err != nil {
return err
}
buf = buf[:off]
rawSetRdlength(buf, 0, off)
_, err = UnpackStruct(rr, buf, 0)
if int(r.Header().Rdlength) > off {
return ErrBuf
}
rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength))
if err != nil {
return err
}
*rr = *rfc3597.(*RFC3597)
return nil
}

View file

@ -13,6 +13,7 @@ import (
_ "crypto/sha256"
_ "crypto/sha512"
"encoding/asn1"
"encoding/binary"
"encoding/hex"
"math/big"
"sort"
@ -103,9 +104,7 @@ const (
ZONE = 1 << 8
)
// The RRSIG needs to be converted to wireformat with some of
// the rdata (the signature) missing. Use this struct to easy
// the conversion (and re-use the pack/unpack functions).
// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
type rrsigWireFmt struct {
TypeCovered uint16
Algorithm uint8
@ -144,7 +143,7 @@ func (k *DNSKEY) KeyTag() uint16 {
// at the base64 values. But I'm lazy.
modulus, _ := fromBase64([]byte(k.PublicKey))
if len(modulus) > 1 {
x, _ := unpackUint16(modulus, len(modulus)-2)
x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
keytag = int(x)
}
default:
@ -154,7 +153,7 @@ func (k *DNSKEY) KeyTag() uint16 {
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := PackStruct(keywire, wire, 0)
n, err := packKeyWire(keywire, wire)
if err != nil {
return 0
}
@ -192,7 +191,7 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := PackStruct(keywire, wire, 0)
n, err := packKeyWire(keywire, wire)
if err != nil {
return nil
}
@ -248,13 +247,12 @@ func (d *DS) ToCDS() *CDS {
return c
}
// Sign signs an RRSet. The signature needs to be filled in with
// the values: Inception, Expiration, KeyTag, SignerName and Algorithm.
// The rest is copied from the RRset. Sign returns true when the signing went OK,
// otherwise false.
// There is no check if RRSet is a proper (RFC 2181) RRSet.
// If OrigTTL is non zero, it is used as-is, otherwise the TTL of the RRset
// is used as the OrigTTL.
// Sign signs an RRSet. The signature needs to be filled in with the values:
// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
// from the RRset. Sign returns a non-nil error when the signing fails.
// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non
// zero, it is used as-is, otherwise the TTL of the RRset is used as the
// OrigTTL.
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
if k == nil {
return ErrPrivKey
@ -290,7 +288,7 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
// Create the desired binary blob
signdata := make([]byte, DefaultMsgSize)
n, err := PackStruct(sigwire, signdata, 0)
n, err := packSigWire(sigwire, signdata)
if err != nil {
return err
}
@ -408,7 +406,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
sigwire.SignerName = strings.ToLower(rr.SignerName)
// Create the desired binary blob
signeddata := make([]byte, DefaultMsgSize)
n, err := PackStruct(sigwire, signeddata, 0)
n, err := packSigWire(sigwire, signeddata)
if err != nil {
return err
}
@ -421,8 +419,8 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
sigbuf := rr.sigBuf() // Get the binary signature data
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
// TODO(mg)
// remove the domain name and assume its our
// TODO(miek)
// remove the domain name and assume it's ours?
}
hash, ok := AlgorithmToHash[rr.Algorithm]
@ -609,6 +607,12 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
// SRV, DNAME, A6
//
// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
// Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
// that needs conversion to lowercase, and twice at that. Since HINFO
// records contain no domain names, they are not subject to case
// conversion.
switch x := r1.(type) {
case *NS:
x.Ns = strings.ToLower(x.Ns)
@ -657,3 +661,61 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
}
return buf, nil
}
func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
// copied from zmsg.go RRSIG packing
off, err := packUint16(sw.TypeCovered, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(sw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(sw.Labels, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.OrigTtl, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Expiration, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Inception, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(sw.KeyTag, msg, off)
if err != nil {
return off, err
}
off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
if err != nil {
return off, err
}
return off, nil
}
func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
// copied from zmsg.go DNSKEY packing
off, err := packUint16(dw.Flags, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(dw.Protocol, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(dw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packStringBase64(dw.PublicKey, msg, off)
if err != nil {
return off, err
}
return off, nil
}
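These two pack helpers back the exported KeyTag and ToDS methods; a usage sketch (the zone name is illustrative, and Generate is assumed to be this package's keygen helper):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	key := new(dns.DNSKEY)
	key.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
	key.Flags, key.Protocol, key.Algorithm = 257, 3, dns.RSASHA256

	if _, err := key.Generate(2048); err != nil {
		panic(err)
	}
	fmt.Println(key.KeyTag())         // computed via packKeyWire above
	fmt.Println(key.ToDS(dns.SHA256)) // DS record with a SHA-256 digest
}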

View file

@ -14,7 +14,7 @@ import (
// NewPrivateKey returns a PrivateKey by parsing the string s.
// s should be in the same form as the BIND private key files.
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
if s[len(s)-1] != '\n' { // We need a closing newline
if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
}
return k.ReadPrivateKey(strings.NewReader(s), "")
@ -25,9 +25,9 @@ func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
// The public key must be known, because some cryptographic algorithms embed
// the public key inside the private key.
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
m, e := parseKey(q, file)
m, err := parseKey(q, file)
if m == nil {
return nil, e
return nil, err
}
if _, ok := m["private-key-format"]; !ok {
return nil, ErrPrivKey
@ -42,16 +42,16 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
}
switch uint8(algo) {
case DSA:
priv, e := readPrivateKeyDSA(m)
if e != nil {
return nil, e
priv, err := readPrivateKeyDSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyDSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, e
return priv, nil
case RSAMD5:
fallthrough
case RSASHA1:
@ -61,31 +61,31 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
case RSASHA256:
fallthrough
case RSASHA512:
priv, e := readPrivateKeyRSA(m)
if e != nil {
return nil, e
priv, err := readPrivateKeyRSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyRSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, e
return priv, nil
case ECCGOST:
return nil, ErrPrivKey
case ECDSAP256SHA256:
fallthrough
case ECDSAP384SHA384:
priv, e := readPrivateKeyECDSA(m)
if e != nil {
return nil, e
priv, err := readPrivateKeyECDSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyECDSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, e
return priv, nil
default:
return nil, ErrPrivKey
}
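A round-trip sketch of this key-reading path (Generate and PrivateKeyString are assumed to be this package's keygen helpers; the flow is illustrative):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	k := new(dns.DNSKEY)
	k.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
	k.Flags, k.Protocol, k.Algorithm = 256, 3, dns.ECDSAP256SHA256

	priv, err := k.Generate(256)
	if err != nil {
		panic(err)
	}

	// Serialize to the BIND private-key format and parse it back;
	// NewPrivateKey appends the closing newline if it is missing.
	back, err := k.NewPrivateKey(k.PrivateKeyString(priv))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", back) // *ecdsa.PrivateKey
}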

8
vendor/github.com/miekg/dns/doc.go generated vendored
View file

@ -101,7 +101,7 @@ uses public key cryptography to sign resource records. The
public keys are stored in DNSKEY records and the signatures in RRSIG records.
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
to an request.
to a request.
m := new(dns.Msg)
m.SetEdns0(4096, true)
@ -184,9 +184,9 @@ Basic use pattern validating and replying to a message that has TSIG set.
dns.HandleFunc(".", handleRequest)
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
m := new(Msg)
m := new(dns.Msg)
m.SetReply(r)
if r.IsTsig() {
if r.IsTsig() != nil {
if w.TsigStatus() == nil {
// *Msg r has an TSIG record and it was validated
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
@ -203,7 +203,7 @@ RFC 6895 sets aside a range of type codes for private use. This range
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
can be used, before requesting an official type code from IANA.
see http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more
see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
information.
EDNS0

118
vendor/github.com/miekg/dns/edns.go generated vendored
View file

@ -1,6 +1,7 @@
package dns
import (
"encoding/binary"
"encoding/hex"
"errors"
"net"
@ -17,6 +18,7 @@ const (
EDNS0N3U = 0x7 // NSEC3 Hash Understood
EDNS0SUBNET = 0x8 // client-subnet (RFC6891)
EDNS0EXPIRE = 0x9 // EDNS0 expire
EDNS0COOKIE = 0xa // EDNS0 Cookie
EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
@ -30,11 +32,6 @@ type OPT struct {
Option []EDNS0 `dns:"opt"`
}
// Header implements the RR interface.
func (rr *OPT) Header() *RR_Header {
return &rr.Hdr
}
func (rr *OPT) String() string {
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
if rr.Do() {
@ -61,6 +58,8 @@ func (rr *OPT) String() string {
if o.(*EDNS0_SUBNET).DraftOption {
s += " (draft)"
}
case *EDNS0_COOKIE:
s += "\n; COOKIE: " + o.String()
case *EDNS0_UL:
s += "\n; UPDATE LEASE: " + o.String()
case *EDNS0_LLQ:
@ -88,10 +87,6 @@ func (rr *OPT) len() int {
return l
}
func (rr *OPT) copy() RR {
return &OPT{*rr.Hdr.copyHeader(), rr.Option}
}
// return the old value -> delete SetVersion?
// Version returns the EDNS version used. Only zero is defined.
@ -105,13 +100,16 @@ func (rr *OPT) SetVersion(v uint8) {
}
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
func (rr *OPT) ExtendedRcode() uint8 {
return uint8((rr.Hdr.Ttl & 0xFF000000) >> 24)
func (rr *OPT) ExtendedRcode() int {
return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15
}
// SetExtendedRcode sets the EDNS extended RCODE field.
func (rr *OPT) SetExtendedRcode(v uint8) {
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v) << 24)
if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have!
return
}
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24)
}
// UDPSize returns the UDP buffer size.
@ -130,12 +128,21 @@ func (rr *OPT) Do() bool {
}
// SetDo sets the DO (DNSSEC OK) bit.
func (rr *OPT) SetDo() {
rr.Hdr.Ttl |= _DO
// If we pass an argument, set the DO bit to that value.
// It is possible to pass 2 or more arguments; any arguments after the 1st are silently ignored.
func (rr *OPT) SetDo(do ...bool) {
if len(do) == 1 {
if do[0] {
rr.Hdr.Ttl |= _DO
} else {
rr.Hdr.Ttl &^= _DO
}
} else {
rr.Hdr.Ttl |= _DO
}
}
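The new optional argument makes the DO bit settable in both directions; a quick sketch:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	o := new(dns.OPT)
	o.Hdr.Name, o.Hdr.Rrtype = ".", dns.TypeOPT

	o.SetDo()           // no argument: set the DO bit, as before
	o.SetDo(false)      // new form: explicitly clear it again
	fmt.Println(o.Do()) // false
}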
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to
// it.
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
type EDNS0 interface {
// Option returns the option code for the option.
Option() uint16
@ -216,7 +223,7 @@ func (e *EDNS0_SUBNET) Option() uint16 {
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
b := make([]byte, 4)
b[0], b[1] = packUint16(e.Family)
binary.BigEndian.PutUint16(b[0:], e.Family)
b[2] = e.SourceNetmask
b[3] = e.SourceScope
switch e.Family {
@ -250,7 +257,7 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Family, _ = unpackUint16(b, 0)
e.Family = binary.BigEndian.Uint16(b)
e.SourceNetmask = b[2]
e.SourceScope = b[3]
switch e.Family {
@ -292,6 +299,41 @@ func (e *EDNS0_SUBNET) String() (s string) {
return
}
// The Cookie EDNS0 option
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_COOKIE)
// e.Code = dns.EDNS0COOKIE
// e.Cookie = "24a5ac.."
// o.Option = append(o.Option, e)
//
// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
// always 8 bytes. It may then optionally be followed by the server cookie. The server
// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
//
// cCookie := o.Cookie[:16]
// sCookie := o.Cookie[16:]
//
// There is no guarantee that the Cookie string has a specific length.
type EDNS0_COOKIE struct {
Code uint16 // Always EDNS0COOKIE
Cookie string // Hex-encoded cookie data
}
func (e *EDNS0_COOKIE) pack() ([]byte, error) {
h, err := hex.DecodeString(e.Cookie)
if err != nil {
return nil, err
}
return h, nil
}
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
// an expiration on an update RR. This is helpful for clients that cannot clean
// up after themselves. This is a draft RFC and more information can be found at
@ -315,10 +357,7 @@ func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease),
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
func (e *EDNS0_UL) pack() ([]byte, error) {
b := make([]byte, 4)
b[0] = byte(e.Lease >> 24)
b[1] = byte(e.Lease >> 16)
b[2] = byte(e.Lease >> 8)
b[3] = byte(e.Lease)
binary.BigEndian.PutUint32(b, e.Lease)
return b, nil
}
@ -326,7 +365,7 @@ func (e *EDNS0_UL) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Lease = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
e.Lease = binary.BigEndian.Uint32(b)
return nil
}
@ -345,21 +384,11 @@ func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
func (e *EDNS0_LLQ) pack() ([]byte, error) {
b := make([]byte, 18)
b[0], b[1] = packUint16(e.Version)
b[2], b[3] = packUint16(e.Opcode)
b[4], b[5] = packUint16(e.Error)
b[6] = byte(e.Id >> 56)
b[7] = byte(e.Id >> 48)
b[8] = byte(e.Id >> 40)
b[9] = byte(e.Id >> 32)
b[10] = byte(e.Id >> 24)
b[11] = byte(e.Id >> 16)
b[12] = byte(e.Id >> 8)
b[13] = byte(e.Id)
b[14] = byte(e.LeaseLife >> 24)
b[15] = byte(e.LeaseLife >> 16)
b[16] = byte(e.LeaseLife >> 8)
b[17] = byte(e.LeaseLife)
binary.BigEndian.PutUint16(b[0:], e.Version)
binary.BigEndian.PutUint16(b[2:], e.Opcode)
binary.BigEndian.PutUint16(b[4:], e.Error)
binary.BigEndian.PutUint64(b[6:], e.Id)
binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
return b, nil
}
@ -367,12 +396,11 @@ func (e *EDNS0_LLQ) unpack(b []byte) error {
if len(b) < 18 {
return ErrBuf
}
e.Version, _ = unpackUint16(b, 0)
e.Opcode, _ = unpackUint16(b, 2)
e.Error, _ = unpackUint16(b, 4)
e.Id = uint64(b[6])<<56 | uint64(b[6+1])<<48 | uint64(b[6+2])<<40 |
uint64(b[6+3])<<32 | uint64(b[6+4])<<24 | uint64(b[6+5])<<16 | uint64(b[6+6])<<8 | uint64(b[6+7])
e.LeaseLife = uint32(b[14])<<24 | uint32(b[14+1])<<16 | uint32(b[14+2])<<8 | uint32(b[14+3])
e.Version = binary.BigEndian.Uint16(b[0:])
e.Opcode = binary.BigEndian.Uint16(b[2:])
e.Error = binary.BigEndian.Uint16(b[4:])
e.Id = binary.BigEndian.Uint64(b[6:])
e.LeaseLife = binary.BigEndian.Uint32(b[14:])
return nil
}
@ -468,7 +496,7 @@ func (e *EDNS0_EXPIRE) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Expire = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
e.Expire = binary.BigEndian.Uint32(b)
return nil
}

View file

@ -69,15 +69,6 @@ func Field(r RR, i int) string {
s += " " + Type(d.Index(i).Uint()).String()
}
return s
case `dns:"wks"`:
if d.Len() == 0 {
return ""
}
s := strconv.Itoa(int(d.Index(0).Uint()))
for i := 0; i < d.Len(); i++ {
s += " " + strconv.Itoa(int(d.Index(i).Uint()))
}
return s
default:
// if it does not have a tag its a string slice
fallthrough

View file

@ -2,6 +2,7 @@ package dns
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
@ -15,7 +16,7 @@ import (
// * [[ttl][class]]
// * type
// * rhs (rdata)
// But we are lazy here, only the range is parsed *all* occurences
// But we are lazy here, only the range is parsed; *all* occurrences
// of $ after that are interpreted.
// Any error is returned as a string value; the empty string signals
// "no error".
@ -25,7 +26,7 @@ func generate(l lex, c chan lex, t chan *Token, o string) string {
if i+1 == len(l.token) {
return "bad step in $GENERATE range"
}
if s, e := strconv.Atoi(l.token[i+1:]); e == nil {
if s, err := strconv.Atoi(l.token[i+1:]); err == nil {
if s < 0 {
return "bad step in $GENERATE range"
}
@ -65,7 +66,7 @@ BuildRR:
escape bool
dom bytes.Buffer
mod string
err string
err error
offset int
)
@ -104,8 +105,8 @@ BuildRR:
return "bad modifier in $GENERATE"
}
mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
if err != "" {
return err
if err != nil {
return err.Error()
}
j += 2 + sep // Jump to it
}
@ -119,9 +120,9 @@ BuildRR:
}
}
// Re-parse the RR and send it on the current channel t
rx, e := NewRR("$ORIGIN " + o + "\n" + dom.String())
if e != nil {
return e.(*ParseError).err
rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String())
if err != nil {
return err.Error()
}
t <- &Token{RR: rx}
// It's more efficient to first build the rrlist and then parse it in
@ -131,28 +132,28 @@ BuildRR:
}
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
func modToPrintf(s string) (string, int, string) {
func modToPrintf(s string) (string, int, error) {
xs := strings.SplitN(s, ",", 3)
if len(xs) != 3 {
return "", 0, "bad modifier in $GENERATE"
return "", 0, errors.New("bad modifier in $GENERATE")
}
// xs[0] is offset, xs[1] is width, xs[2] is base
if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
return "", 0, "bad base in $GENERATE"
return "", 0, errors.New("bad base in $GENERATE")
}
offset, err := strconv.Atoi(xs[0])
if err != nil || offset > 255 {
return "", 0, "bad offset in $GENERATE"
return "", 0, errors.New("bad offset in $GENERATE")
}
width, err := strconv.Atoi(xs[1])
if err != nil || width > 255 {
return "", offset, "bad width in $GENERATE"
return "", offset, errors.New("bad width in $GENERATE")
}
switch {
case width < 0:
return "", offset, "bad width in $GENERATE"
return "", offset, errors.New("bad width in $GENERATE")
case width == 0:
return "%" + xs[1] + xs[2], offset, ""
return "%" + xs[1] + xs[2], offset, nil
}
return "%0" + xs[1] + xs[2], offset, ""
return "%0" + xs[1] + xs[2], offset, nil
}
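So a modifier like 0,2,d becomes the Printf verb %02d with offset 0. End to end, a $GENERATE directive can be expanded like this sketch (assuming this vintage's ParseZone API; the zone content is made up):

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := "$ORIGIN example.org.\n$GENERATE 1-3 host-$ A 10.0.0.$\n"
	for tok := range dns.ParseZone(strings.NewReader(zone), "example.org.", "") {
		if tok.Error != nil {
			panic(tok.Error)
		}
		fmt.Println(tok.RR) // host-1.example.org. ... A 10.0.0.1, and so on
	}
}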

View file

@ -4,9 +4,11 @@ package dns
// SplitDomainName splits a name string into its labels.
// www.miek.nl. returns []string{"www", "miek", "nl"}
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
// The root label (.) returns nil. Note that using
// strings.Split(s) will work in most cases, but does not handle
// escaped dots (\.) for instance.
// s must be a syntactically valid domain name, see IsDomainName.
func SplitDomainName(s string) (labels []string) {
if len(s) == 0 {
return nil
@ -45,6 +47,8 @@ func SplitDomainName(s string) (labels []string) {
//
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
// www.miek.nl. and www.bla.nl. have one label in common: nl
//
// s1 and s2 must be syntactically valid domain names.
func CompareDomainName(s1, s2 string) (n int) {
s1 = Fqdn(s1)
s2 = Fqdn(s2)
@ -85,6 +89,7 @@ func CompareDomainName(s1, s2 string) (n int) {
}
// CountLabel counts the number of labels in the string s.
// s must be a syntactically valid domain name.
func CountLabel(s string) (labels int) {
if s == "." {
return
@ -103,6 +108,7 @@ func CountLabel(s string) (labels int) {
// Split splits a name s into its label indexes.
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
// The root name (.) returns nil. Also see SplitDomainName.
// s must be a syntactically valid domain name.
func Split(s string) []int {
if s == "." {
return nil
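A side-by-side sketch of the label helpers (values follow the examples given in the comments above):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.SplitDomainName("www.miek.nl."))                  // [www miek nl]
	fmt.Println(dns.CompareDomainName("www.miek.nl.", "www.bla.nl.")) // 1: only "nl" in common
	fmt.Println(dns.CountLabel("www.miek.nl."))                       // 3
	fmt.Println(dns.Split("www.miek.nl."))                            // [0 4 9]
}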

1315
vendor/github.com/miekg/dns/msg.go generated vendored

File diff suppressed because it is too large Load diff

340
vendor/github.com/miekg/dns/msg_generate.go generated vendored Normal file
View file

@ -0,0 +1,340 @@
//+build ignore
// msg_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will generate pack/unpack methods based on the struct tags. The generated source is
// written to zmsg.go, and is meant to be checked into git.
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
"strings"
)
var packageHdr = `
// *** DO NOT MODIFY ***
// AUTOGENERATED BY go generate from msg_generate.go
package dns
`
// getTypeStruct will take a type and the package scope, and return the
// (innermost) struct if the type is considered a RR type (currently defined as
// those structs beginning with a RR_Header, could be redefined as implementing
// the RR interface). The bool return value indicates if embedded structs were
// resolved.
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
// Collect actual types (*X)
var namedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
continue
}
if name == "PrivateRR" {
continue
}
// Check if corresponding TypeX exists
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
log.Fatalf("Constant Type%s does not exist.", o.Name())
}
namedTypes = append(namedTypes, o.Name())
}
b := &bytes.Buffer{}
b.WriteString(packageHdr)
fmt.Fprint(b, "// pack*() functions\n\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name)
fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress)
if err != nil {
return off, err
}
headerEnd := off
`)
for i := 1; i < st.NumFields(); i++ {
o := func(s string) {
fmt.Fprintf(b, s, st.Field(i).Name())
fmt.Fprint(b, `if err != nil {
return off, err
}
`)
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`: // ignored
case `dns:"txt"`:
o("off, err = packStringTxt(rr.%s, msg, off)\n")
case `dns:"opt"`:
o("off, err = packDataOpt(rr.%s, msg, off)\n")
case `dns:"nsec"`:
o("off, err = packDataNsec(rr.%s, msg, off)\n")
case `dns:"domain-name"`:
o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch {
case st.Tag(i) == `dns:"-"`: // ignored
case st.Tag(i) == `dns:"cdomain-name"`:
fallthrough
case st.Tag(i) == `dns:"domain-name"`:
o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n")
case st.Tag(i) == `dns:"a"`:
o("off, err = packDataA(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"aaaa"`:
o("off, err = packDataAAAA(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"uint48"`:
o("off, err = packUint48(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"txt"`:
o("off, err = packString(rr.%s, msg, off)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32
fallthrough
case st.Tag(i) == `dns:"base32"`:
o("off, err = packStringBase32(rr.%s, msg, off)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64
fallthrough
case st.Tag(i) == `dns:"base64"`:
o("off, err = packStringBase64(rr.%s, msg, off)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3
o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n")
continue
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex
fallthrough
case st.Tag(i) == `dns:"hex"`:
o("off, err = packStringHex(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"octet"`:
o("off, err = packStringOctet(rr.%s, msg, off)\n")
case st.Tag(i) == "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("off, err = packUint8(rr.%s, msg, off)\n")
case types.Uint16:
o("off, err = packUint16(rr.%s, msg, off)\n")
case types.Uint32:
o("off, err = packUint32(rr.%s, msg, off)\n")
case types.Uint64:
o("off, err = packUint64(rr.%s, msg, off)\n")
case types.String:
o("off, err = packString(rr.%s, msg, off)\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
}
// We have packed everything, only now we know the rdlength of this RR
fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)")
fmt.Fprintln(b, "return off, nil }\n")
}
fmt.Fprint(b, "// unpack*() functions\n\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name)
fmt.Fprintf(b, "rr := new(%s)\n", name)
fmt.Fprint(b, "rr.Hdr = h\n")
fmt.Fprint(b, `if noRdata(h) {
return rr, off, nil
}
var err error
rdStart := off
_ = rdStart
`)
for i := 1; i < st.NumFields(); i++ {
o := func(s string) {
fmt.Fprintf(b, s, st.Field(i).Name())
fmt.Fprint(b, `if err != nil {
return rr, off, err
}
`)
}
// size-* are special, because they reference a struct member we should use for the length.
if strings.HasPrefix(st.Tag(i), `dns:"size-`) {
structMember := structMember(st.Tag(i))
structTag := structTag(st.Tag(i))
switch structTag {
case "hex":
fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
case "base32":
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
case "base64":
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
fmt.Fprint(b, `if err != nil {
return rr, off, err
}
`)
continue
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`: // ignored
case `dns:"txt"`:
o("rr.%s, off, err = unpackStringTxt(msg, off)\n")
case `dns:"opt"`:
o("rr.%s, off, err = unpackDataOpt(msg, off)\n")
case `dns:"nsec"`:
o("rr.%s, off, err = unpackDataNsec(msg, off)\n")
case `dns:"domain-name"`:
o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch st.Tag(i) {
case `dns:"-"`: // ignored
case `dns:"cdomain-name"`:
fallthrough
case `dns:"domain-name"`:
o("rr.%s, off, err = UnpackDomainName(msg, off)\n")
case `dns:"a"`:
o("rr.%s, off, err = unpackDataA(msg, off)\n")
case `dns:"aaaa"`:
o("rr.%s, off, err = unpackDataAAAA(msg, off)\n")
case `dns:"uint48"`:
o("rr.%s, off, err = unpackUint48(msg, off)\n")
case `dns:"txt"`:
o("rr.%s, off, err = unpackString(msg, off)\n")
case `dns:"base32"`:
o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"base64"`:
o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"hex"`:
o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"octet"`:
o("rr.%s, off, err = unpackStringOctet(msg, off)\n")
case "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("rr.%s, off, err = unpackUint8(msg, off)\n")
case types.Uint16:
o("rr.%s, off, err = unpackUint16(msg, off)\n")
case types.Uint32:
o("rr.%s, off, err = unpackUint32(msg, off)\n")
case types.Uint64:
o("rr.%s, off, err = unpackUint64(msg, off)\n")
case types.String:
o("rr.%s, off, err = unpackString(msg, off)\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
// If we've hit len(msg) we return without error.
if i < st.NumFields()-1 {
fmt.Fprintf(b, `if off == len(msg) {
return rr, off, nil
}
`)
}
}
fmt.Fprintf(b, "return rr, off, err }\n\n")
}
// Generate typeToUnpack map
fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){")
for _, name := range namedTypes {
if name == "RFC3597" {
continue
}
fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name)
}
fmt.Fprintln(b, "}\n")
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
// write result
f, err := os.Create("zmsg.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string.
func structMember(s string) string {
fields := strings.Split(s, ":")
if len(fields) == 0 {
return ""
}
f := fields[len(fields)-1]
// f should have a closing "
if len(f) > 1 {
return f[:len(f)-1]
}
return f
}
// structTag will take a tag like dns:"size-base32:SaltLength" and return base32.
func structTag(s string) string {
fields := strings.Split(s, ":")
if len(fields) < 2 {
return ""
}
return fields[1][len("\"size-"):]
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}
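For orientation, a sketch (not part of the diff) of what the unpack generator above emits for a simple two-field type such as MX, which has an untagged uint16 Preference and an Mx field tagged `dns:"cdomain-name"`; the real output is additionally run through format.Source before landing in zmsg.go:

func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) {
	rr := new(MX)
	rr.Hdr = h
	if noRdata(h) {
		return rr, off, nil
	}
	var err error
	rdStart := off
	_ = rdStart
	// untagged uint16 field
	rr.Preference, off, err = unpackUint16(msg, off)
	if err != nil {
		return rr, off, err
	}
	// emitted between fields: if we've hit len(msg) we return without error
	if off == len(msg) {
		return rr, off, nil
	}
	// dns:"cdomain-name" field
	rr.Mx, off, err = UnpackDomainName(msg, off)
	if err != nil {
		return rr, off, err
	}
	return rr, off, err
}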

630
vendor/github.com/miekg/dns/msg_helpers.go generated vendored Normal file
View file

@ -0,0 +1,630 @@
package dns
import (
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"net"
"strconv"
)
// helper functions called from the generated zmsg.go
// These functions are named after the tag they help pack/unpack; when there is no tag they are
// named after the type they pack/unpack (string, int, etc.). We prefix all with unpackData or
// packData, as in packDataA or packDataDomainNames.
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv4len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking a"}
}
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
off += net.IPv4len
return a, off, nil
}
func packDataA(a net.IP, msg []byte, off int) (int, error) {
// The address must pack into 4 bytes; a 16-byte IP is reduced to its IPv4 form via To4.
if off+net.IPv4len > len(msg) {
return len(msg), &Error{err: "overflow packing a"}
}
switch len(a) {
case net.IPv4len, net.IPv6len:
copy(msg[off:], a.To4())
off += net.IPv4len
case 0:
// Allowed, for dynamic updates.
default:
return len(msg), &Error{err: "overflow packing a"}
}
return off, nil
}
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv6len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
}
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
off += net.IPv6len
return aaaa, off, nil
}
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
if off+net.IPv6len > len(msg) {
return len(msg), &Error{err: "overflow packing aaaa"}
}
switch len(aaaa) {
case net.IPv6len:
copy(msg[off:], aaaa)
off += net.IPv6len
case 0:
// Allowed, dynamic updates.
default:
return len(msg), &Error{err: "overflow packing aaaa"}
}
return off, nil
}
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
// re-sliced msg according to the expected length of the RR.
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
hdr := RR_Header{}
if off == len(msg) {
return hdr, off, msg, nil
}
hdr.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rrtype, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Class, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Ttl, off, err = unpackUint32(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rdlength, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
return hdr, off, msg, nil
}
// pack packs an RR header, returning the offset to the end of the header.
// See PackDomainName for documentation about the compression.
func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
if off == len(msg) {
return off, nil
}
off, err = PackDomainName(hdr.Name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Rrtype, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Class, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint32(hdr.Ttl, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Rdlength, msg, off)
if err != nil {
return len(msg), err
}
return off, nil
}
// Other helper functions.
// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
// It returns an error if msg is smaller than the expected size.
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
lenrd := off + int(rdlength)
if lenrd > len(msg) {
return msg, &Error{err: "overflowing header size"}
}
return msg[:lenrd], nil
}
func fromBase32(s []byte) (buf []byte, err error) {
buflen := base32.HexEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base32.HexEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) }
func fromBase64(s []byte) (buf []byte, err error) {
buflen := base64.StdEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base64.StdEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
// noRdata returns true if the Rdlength is zero (as in a dynamic update).
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
if off+1 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
}
return uint8(msg[off]), off + 1, nil
}
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
if off+1 > len(msg) {
return len(msg), &Error{err: "overflow packing uint8"}
}
msg[off] = byte(i)
return off + 1, nil
}
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
if off+2 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
}
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
}
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
if off+2 > len(msg) {
return len(msg), &Error{err: "overflow packing uint16"}
}
binary.BigEndian.PutUint16(msg[off:], i)
return off + 2, nil
}
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
if off+4 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
}
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
}
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
if off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing uint32"}
}
binary.BigEndian.PutUint32(msg[off:], i)
return off + 4, nil
}
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
if off+6 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
}
// Used in TSIG, where only the lower 48 bits of the time are used, so we assume a uint48 (6 bytes)
i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
uint64(msg[off+4])<<8 | uint64(msg[off+5])))
off += 6
return i, off, nil
}
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
if off+6 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
}
msg[off] = byte(i >> 40)
msg[off+1] = byte(i >> 32)
msg[off+2] = byte(i >> 24)
msg[off+3] = byte(i >> 16)
msg[off+4] = byte(i >> 8)
msg[off+5] = byte(i)
off += 6
return off, nil
}
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
if off+8 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
}
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
}
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
if off+8 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64"}
}
binary.BigEndian.PutUint64(msg[off:], i)
off += 8
return off, nil
}
func unpackString(msg []byte, off int) (string, int, error) {
if off+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
l := int(msg[off])
if off+l+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
s := make([]byte, 0, l)
for _, b := range msg[off+1 : off+1+l] {
switch b {
case '"', '\\':
s = append(s, '\\', b)
case '\t', '\r', '\n':
s = append(s, b)
default:
if b < 32 || b > 127 { // unprintable
var buf [3]byte
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
s = append(s, '\\')
for i := 0; i < 3-len(bufs); i++ {
s = append(s, '0')
}
for _, r := range bufs {
s = append(s, r)
}
} else {
s = append(s, b)
}
}
}
off += 1 + l
return string(s), off, nil
}
func packString(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packTxtString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base32"}
}
s := toBase32(msg[off:end])
return s, end, nil
}
func packStringBase32(s string, msg []byte, off int) (int, error) {
b32, err := fromBase32([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b32) > len(msg) {
return len(msg), &Error{err: "overflow packing base32"}
}
copy(msg[off:off+len(b32)], b32)
off += len(b32)
return off, nil
}
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
// The rest of the RR is a base64-encoded value, so no explicit length needs to
// be set. So far, every RR with a base64-encoded field has it as its last field.
// What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base64"}
}
s := toBase64(msg[off:end])
return s, end, nil
}
func packStringBase64(s string, msg []byte, off int) (int, error) {
b64, err := fromBase64([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b64) > len(msg) {
return len(msg), &Error{err: "overflow packing base64"}
}
copy(msg[off:off+len(b64)], b64)
off += len(b64)
return off, nil
}
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
// The rest of the RR is a hex-encoded value, so no explicit length needs to be
// set (NSEC and TSIG have hex fields with their own length field; those use the
// size-hex tag instead). What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking hex"}
}
s := hex.EncodeToString(msg[off:end])
return s, end, nil
}
func packStringHex(s string, msg []byte, off int) (int, error) {
h, err := hex.DecodeString(s)
if err != nil {
return len(msg), err
}
if off+(len(h)) > len(msg) {
return len(msg), &Error{err: "overflow packing hex"}
}
copy(msg[off:off+len(h)], h)
off += len(h)
return off, nil
}
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
txt, off, err := unpackTxt(msg, off)
if err != nil {
return nil, len(msg), err
}
return txt, off, nil
}
func packStringTxt(s []string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1) // If the whole string consists of \DDD escapes we need this many bytes.
off, err := packTxt(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
var edns []EDNS0
Option:
code := uint16(0)
if off+4 > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
code = binary.BigEndian.Uint16(msg[off:])
off += 2
optlen := binary.BigEndian.Uint16(msg[off:])
off += 2
if off+int(optlen) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
switch code {
case EDNS0NSID:
e := new(EDNS0_NSID)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0SUBNET, EDNS0SUBNETDRAFT:
e := new(EDNS0_SUBNET)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
if code == EDNS0SUBNETDRAFT {
e.DraftOption = true
}
case EDNS0COOKIE:
e := new(EDNS0_COOKIE)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0UL:
e := new(EDNS0_UL)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0LLQ:
e := new(EDNS0_LLQ)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DAU:
e := new(EDNS0_DAU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DHU:
e := new(EDNS0_DHU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0N3U:
e := new(EDNS0_N3U)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
default:
e := new(EDNS0_LOCAL)
e.Code = code
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
}
if off < len(msg) {
goto Option
}
return edns, off, nil
}
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
for _, el := range options {
b, err := el.pack()
if err != nil || off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
off += 4
if off+len(b) > len(msg) {
copy(msg[off:], b)
off = len(msg)
continue
}
// Actual data
copy(msg[off:off+len(b)], b)
off += len(b)
}
return off, nil
}
func unpackStringOctet(msg []byte, off int) (string, int, error) {
s := string(msg[off:])
return s, len(msg), nil
}
func packStringOctet(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packOctetString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
var nsec []uint16
length, window, lastwindow := 0, 0, -1
for off < len(msg) {
if off+2 > len(msg) {
return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
}
window = int(msg[off])
length = int(msg[off+1])
off += 2
if window <= lastwindow {
// RFC 4034: Blocks are present in the NSEC RR RDATA in
// increasing numerical order.
return nsec, len(msg), &Error{err: "out of order NSEC block"}
}
if length == 0 {
// RFC 4034: Blocks with no types present MUST NOT be included.
return nsec, len(msg), &Error{err: "empty NSEC block"}
}
if length > 32 {
return nsec, len(msg), &Error{err: "NSEC block too long"}
}
if off+length > len(msg) {
return nsec, len(msg), &Error{err: "overflowing NSEC block"}
}
// Walk the bytes in the window and extract the type bits
for j := 0; j < length; j++ {
b := msg[off+j]
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0))
}
if b&0x40 == 0x40 {
nsec = append(nsec, uint16(window*256+j*8+1))
}
if b&0x20 == 0x20 {
nsec = append(nsec, uint16(window*256+j*8+2))
}
if b&0x10 == 0x10 {
nsec = append(nsec, uint16(window*256+j*8+3))
}
if b&0x8 == 0x8 {
nsec = append(nsec, uint16(window*256+j*8+4))
}
if b&0x4 == 0x4 {
nsec = append(nsec, uint16(window*256+j*8+5))
}
if b&0x2 == 0x2 {
nsec = append(nsec, uint16(window*256+j*8+6))
}
if b&0x1 == 0x1 {
nsec = append(nsec, uint16(window*256+j*8+7))
}
}
off += length
lastwindow = window
}
return nsec, off, nil
}
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
if len(bitmap) == 0 {
return off, nil
}
var lastwindow, lastlength uint16
for j := 0; j < len(bitmap); j++ {
t := bitmap[j]
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
off += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
return len(msg), &Error{err: "nsec bits out of order"}
}
if off+2+int(length) > len(msg) {
return len(msg), &Error{err: "overflow packing nsec"}
}
// Setting the window #
msg[off] = byte(window)
// Setting the octets length
msg[off+1] = byte(length)
// Setting the bit value for the type in the right octet
msg[off+1+int(length)] |= byte(1 << (7 - (t % 8)))
lastwindow, lastlength = window, length
}
off += int(lastlength) + 2
return off, nil
}
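// Worked example (illustrative): a bitmap holding only TypeMX (15) has
// window = 15/256 = 0, byte index 15/8 = 1, and bit mask 1<<(7-15%8) = 0x01,
// so packDataNsec emits window octet 0x00, length octet 0x02 and data bytes
// {0x00, 0x01}; unpackDataNsec reverses this to []uint16{15}.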
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
var (
servers []string
s string
err error
)
if end > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
}
for off < end {
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return servers, len(msg), err
}
servers = append(servers, s)
}
return servers, off, nil
}
func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) {
var err error
for j := 0; j < len(names); j++ {
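// Compression is effectively disabled here: false && compress is always
// false, so these domain names are packed uncompressed.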
off, err = PackDomainName(names[j], msg, off, compression, false && compress)
if err != nil {
return len(msg), err
}
}
return off, nil
}
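A hedged round-trip sketch for the fixed-width helpers above; the value and the six-byte buffer are illustrative:

func exampleUint48RoundTrip() {
	buf := make([]byte, 6)
	if _, err := packUint48(0x010203040506, buf, 0); err != nil {
		return // only reachable if the buffer were too small
	}
	v, off, _ := unpackUint48(buf, 0)
	_, _ = v, off // v == 0x010203040506, off == 6
}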

13
vendor/github.com/miekg/dns/nsecx.go generated vendored
View file

@ -11,13 +11,12 @@ type saltWireFmt struct {
Salt string `dns:"size-hex"`
}
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in
// uppercase.
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
func HashName(label string, ha uint8, iter uint16, salt string) string {
saltwire := new(saltWireFmt)
saltwire.Salt = salt
wire := make([]byte, DefaultMsgSize)
n, err := PackStruct(saltwire, wire, 0)
n, err := packSaltWire(saltwire, wire)
if err != nil {
return ""
}
@ -110,3 +109,11 @@ func (rr *NSEC3) Match(name string) bool {
}
return false
}
func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) {
off, err := packStringHex(sw.Salt, msg, 0)
if err != nil {
return off, err
}
return off, nil
}
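An illustrative HashName call on top of the new packSaltWire path; the iteration count and salt are example values, and SHA1 is the package's hash constant:

hashed := dns.HashName("example.org.", dns.SHA1, 2, "DEAD")
_ = hashed // uppercase base32 NSEC3 hash of the owner name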

View file

@ -33,7 +33,7 @@ type PrivateRR struct {
func mkPrivateRR(rrtype uint16) *PrivateRR {
// Panics if RR is not an instance of PrivateRR.
rrfunc, ok := typeToRR[rrtype]
rrfunc, ok := TypeToRR[rrtype]
if !ok {
panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
}
@ -43,7 +43,7 @@ func mkPrivateRR(rrtype uint16) *PrivateRR {
case *PrivateRR:
return rr
}
panic(fmt.Sprintf("dns: RR is not a PrivateRR, typeToRR[%d] generator returned %T", rrtype, anyrr))
panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
}
// Header return the RR header of r.
@ -65,29 +65,60 @@ func (r *PrivateRR) copy() RR {
}
return rr
}
func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
off, err := r.Hdr.pack(msg, off, compression, compress)
if err != nil {
return off, err
}
headerEnd := off
n, err := r.Data.Pack(msg[off:])
if err != nil {
return len(msg), err
}
off += n
r.Header().Rdlength = uint16(off - headerEnd)
return off, nil
}
// PrivateHandle registers a private resource record type. It requires
// string and numeric representation of private RR type and generator function as argument.
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
rtypestr = strings.ToUpper(rtypestr)
typeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
TypeToString[rtype] = rtypestr
StringToType[rtypestr] = rtype
typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) {
if noRdata(h) {
return &h, off, nil
}
var err error
rr := mkPrivateRR(h.Rrtype)
rr.Hdr = h
off1, err := rr.Data.Unpack(msg[off:])
off += off1
if err != nil {
return rr, off, err
}
return rr, off, err
}
setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := mkPrivateRR(h.Rrtype)
rr.Hdr = h
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
FETCH:
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l = <-c; l.value {
case zNewline, zEOF:
break FETCH
break Fetch
case zString:
text = append(text, l.token)
}
@ -108,10 +139,11 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata)
func PrivateHandleRemove(rtype uint16) {
rtypestr, ok := TypeToString[rtype]
if ok {
delete(typeToRR, rtype)
delete(TypeToRR, rtype)
delete(TypeToString, rtype)
delete(typeToparserFunc, rtype)
delete(StringToType, rtypestr)
delete(typeToUnpack, rtype)
}
return
}
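A minimal registration sketch against the renamed TypeToRR plumbing; the name MYTYPE, the private code point 0xFF00, and the MYRDATA type are hypothetical, with MYRDATA assumed to implement dns.PrivateRdata:

dns.PrivateHandle("MYTYPE", 0xFF00, func() dns.PrivateRdata { return new(MYRDATA) })
defer dns.PrivateHandleRemove(0xFF00)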

View file

@ -1,52 +1,6 @@
package dns
// These raw* functions do not use reflection, they directly set the values
// in the buffer. They are faster than their reflection counterparts.
// RawSetId sets the message id in buf.
func rawSetId(msg []byte, i uint16) bool {
if len(msg) < 2 {
return false
}
msg[0], msg[1] = packUint16(i)
return true
}
// rawSetQuestionLen sets the length of the question section.
func rawSetQuestionLen(msg []byte, i uint16) bool {
if len(msg) < 6 {
return false
}
msg[4], msg[5] = packUint16(i)
return true
}
// rawSetAnswerLen sets the length of the answer section.
func rawSetAnswerLen(msg []byte, i uint16) bool {
if len(msg) < 8 {
return false
}
msg[6], msg[7] = packUint16(i)
return true
}
// rawSetNsLen sets the length of the authority section.
func rawSetNsLen(msg []byte, i uint16) bool {
if len(msg) < 10 {
return false
}
msg[8], msg[9] = packUint16(i)
return true
}
// rawSetExtraLen sets the length of the additional section.
func rawSetExtraLen(msg []byte, i uint16) bool {
if len(msg) < 12 {
return false
}
msg[10], msg[11] = packUint16(i)
return true
}
import "encoding/binary"
// rawSetRdlength sets the rdlength in the header of
// the RR. The offset 'off' must be positioned at the
@ -90,6 +44,6 @@ Loop:
if rdatalen > 0xFFFF {
return false
}
msg[off], msg[off+1] = packUint16(uint16(rdatalen))
binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen))
return true
}
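// The binary.BigEndian call is byte-for-byte what the removed two-return
// packUint16 produced: msg[off], msg[off+1] = byte(i>>8), byte(i).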

38
vendor/github.com/miekg/dns/reverse.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
package dns
// StringToType is the reverse of TypeToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
// Map of opcodes strings.
var StringToOpcode = reverseInt(OpcodeToString)
// Map of rcodes strings.
var StringToRcode = reverseInt(RcodeToString)
// Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 {
n := make(map[string]uint8, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt16(m map[uint16]string) map[string]uint16 {
n := make(map[string]uint16, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt(m map[int]string) map[string]int {
n := make(map[string]int, len(m))
for u, s := range m {
n[s] = u
}
return n
}
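// For example, StringToType["MX"] == TypeMX (15), inverting
// TypeToString[TypeMX] == "MX".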

View file

@ -67,7 +67,7 @@ const (
)
// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
// where the error occured.
// where the error occurred.
type ParseError struct {
file string
err string
@ -86,7 +86,7 @@ func (e *ParseError) Error() (s string) {
type lex struct {
token string // text of the token
tokenUpper string // uppercase text of the token
length int // length of the token
length int // length of the token
err bool // when true, token text has lexer error
value uint8 // value: zString, _BLANK, etc.
line int // line in the file
@ -99,7 +99,7 @@ type lex struct {
type Token struct {
// The scanned resource record when error is not nil.
RR
// When an error occured, this has the error specifics.
// When an error occurred, this has the error specifics.
Error *ParseError
// A potential comment positioned after the RR and on the same line.
Comment string
@ -144,8 +144,10 @@ func ReadRR(q io.Reader, filename string) (RR, error) {
//
// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
// if x.Error != nil {
// // Do something with x.RR
// }
// // log.Println(x.Error)
// } else {
// // Do something with x.RR
// }
// }
//
// Comments specified after an RR (and on the same line!) are returned too:
@ -375,8 +377,8 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
return
}
if e := generate(l, c, t, origin); e != "" {
t <- &Token{Error: &ParseError{f, e, l}}
if errMsg := generate(l, c, t, origin); errMsg != "" {
t <- &Token{Error: &ParseError{f, errMsg, l}}
return
}
st = zExpectOwnerDir
@ -625,6 +627,7 @@ func zlexer(s *scan, c chan lex) {
if stri > 0 {
l.value = zString
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
debug.Printf("[4 %+v]", l.token)
c <- l
@ -661,6 +664,7 @@ func zlexer(s *scan, c chan lex) {
owner = true
l.value = zNewline
l.token = "\n"
l.tokenUpper = l.token
l.length = 1
l.comment = string(com[:comi])
debug.Printf("[3 %+v %+v]", l.token, l.comment)
@ -694,6 +698,7 @@ func zlexer(s *scan, c chan lex) {
}
l.value = zNewline
l.token = "\n"
l.tokenUpper = l.token
l.length = 1
debug.Printf("[1 %+v]", l.token)
c <- l
@ -738,6 +743,7 @@ func zlexer(s *scan, c chan lex) {
if stri != 0 {
l.value = zString
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
debug.Printf("[%+v]", l.token)
@ -748,6 +754,7 @@ func zlexer(s *scan, c chan lex) {
// send quote itself as separate token
l.value = zQuote
l.token = "\""
l.tokenUpper = l.token
l.length = 1
c <- l
quote = !quote
@ -773,6 +780,7 @@ func zlexer(s *scan, c chan lex) {
brace--
if brace < 0 {
l.token = "extra closing brace"
l.tokenUpper = l.token
l.err = true
debug.Printf("[%+v]", l.token)
c <- l
@ -797,6 +805,7 @@ func zlexer(s *scan, c chan lex) {
if stri > 0 {
// Send remainder
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
l.value = zString
debug.Printf("[%+v]", l.token)
@ -964,8 +973,8 @@ func stringToNodeID(l lex) (uint64, *ParseError) {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
u, e := strconv.ParseUint(s, 16, 64)
if e != nil {
u, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
}
return u, nil
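A hedged one-liner exercising the parsing entry points this lexer feeds; the record text is an example:

rr, err := dns.NewRR("example.org. 3600 IN MX 10 mx.example.org.")
_, _ = rr, err // rr is a *dns.MX on success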

View file

@ -1443,64 +1443,6 @@ func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return rr, nil, ""
}
func setWKS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(WKS)
rr.Hdr = h
l := <-c
if l.length == 0 {
return rr, nil, l.comment
}
rr.Address = net.ParseIP(l.token)
if rr.Address == nil || l.err {
return nil, &ParseError{f, "bad WKS Address", l}, ""
}
<-c // zBlank
l = <-c
proto := "tcp"
i, e := strconv.Atoi(l.token)
if e != nil || l.err {
return nil, &ParseError{f, "bad WKS Protocol", l}, ""
}
rr.Protocol = uint8(i)
switch rr.Protocol {
case 17:
proto = "udp"
case 6:
proto = "tcp"
default:
return nil, &ParseError{f, "bad WKS Protocol", l}, ""
}
<-c
l = <-c
rr.BitMap = make([]uint16, 0)
var (
k int
err error
)
for l.value != zNewline && l.value != zEOF {
switch l.value {
case zBlank:
// Ok
case zString:
if k, err = net.LookupPort(proto, l.token); err != nil {
i, e := strconv.Atoi(l.token) // If a number use that
if e != nil {
return nil, &ParseError{f, "bad WKS BitMap", l}, ""
}
rr.BitMap = append(rr.BitMap, uint16(i))
}
rr.BitMap = append(rr.BitMap, uint16(k))
default:
return nil, &ParseError{f, "bad WKS BitMap", l}, ""
}
l = <-c
}
return rr, nil, l.comment
}
func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(SSHFP)
rr.Hdr = h
@ -1804,6 +1746,41 @@ func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return rr, nil, c1
}
func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(SMIMEA)
rr.Hdr = h
l := <-c
if l.length == 0 {
return rr, nil, l.comment
}
i, e := strconv.Atoi(l.token)
if e != nil || l.err {
return nil, &ParseError{f, "bad SMIMEA Usage", l}, ""
}
rr.Usage = uint8(i)
<-c // zBlank
l = <-c
i, e = strconv.Atoi(l.token)
if e != nil || l.err {
return nil, &ParseError{f, "bad SMIMEA Selector", l}, ""
}
rr.Selector = uint8(i)
<-c // zBlank
l = <-c
i, e = strconv.Atoi(l.token)
if e != nil || l.err {
return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, ""
}
rr.MatchingType = uint8(i)
// So this needs to be e2 (i.e. different from e), because...?
s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f)
if e2 != nil {
return nil, e2, c1
}
rr.Certificate = s
return rr, nil, c1
}
func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(RFC3597)
rr.Hdr = h
@ -2103,73 +2080,6 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return rr, nil, ""
}
func setIPSECKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(IPSECKEY)
rr.Hdr = h
l := <-c
if l.length == 0 {
return rr, nil, l.comment
}
i, err := strconv.Atoi(l.token)
if err != nil || l.err {
return nil, &ParseError{f, "bad IPSECKEY Precedence", l}, ""
}
rr.Precedence = uint8(i)
<-c // zBlank
l = <-c
i, err = strconv.Atoi(l.token)
if err != nil || l.err {
return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, ""
}
rr.GatewayType = uint8(i)
<-c // zBlank
l = <-c
i, err = strconv.Atoi(l.token)
if err != nil || l.err {
return nil, &ParseError{f, "bad IPSECKEY Algorithm", l}, ""
}
rr.Algorithm = uint8(i)
// Now according to GatewayType we can have different elements here
<-c // zBlank
l = <-c
switch rr.GatewayType {
case 0:
fallthrough
case 3:
rr.GatewayName = l.token
if l.token == "@" {
rr.GatewayName = o
}
_, ok := IsDomainName(l.token)
if !ok || l.length == 0 || l.err {
return nil, &ParseError{f, "bad IPSECKEY GatewayName", l}, ""
}
if rr.GatewayName[l.length-1] != '.' {
rr.GatewayName = appendOrigin(rr.GatewayName, o)
}
case 1:
rr.GatewayA = net.ParseIP(l.token)
if rr.GatewayA == nil {
return nil, &ParseError{f, "bad IPSECKEY GatewayA", l}, ""
}
case 2:
rr.GatewayAAAA = net.ParseIP(l.token)
if rr.GatewayAAAA == nil {
return nil, &ParseError{f, "bad IPSECKEY GatewayAAAA", l}, ""
}
default:
return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, ""
}
s, e, c1 := endingToString(c, "bad IPSECKEY PublicKey", f)
if e != nil {
return nil, e, c1
}
rr.PublicKey = s
return rr, nil, c1
}
func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
rr := new(CAA)
rr.Hdr = h
@ -2203,68 +2113,67 @@ func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
}
var typeToparserFunc = map[uint16]parserFunc{
TypeAAAA: parserFunc{setAAAA, false},
TypeAFSDB: parserFunc{setAFSDB, false},
TypeA: parserFunc{setA, false},
TypeCAA: parserFunc{setCAA, true},
TypeCDS: parserFunc{setCDS, true},
TypeCDNSKEY: parserFunc{setCDNSKEY, true},
TypeCERT: parserFunc{setCERT, true},
TypeCNAME: parserFunc{setCNAME, false},
TypeDHCID: parserFunc{setDHCID, true},
TypeDLV: parserFunc{setDLV, true},
TypeDNAME: parserFunc{setDNAME, false},
TypeKEY: parserFunc{setKEY, true},
TypeDNSKEY: parserFunc{setDNSKEY, true},
TypeDS: parserFunc{setDS, true},
TypeEID: parserFunc{setEID, true},
TypeEUI48: parserFunc{setEUI48, false},
TypeEUI64: parserFunc{setEUI64, false},
TypeGID: parserFunc{setGID, false},
TypeGPOS: parserFunc{setGPOS, false},
TypeHINFO: parserFunc{setHINFO, true},
TypeHIP: parserFunc{setHIP, true},
TypeIPSECKEY: parserFunc{setIPSECKEY, true},
TypeKX: parserFunc{setKX, false},
TypeL32: parserFunc{setL32, false},
TypeL64: parserFunc{setL64, false},
TypeLOC: parserFunc{setLOC, true},
TypeLP: parserFunc{setLP, false},
TypeMB: parserFunc{setMB, false},
TypeMD: parserFunc{setMD, false},
TypeMF: parserFunc{setMF, false},
TypeMG: parserFunc{setMG, false},
TypeMINFO: parserFunc{setMINFO, false},
TypeMR: parserFunc{setMR, false},
TypeMX: parserFunc{setMX, false},
TypeNAPTR: parserFunc{setNAPTR, false},
TypeNID: parserFunc{setNID, false},
TypeNIMLOC: parserFunc{setNIMLOC, true},
TypeNINFO: parserFunc{setNINFO, true},
TypeNSAPPTR: parserFunc{setNSAPPTR, false},
TypeNSEC3PARAM: parserFunc{setNSEC3PARAM, false},
TypeNSEC3: parserFunc{setNSEC3, true},
TypeNSEC: parserFunc{setNSEC, true},
TypeNS: parserFunc{setNS, false},
TypeOPENPGPKEY: parserFunc{setOPENPGPKEY, true},
TypePTR: parserFunc{setPTR, false},
TypePX: parserFunc{setPX, false},
TypeSIG: parserFunc{setSIG, true},
TypeRKEY: parserFunc{setRKEY, true},
TypeRP: parserFunc{setRP, false},
TypeRRSIG: parserFunc{setRRSIG, true},
TypeRT: parserFunc{setRT, false},
TypeSOA: parserFunc{setSOA, false},
TypeSPF: parserFunc{setSPF, true},
TypeSRV: parserFunc{setSRV, false},
TypeSSHFP: parserFunc{setSSHFP, true},
TypeTALINK: parserFunc{setTALINK, false},
TypeTA: parserFunc{setTA, true},
TypeTLSA: parserFunc{setTLSA, true},
TypeTXT: parserFunc{setTXT, true},
TypeUID: parserFunc{setUID, false},
TypeUINFO: parserFunc{setUINFO, true},
TypeURI: parserFunc{setURI, true},
TypeWKS: parserFunc{setWKS, true},
TypeX25: parserFunc{setX25, false},
TypeAAAA: {setAAAA, false},
TypeAFSDB: {setAFSDB, false},
TypeA: {setA, false},
TypeCAA: {setCAA, true},
TypeCDS: {setCDS, true},
TypeCDNSKEY: {setCDNSKEY, true},
TypeCERT: {setCERT, true},
TypeCNAME: {setCNAME, false},
TypeDHCID: {setDHCID, true},
TypeDLV: {setDLV, true},
TypeDNAME: {setDNAME, false},
TypeKEY: {setKEY, true},
TypeDNSKEY: {setDNSKEY, true},
TypeDS: {setDS, true},
TypeEID: {setEID, true},
TypeEUI48: {setEUI48, false},
TypeEUI64: {setEUI64, false},
TypeGID: {setGID, false},
TypeGPOS: {setGPOS, false},
TypeHINFO: {setHINFO, true},
TypeHIP: {setHIP, true},
TypeKX: {setKX, false},
TypeL32: {setL32, false},
TypeL64: {setL64, false},
TypeLOC: {setLOC, true},
TypeLP: {setLP, false},
TypeMB: {setMB, false},
TypeMD: {setMD, false},
TypeMF: {setMF, false},
TypeMG: {setMG, false},
TypeMINFO: {setMINFO, false},
TypeMR: {setMR, false},
TypeMX: {setMX, false},
TypeNAPTR: {setNAPTR, false},
TypeNID: {setNID, false},
TypeNIMLOC: {setNIMLOC, true},
TypeNINFO: {setNINFO, true},
TypeNSAPPTR: {setNSAPPTR, false},
TypeNSEC3PARAM: {setNSEC3PARAM, false},
TypeNSEC3: {setNSEC3, true},
TypeNSEC: {setNSEC, true},
TypeNS: {setNS, false},
TypeOPENPGPKEY: {setOPENPGPKEY, true},
TypePTR: {setPTR, false},
TypePX: {setPX, false},
TypeSIG: {setSIG, true},
TypeRKEY: {setRKEY, true},
TypeRP: {setRP, false},
TypeRRSIG: {setRRSIG, true},
TypeRT: {setRT, false},
TypeSMIMEA: {setSMIMEA, true},
TypeSOA: {setSOA, false},
TypeSPF: {setSPF, true},
TypeSRV: {setSRV, false},
TypeSSHFP: {setSSHFP, true},
TypeTALINK: {setTALINK, false},
TypeTA: {setTA, true},
TypeTLSA: {setTLSA, true},
TypeTXT: {setTXT, true},
TypeUID: {setUID, false},
TypeUINFO: {setUINFO, true},
TypeURI: {setURI, true},
TypeX25: {setX25, false},
}
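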

236
vendor/github.com/miekg/dns/server.go generated vendored
View file

@ -4,6 +4,8 @@ package dns
import (
"bytes"
"crypto/tls"
"encoding/binary"
"io"
"net"
"sync"
@ -47,7 +49,7 @@ type response struct {
tsigRequestMAC string
tsigSecret map[string]string // the tsig secrets
udp *net.UDPConn // i/o connection if UDP was used
tcp *net.TCPConn // i/o connection if TCP was used
tcp net.Conn // i/o connection if TCP was used
udpSession *SessionUDP // oob data to get egress interface right
remoteAddr net.Addr // address of the client
writer Writer // writer to output the raw DNS bits
@ -92,13 +94,35 @@ func HandleFailed(w ResponseWriter, r *Msg) {
func failedHandler() Handler { return HandlerFunc(HandleFailed) }
// ListenAndServe Starts a server on addresss and network speficied. Invoke handler
// ListenAndServe starts a server on the address and network specified, invoking handler
// for incoming queries.
func ListenAndServe(addr string, network string, handler Handler) error {
server := &Server{Addr: addr, Net: network, Handler: handler}
return server.ListenAndServe()
}
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
// http://golang.org/pkg/net/http/#ListenAndServeTLS
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
config := tls.Config{
Certificates: []tls.Certificate{cert},
}
server := &Server{
Addr: addr,
Net: "tcp-tls",
TLSConfig: &config,
Handler: handler,
}
return server.ListenAndServe()
}
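// A hedged usage sketch for the new DNS-over-TLS entry point; the port,
// certificate paths, and echo handler are placeholders:
//
//	handler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
//		m := new(dns.Msg)
//		m.SetReply(r)
//		w.WriteMsg(m)
//	})
//	err := dns.ListenAndServeTLS(":853", "cert.pem", "key.pem", handler)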
// ActivateAndServe activates a server with a listener from systemd,
// l and p should not both be non-nil.
// If both l and p are not nil only p will be used.
@ -123,7 +147,7 @@ func (mux *ServeMux) match(q string, t uint16) Handler {
b[i] |= ('a' - 'A')
}
}
if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key
if t != TypeDS {
return h
}
@ -210,7 +234,7 @@ type Writer interface {
type Reader interface {
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error)
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
@ -222,7 +246,7 @@ type defaultReader struct {
*Server
}
func (dr *defaultReader) ReadTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) {
func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
return dr.readTCP(conn, timeout)
}
@ -242,10 +266,12 @@ type DecorateWriter func(Writer) Writer
type Server struct {
// Address to listen on, ":dns" if empty.
Addr string
// if "tcp" it will invoke a TCP listener, otherwise an UDP one.
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
Net string
// TCP Listener to use, this is to aid in systemd's socket activation.
Listener net.Listener
// TLS connection configuration
TLSConfig *tls.Config
// UDP "Listener" to use, this is to aid in systemd's socket activation.
PacketConn net.PacketConn
// Handler to invoke, dns.DefaultServeMux if nil.
@ -262,7 +288,7 @@ type Server struct {
// Secret(s) for Tsig map[<zonename>]<base64 secret>.
TsigSecret map[string]string
// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
// the handler. It will specfically not check if the query has the QR bit not set.
// the handler. It will specifically not check if the query has the QR bit not set.
Unsafe bool
// If NotifyStartedFunc is set it is called once the server has started listening.
NotifyStartedFunc func()
@ -271,26 +297,21 @@ type Server struct {
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
DecorateWriter DecorateWriter
// For graceful shutdown.
stopUDP chan bool
stopTCP chan bool
wgUDP sync.WaitGroup
wgTCP sync.WaitGroup
// Graceful shutdown handling
// make start/shutdown not racy
lock sync.Mutex
inFlight sync.WaitGroup
lock sync.RWMutex
started bool
}
// ListenAndServe starts a nameserver on the configured address in *Server.
func (srv *Server) ListenAndServe() error {
srv.lock.Lock()
defer srv.lock.Unlock()
if srv.started {
srv.lock.Unlock()
return &Error{err: "server already started"}
}
srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
srv.started = true
addr := srv.Addr
if addr == "" {
addr = ":domain"
@ -300,34 +321,57 @@ func (srv *Server) ListenAndServe() error {
}
switch srv.Net {
case "tcp", "tcp4", "tcp6":
a, e := net.ResolveTCPAddr(srv.Net, addr)
if e != nil {
return e
a, err := net.ResolveTCPAddr(srv.Net, addr)
if err != nil {
return err
}
l, e := net.ListenTCP(srv.Net, a)
if e != nil {
return e
l, err := net.ListenTCP(srv.Net, a)
if err != nil {
return err
}
srv.Listener = l
srv.started = true
srv.lock.Unlock()
return srv.serveTCP(l)
case "udp", "udp4", "udp6":
a, e := net.ResolveUDPAddr(srv.Net, addr)
if e != nil {
return e
err = srv.serveTCP(l)
srv.lock.Lock() // to satisfy the defer at the top
return err
case "tcp-tls", "tcp4-tls", "tcp6-tls":
network := "tcp"
if srv.Net == "tcp4-tls" {
network = "tcp4"
} else if srv.Net == "tcp6" {
network = "tcp6"
}
l, e := net.ListenUDP(srv.Net, a)
if e != nil {
return e
l, err := tls.Listen(network, addr, srv.TLSConfig)
if err != nil {
return err
}
srv.Listener = l
srv.started = true
srv.lock.Unlock()
err = srv.serveTCP(l)
srv.lock.Lock() // to satisfy the defer at the top
return err
case "udp", "udp4", "udp6":
a, err := net.ResolveUDPAddr(srv.Net, addr)
if err != nil {
return err
}
l, err := net.ListenUDP(srv.Net, a)
if err != nil {
return err
}
if e := setUDPSocketOptions(l); e != nil {
return e
}
srv.PacketConn = l
srv.started = true
srv.lock.Unlock()
return srv.serveUDP(l)
err = srv.serveUDP(l)
srv.lock.Lock() // to satisfy the defer at the top
return err
}
srv.lock.Unlock()
return &Error{err: "bad network"}
}
@ -335,15 +379,12 @@ func (srv *Server) ListenAndServe() error {
// configured in *Server. Its main use is to start a server from systemd.
func (srv *Server) ActivateAndServe() error {
srv.lock.Lock()
defer srv.lock.Unlock()
if srv.started {
srv.lock.Unlock()
return &Error{err: "server already started"}
}
srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
srv.started = true
pConn := srv.PacketConn
l := srv.Listener
srv.lock.Unlock()
if pConn != nil {
if srv.UDPSize == 0 {
srv.UDPSize = MinMsgSize
@ -352,13 +393,19 @@ func (srv *Server) ActivateAndServe() error {
if e := setUDPSocketOptions(t); e != nil {
return e
}
return srv.serveUDP(t)
srv.started = true
srv.lock.Unlock()
e := srv.serveUDP(t)
srv.lock.Lock() // to satisfy the defer at the top
return e
}
}
if l != nil {
if t, ok := l.(*net.TCPListener); ok {
return srv.serveTCP(t)
}
srv.started = true
srv.lock.Unlock()
e := srv.serveTCP(l)
srv.lock.Lock() // to satisfy the defer at the top
return e
}
return &Error{err: "bad listeners"}
}
@ -374,36 +421,20 @@ func (srv *Server) Shutdown() error {
return &Error{err: "server not started"}
}
srv.started = false
net, addr := srv.Net, srv.Addr
switch {
case srv.Listener != nil:
a := srv.Listener.Addr()
net, addr = a.Network(), a.String()
case srv.PacketConn != nil:
a := srv.PacketConn.LocalAddr()
net, addr = a.Network(), a.String()
}
srv.lock.Unlock()
fin := make(chan bool)
switch net {
case "tcp", "tcp4", "tcp6":
go func() {
srv.stopTCP <- true
srv.wgTCP.Wait()
fin <- true
}()
case "udp", "udp4", "udp6":
go func() {
srv.stopUDP <- true
srv.wgUDP.Wait()
fin <- true
}()
if srv.PacketConn != nil {
srv.PacketConn.Close()
}
if srv.Listener != nil {
srv.Listener.Close()
}
c := &Client{Net: net}
go c.Exchange(new(Msg), addr) // extra query to help ReadXXX loop to pass
fin := make(chan bool)
go func() {
srv.inFlight.Wait()
fin <- true
}()
select {
case <-time.After(srv.getReadTimeout()):
@ -424,7 +455,7 @@ func (srv *Server) getReadTimeout() time.Duration {
// serveTCP starts a TCP listener for the server.
// Each request is handled in a separate goroutine.
func (srv *Server) serveTCP(l *net.TCPListener) error {
func (srv *Server) serveTCP(l net.Listener) error {
defer l.Close()
if srv.NotifyStartedFunc != nil {
@ -443,20 +474,24 @@ func (srv *Server) serveTCP(l *net.TCPListener) error {
rtimeout := srv.getReadTimeout()
// deadline is not used here
for {
rw, e := l.AcceptTCP()
if e != nil {
continue
rw, err := l.Accept()
if err != nil {
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
continue
}
return err
}
m, e := reader.ReadTCP(rw, rtimeout)
select {
case <-srv.stopTCP:
m, err := reader.ReadTCP(rw, rtimeout)
srv.lock.RLock()
if !srv.started {
srv.lock.RUnlock()
return nil
default:
}
if e != nil {
srv.lock.RUnlock()
if err != nil {
continue
}
srv.wgTCP.Add(1)
srv.inFlight.Add(1)
go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
}
}
@ -482,22 +517,25 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
rtimeout := srv.getReadTimeout()
// deadline is not used here
for {
m, s, e := reader.ReadUDP(l, rtimeout)
select {
case <-srv.stopUDP:
m, s, err := reader.ReadUDP(l, rtimeout)
srv.lock.RLock()
if !srv.started {
srv.lock.RUnlock()
return nil
default:
}
if e != nil {
srv.lock.RUnlock()
if err != nil {
continue
}
srv.wgUDP.Add(1)
srv.inFlight.Add(1)
go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
}
}
// Serve a new connection.
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t *net.TCPConn) {
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) {
defer srv.inFlight.Done()
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
@ -507,15 +545,6 @@ func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *Ses
q := 0 // counter for the amount of TCP queries we get
defer func() {
if u != nil {
srv.wgUDP.Done()
}
if t != nil {
srv.wgTCP.Done()
}
}()
reader := Reader(&defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
@ -548,6 +577,9 @@ Redo:
h.ServeDNS(w, req) // Writes back to the client
Exit:
if w.tcp == nil {
return
}
// TODO(miek): make this number configurable?
if q > maxTCPQueries { // close socket after this many queries
w.Close()
@ -565,8 +597,8 @@ Exit:
if srv.IdleTimeout != nil {
idleTimeout = srv.IdleTimeout()
}
m, e := reader.ReadTCP(w.tcp, idleTimeout)
if e == nil {
m, err = reader.ReadTCP(w.tcp, idleTimeout)
if err == nil {
q++
goto Redo
}
@ -574,7 +606,7 @@ Exit:
return
}
func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) {
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
conn.SetReadDeadline(time.Now().Add(timeout))
l := make([]byte, 2)
n, err := conn.Read(l)
@ -584,7 +616,7 @@ func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, er
}
return nil, ErrShortRead
}
length, _ := unpackUint16(l, 0)
length := binary.BigEndian.Uint16(l)
if length == 0 {
return nil, ErrShortRead
}
@ -612,10 +644,10 @@ func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, er
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
conn.SetReadDeadline(time.Now().Add(timeout))
m := make([]byte, srv.UDPSize)
n, s, e := ReadFromSessionUDP(conn, m)
if e != nil || n == 0 {
if e != nil {
return nil, nil, e
n, s, err := ReadFromSessionUDP(conn, m)
if err != nil || n == 0 {
if err != nil {
return nil, nil, err
}
return nil, nil, ErrShortRead
}
@ -659,7 +691,7 @@ func (w *response) Write(m []byte) (int, error) {
return 0, &Error{err: "message too large"}
}
l := make([]byte, 2, 2+lm)
l[0], l[1] = packUint16(uint16(lm))
binary.BigEndian.PutUint16(l, uint16(lm))
m = append(l, m...)
n, err := io.Copy(w.tcp, bytes.NewReader(m))

25
vendor/github.com/miekg/dns/sig0.go generated vendored
View file

@ -5,6 +5,7 @@ import (
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"encoding/binary"
"math/big"
"strings"
"time"
@ -67,13 +68,13 @@ func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
}
// Adjust sig data length
rdoff := len(mbuf) + 1 + 2 + 2 + 4
rdlen, _ := unpackUint16(buf, rdoff)
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
rdlen += uint16(len(sig))
buf[rdoff], buf[rdoff+1] = packUint16(rdlen)
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
// Adjust additional count
adc, _ := unpackUint16(buf, 10)
adc := binary.BigEndian.Uint16(buf[10:])
adc++
buf[10], buf[11] = packUint16(adc)
binary.BigEndian.PutUint16(buf[10:], adc)
return buf, nil
}
@ -103,10 +104,11 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
hasher := hash.New()
buflen := len(buf)
qdc, _ := unpackUint16(buf, 4)
anc, _ := unpackUint16(buf, 6)
auc, _ := unpackUint16(buf, 8)
adc, offset := unpackUint16(buf, 10)
qdc := binary.BigEndian.Uint16(buf[4:])
anc := binary.BigEndian.Uint16(buf[6:])
auc := binary.BigEndian.Uint16(buf[8:])
adc := binary.BigEndian.Uint16(buf[10:])
offset := 12
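// (DNS header layout, RFC 1035: ID msg[0:2], flags msg[2:4], QDCOUNT msg[4:6],
// ANCOUNT msg[6:8], NSCOUNT msg[8:10], ARCOUNT msg[10:12]; hence offset 12.)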
var err error
for i := uint16(0); i < qdc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
@ -127,7 +129,8 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
continue
}
var rdlen uint16
rdlen, offset = unpackUint16(buf, offset)
rdlen = binary.BigEndian.Uint16(buf[offset:])
offset += 2
offset += int(rdlen)
}
if offset >= buflen {
@ -149,9 +152,9 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
if offset+4+4 >= buflen {
return &Error{err: "overflow unpacking signed message"}
}
expire := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
expire := binary.BigEndian.Uint32(buf[offset:])
offset += 4
incept := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
incept := binary.BigEndian.Uint32(buf[offset:])
offset += 4
now := uint32(time.Now().Unix())
if now < incept || now > expire {

47
vendor/github.com/miekg/dns/smimea.go generated vendored Normal file
View file

@ -0,0 +1,47 @@
package dns
import (
"crypto/sha256"
"crypto/x509"
"encoding/hex"
)
// Sign creates a SMIMEA record from an SSL certificate.
func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
r.Hdr.Rrtype = TypeSMIMEA
r.Usage = uint8(usage)
r.Selector = uint8(selector)
r.MatchingType = uint8(matchingType)
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil {
return err
}
return nil
}
// Verify verifies a SMIMEA record against an SSL certificate. If it is OK
// a nil error is returned.
func (r *SMIMEA) Verify(cert *x509.Certificate) error {
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil {
return err // Not also ErrSig?
}
if r.Certificate == c {
return nil
}
return ErrSig // ErrSig, really?
}
// SMIMEAName returns the owner name of a SMIMEA resource record as per the
// format specified in RFC 'draft-ietf-dane-smime-12' Sections 2 and 3.
func SMIMEAName(email_address string, domain_name string) (string, error) {
hasher := sha256.New()
hasher.Write([]byte(email_address))
// RFC Section 3: "The local-part is hashed using the SHA2-256
// algorithm with the hash truncated to 28 octets and
// represented in its hexadecimal representation to become the
// left-most label in the prepared domain name"
return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain_name, nil
}
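An illustrative call (address and domain are examples); per the draft text quoted above, the 28-octet truncated hash yields 56 hex characters in the left-most label:

name, _ := dns.SMIMEAName("hugh@example.com", "example.com.")
// name == "<56 hex chars>._smimecert.example.com."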

47
vendor/github.com/miekg/dns/tlsa.go generated vendored
View file

@ -1,50 +1,11 @@
package dns
import (
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
"encoding/hex"
"errors"
"io"
"net"
"strconv"
)
// CertificateToDANE converts a certificate to a hex string as used in the TLSA record.
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
switch matchingType {
case 0:
switch selector {
case 0:
return hex.EncodeToString(cert.Raw), nil
case 1:
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
}
case 1:
h := sha256.New()
switch selector {
case 0:
io.WriteString(h, string(cert.Raw))
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
return hex.EncodeToString(h.Sum(nil)), nil
}
case 2:
h := sha512.New()
switch selector {
case 0:
io.WriteString(h, string(cert.Raw))
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
return hex.EncodeToString(h.Sum(nil)), nil
}
}
return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector")
}
// Sign creates a TLSA record from an SSL certificate.
func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
r.Hdr.Rrtype = TypeTLSA
@ -78,9 +39,9 @@ func TLSAName(name, service, network string) (string, error) {
if !IsFqdn(name) {
return "", ErrFqdn
}
p, e := net.LookupPort(network, service)
if e != nil {
return "", e
p, err := net.LookupPort(network, service)
if err != nil {
return "", err
}
return "_" + strconv.Itoa(p) + "_" + network + "." + name, nil
return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
}
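The final hunk above restores the missing dot before the network label. For example (service and owner name are illustrative):

name, err := dns.TLSAName("www.example.org.", "https", "tcp")
_ = err
// with the fix: name == "_443._tcp.www.example.org."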

173
vendor/github.com/miekg/dns/tsig.go generated vendored
View file

@ -6,6 +6,7 @@ import (
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/binary"
"encoding/hex"
"hash"
"io"
@ -30,15 +31,11 @@ type TSIG struct {
TimeSigned uint64 `dns:"uint48"`
Fudge uint16
MACSize uint16
MAC string `dns:"size-hex"`
MAC string `dns:"size-hex:MACSize"`
OrigId uint16
Error uint16
OtherLen uint16
OtherData string `dns:"size-hex"`
}
func (rr *TSIG) Header() *RR_Header {
return &rr.Hdr
OtherData string `dns:"size-hex:OtherLen"`
}
// TSIG has no official presentation format, but this will suffice.
@ -58,15 +55,6 @@ func (rr *TSIG) String() string {
return s
}
func (rr *TSIG) len() int {
return rr.Hdr.len() + len(rr.Algorithm) + 1 + 6 +
4 + len(rr.MAC)/2 + 1 + 6 + len(rr.OtherData)/2 + 1
}
func (rr *TSIG) copy() RR {
return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
}
// The following values must be put in wireformat, so that the MAC can be calculated.
// RFC 2845, section 3.4.2. TSIG Variables.
type tsigWireFmt struct {
@ -81,14 +69,13 @@ type tsigWireFmt struct {
// MACSize, MAC and OrigId excluded
Error uint16
OtherLen uint16
OtherData string `dns:"size-hex"`
OtherData string `dns:"size-hex:OtherLen"`
}
// If we have the MAC use this type to convert it to wiredata.
// Section 3.4.3. Request MAC
// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC
type macWireFmt struct {
MACSize uint16
MAC string `dns:"size-hex"`
MAC string `dns:"size-hex:MACSize"`
}
// 3.3. Time values used in TSIG calculations
@ -125,7 +112,7 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
t := new(TSIG)
var h hash.Hash
switch rr.Algorithm {
switch strings.ToLower(rr.Algorithm) {
case HmacMD5:
h = hmac.New(md5.New, []byte(rawsecret))
case HmacSHA1:
@ -154,7 +141,9 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
return nil, "", err
}
mbuf = append(mbuf, tbuf...)
rawSetExtraLen(mbuf, uint16(len(m.Extra)+1))
// Update the ArCount directly in the buffer.
binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
return mbuf, t.MAC, nil
}
@ -191,7 +180,7 @@ func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
}
var h hash.Hash
switch tsig.Algorithm {
switch strings.ToLower(tsig.Algorithm) {
case HmacMD5:
h = hmac.New(md5.New, rawsecret)
case HmacSHA1:
@ -225,7 +214,7 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
m.MACSize = uint16(len(requestMAC) / 2)
m.MAC = requestMAC
buf = make([]byte, len(requestMAC)) // long enough
n, _ := PackStruct(m, buf, 0)
n, _ := packMacWire(m, buf)
buf = buf[:n]
}
@ -234,7 +223,7 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
tsig := new(timerWireFmt)
tsig.TimeSigned = rr.TimeSigned
tsig.Fudge = rr.Fudge
n, _ := PackStruct(tsig, tsigvar, 0)
n, _ := packTimerWire(tsig, tsigvar)
tsigvar = tsigvar[:n]
} else {
tsig := new(tsigWireFmt)
@ -247,7 +236,7 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
tsig.Error = rr.Error
tsig.OtherLen = rr.OtherLen
tsig.OtherData = rr.OtherData
n, _ := PackStruct(tsig, tsigvar, 0)
n, _ := packTsigWire(tsig, tsigvar)
tsigvar = tsigvar[:n]
}
@ -262,60 +251,54 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
// Strip the TSIG from the raw message.
func stripTsig(msg []byte) ([]byte, *TSIG, error) {
// Copied from msg.go's Unpack()
// Header.
var dh Header
var err error
dns := new(Msg)
rr := new(TSIG)
off := 0
tsigoff := 0
if off, err = UnpackStruct(&dh, msg, off); err != nil {
// Copied from msg.go's Unpack() Header, but modified.
var (
dh Header
err error
)
off, tsigoff := 0, 0
if dh, off, err = unpackMsgHdr(msg, off); err != nil {
return nil, nil, err
}
if dh.Arcount == 0 {
return nil, nil, ErrNoSig
}
// Rcode, see msg.go Unpack()
if int(dh.Bits&0xF) == RcodeNotAuth {
return nil, nil, ErrAuth
}
// Arrays.
dns.Question = make([]Question, dh.Qdcount)
dns.Answer = make([]RR, dh.Ancount)
dns.Ns = make([]RR, dh.Nscount)
dns.Extra = make([]RR, dh.Arcount)
for i := 0; i < int(dh.Qdcount); i++ {
_, off, err = unpackQuestion(msg, off)
if err != nil {
return nil, nil, err
}
}
for i := 0; i < len(dns.Question); i++ {
off, err = UnpackStruct(&dns.Question[i], msg, off)
if err != nil {
return nil, nil, err
}
_, off, err = unpackRRslice(int(dh.Ancount), msg, off)
if err != nil {
return nil, nil, err
}
for i := 0; i < len(dns.Answer); i++ {
dns.Answer[i], off, err = UnpackRR(msg, off)
if err != nil {
return nil, nil, err
}
_, off, err = unpackRRslice(int(dh.Nscount), msg, off)
if err != nil {
return nil, nil, err
}
for i := 0; i < len(dns.Ns); i++ {
dns.Ns[i], off, err = UnpackRR(msg, off)
if err != nil {
return nil, nil, err
}
}
for i := 0; i < len(dns.Extra); i++ {
rr := new(TSIG)
var extra RR
for i := 0; i < int(dh.Arcount); i++ {
tsigoff = off
dns.Extra[i], off, err = UnpackRR(msg, off)
extra, off, err = UnpackRR(msg, off)
if err != nil {
return nil, nil, err
}
if dns.Extra[i].Header().Rrtype == TypeTSIG {
rr = dns.Extra[i].(*TSIG)
if extra.Header().Rrtype == TypeTSIG {
rr = extra.(*TSIG)
// Adjust Arcount.
arcount, _ := unpackUint16(msg, 10)
msg[10], msg[11] = packUint16(arcount - 1)
arcount := binary.BigEndian.Uint16(msg[10:])
binary.BigEndian.PutUint16(msg[10:], arcount-1)
break
}
}
@ -331,3 +314,71 @@ func tsigTimeToString(t uint64) string {
ti := time.Unix(int64(t), 0).UTC()
return ti.Format("20060102150405")
}
func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) {
// copied from zmsg.go TSIG packing
// RR_Header
off, err := PackDomainName(tw.Name, msg, 0, nil, false)
if err != nil {
return off, err
}
off, err = packUint16(tw.Class, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(tw.Ttl, msg, off)
if err != nil {
return off, err
}
off, err = PackDomainName(tw.Algorithm, msg, off, nil, false)
if err != nil {
return off, err
}
off, err = packUint48(tw.TimeSigned, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(tw.Fudge, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(tw.Error, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(tw.OtherLen, msg, off)
if err != nil {
return off, err
}
off, err = packStringHex(tw.OtherData, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func packMacWire(mw *macWireFmt, msg []byte) (int, error) {
off, err := packUint16(mw.MACSize, msg, 0)
if err != nil {
return off, err
}
off, err = packStringHex(mw.MAC, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) {
off, err := packUint48(tw.TimeSigned, msg, 0)
if err != nil {
return off, err
}
off, err = packUint16(tw.Fudge, msg, off)
if err != nil {
return off, err
}
return off, nil
}
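The timer variables are the only part re-signed in a timers-only continuation, and their wire form is just a 48-bit timestamp followed by a 16-bit fudge. A standalone sketch of that fixed layout (helper name and timestamp are made up; inside the package the equivalent work is done by packTimerWire):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// packUint48Sketch writes the low 48 bits of v in big-endian order,
// matching the 48-bit TimeSigned field of the TSIG timer variables.
func packUint48Sketch(v uint64, msg []byte, off int) int {
	msg[off] = byte(v >> 40)
	msg[off+1] = byte(v >> 32)
	msg[off+2] = byte(v >> 24)
	msg[off+3] = byte(v >> 16)
	msg[off+4] = byte(v >> 8)
	msg[off+5] = byte(v)
	return off + 6
}

func main() {
	buf := make([]byte, 8)
	off := packUint48Sketch(1477846861, buf, 0) // TimeSigned (arbitrary epoch seconds)
	binary.BigEndian.PutUint16(buf[off:], 300)  // Fudge
	fmt.Printf("% x\n", buf)
}
```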

593
vendor/github.com/miekg/dns/types.go generated vendored

File diff suppressed because it is too large

271
vendor/github.com/miekg/dns/types_generate.go generated vendored Normal file
View file

@ -0,0 +1,271 @@
//+build ignore
// types_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will generate conversion tables (TypeToRR and TypeToString) and banal
// methods (len, Header, copy) based on the struct tags. The generated source is
// written to ztypes.go, and is meant to be checked into git.
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
"strings"
"text/template"
)
var skipLen = map[string]struct{}{
"NSEC": {},
"NSEC3": {},
"OPT": {},
}
var packageHdr = `
// *** DO NOT MODIFY ***
// AUTOGENERATED BY go generate from types_generate.go
package dns
import (
"encoding/base64"
"net"
)
`
var TypeToRR = template.Must(template.New("TypeToRR").Parse(`
// TypeToRR is a map of constructors for each RR type.
var TypeToRR = map[uint16]func() RR{
{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) },
{{end}}{{end}} }
`))
var typeToString = template.Must(template.New("typeToString").Parse(`
// TypeToString is a map of strings for each RR type.
var TypeToString = map[uint16]string{
{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}",
{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR",
}
`))
var headerFunc = template.Must(template.New("headerFunc").Parse(`
// Header() functions
{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
{{end}}
`))
// getTypeStruct will take a type and the package scope, and return the
// (innermost) struct if the type is considered a RR type (currently defined as
// those structs beginning with a RR_Header, could be redefined as implementing
// the RR interface). The bool return value indicates if embedded structs were
// resolved.
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
// Collect constants like TypeX
var numberedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
b, ok := o.Type().(*types.Basic)
if !ok || b.Kind() != types.Uint16 {
continue
}
if !strings.HasPrefix(o.Name(), "Type") {
continue
}
name := strings.TrimPrefix(o.Name(), "Type")
if name == "PrivateRR" {
continue
}
numberedTypes = append(numberedTypes, name)
}
// Collect actual types (*X)
var namedTypes []string
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
continue
}
if name == "PrivateRR" {
continue
}
// Check if corresponding TypeX exists
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
log.Fatalf("Constant Type%s does not exist.", o.Name())
}
namedTypes = append(namedTypes, o.Name())
}
b := &bytes.Buffer{}
b.WriteString(packageHdr)
// Generate TypeToRR
fatalIfErr(TypeToRR.Execute(b, namedTypes))
// Generate typeToString
fatalIfErr(typeToString.Execute(b, numberedTypes))
// Generate headerFunc
fatalIfErr(headerFunc.Execute(b, namedTypes))
// Generate len()
fmt.Fprint(b, "// len() functions\n")
for _, name := range namedTypes {
if _, ok := skipLen[name]; ok {
continue
}
o := scope.Lookup(name)
st, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "func (rr *%s) len() int {\n", name)
fmt.Fprintf(b, "l := rr.Hdr.len()\n")
for i := 1; i < st.NumFields(); i++ {
o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"-"`:
// ignored
case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`:
o("for _, x := range rr.%s { l += len(x) + 1 }\n")
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
continue
}
switch {
case st.Tag(i) == `dns:"-"`:
// ignored
case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`:
o("l += len(rr.%s) + 1\n")
case st.Tag(i) == `dns:"octet"`:
o("l += len(rr.%s)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
fallthrough
case st.Tag(i) == `dns:"base64"`:
o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
fallthrough
case st.Tag(i) == `dns:"hex"`:
o("l += len(rr.%s)/2 + 1\n")
case st.Tag(i) == `dns:"a"`:
o("l += net.IPv4len // %s\n")
case st.Tag(i) == `dns:"aaaa"`:
o("l += net.IPv6len // %s\n")
case st.Tag(i) == `dns:"txt"`:
o("for _, t := range rr.%s { l += len(t) + 1 }\n")
case st.Tag(i) == `dns:"uint48"`:
o("l += 6 // %s\n")
case st.Tag(i) == "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("l += 1 // %s\n")
case types.Uint16:
o("l += 2 // %s\n")
case types.Uint32:
o("l += 4 // %s\n")
case types.Uint64:
o("l += 8 // %s\n")
case types.String:
o("l += len(rr.%s) + 1\n")
default:
log.Fatalln(name, st.Field(i).Name())
}
default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
}
}
fmt.Fprintf(b, "return l }\n")
}
// Generate copy()
fmt.Fprint(b, "// copy() functions\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
st, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
fields := []string{"*rr.Hdr.copyHeader()"}
for i := 1; i < st.NumFields(); i++ {
f := st.Field(i).Name()
if sl, ok := st.Field(i).Type().(*types.Slice); ok {
t := sl.Underlying().String()
t = strings.TrimPrefix(t, "[]")
if strings.Contains(t, ".") {
splits := strings.Split(t, ".")
t = splits[len(splits)-1]
}
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
f, t, f, f, f)
fields = append(fields, f)
continue
}
if st.Field(i).Type().String() == "net.IP" {
fields = append(fields, "copyIP(rr."+f+")")
continue
}
fields = append(fields, "rr."+f)
}
fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ","))
fmt.Fprintf(b, "}\n")
}
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
// write result
f, err := os.Create("ztypes.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}
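Because of the `//+build ignore` constraint the generator never compiles into the package; it only runs via go generate, which rewrites ztypes.go and expects the result to be committed. The triggering directive presumably lives in a normally-built file of the package, along the lines of:

```go
// In a regular (non-ignored) file of the dns package; the exact
// location is an assumption, the diff does not show it:
//go:generate go run types_generate.go
```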

2
vendor/github.com/miekg/dns/udp.go generated vendored
View file

@ -1,4 +1,4 @@
// +build !windows
// +build !windows,!plan9
package dns

View file

@ -24,6 +24,12 @@ func setUDPSocketOptions4(conn *net.UDPConn) error {
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
return err
}
// Calling File() above results in the connection becoming blocking; we must fix that.
// See https://github.com/miekg/dns/issues/279
err = syscall.SetNonblock(int(file.Fd()), true)
if err != nil {
return err
}
return nil
}
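conn.File() returns a dup of the socket's descriptor and, as the linked issue describes, flips the socket into blocking mode as a side effect; the added SetNonblock call undoes that. The pattern in isolation, as a sketch for Unix-like systems rather than the package's actual code:

```go
package main

import (
	"net"
	"syscall"
)

// setSockoptNonblock shows the pattern used above: File() dups the fd
// and historically put the socket into blocking mode, so after setting
// socket options we restore non-blocking mode on that fd.
func setSockoptNonblock(conn *net.UDPConn) error {
	file, err := conn.File()
	if err != nil {
		return err
	}
	defer file.Close()
	fd := int(file.Fd())
	// ...SetsockoptInt calls would go here...
	return syscall.SetNonblock(fd, true)
}

func main() {
	c, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
	if err != nil {
		panic(err)
	}
	defer c.Close()
	if err := setSockoptNonblock(c); err != nil {
		panic(err)
	}
}
```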
@ -36,6 +42,10 @@ func setUDPSocketOptions6(conn *net.UDPConn) error {
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
return err
}
err = syscall.SetNonblock(int(file.Fd()), true)
if err != nil {
return err
}
return nil
}

View file

@ -1,4 +1,4 @@
// +build !linux
// +build !linux,!plan9
package dns

34
vendor/github.com/miekg/dns/udp_plan9.go generated vendored Normal file
View file

@ -0,0 +1,34 @@
package dns
import (
"net"
)
func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
// SessionUDP holds the remote address and the associated
// out-of-band data.
type SessionUDP struct {
raddr *net.UDPAddr
context []byte
}
// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
oob := make([]byte, 40)
n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
if err != nil {
return n, nil, err
}
return n, &SessionUDP{raddr, oob[:oobn]}, err
}
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
return n, err
}
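The Plan 9 build gets by without control messages, but callers use the same API on every platform. A minimal (blocking) round-trip sketch; the port is arbitrary:

```go
package main

import (
	"net"

	"github.com/miekg/dns"
)

func main() {
	conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: 5353})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 512)
	// Blocks until a datagram arrives, then echoes it to the sender.
	n, session, err := dns.ReadFromSessionUDP(conn, buf)
	if err != nil {
		panic(err)
	}
	if _, err := dns.WriteToSessionUDP(conn, buf[:n], session); err != nil {
		panic(err)
	}
}
```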

View file

@ -3,18 +3,22 @@ package dns
// NameUsed sets the RRs in the prereq section to
// "Name is in use" RRs. RFC 2136 section 2.4.4.
func (u *Msg) NameUsed(rr []RR) {
u.Answer = make([]RR, len(rr))
for i, r := range rr {
u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
}
}
// NameNotUsed sets the RRs in the prereq section to
// "Name is in not use" RRs. RFC 2136 section 2.4.5.
func (u *Msg) NameNotUsed(rr []RR) {
u.Answer = make([]RR, len(rr))
for i, r := range rr {
u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
}
}
@ -24,34 +28,34 @@ func (u *Msg) Used(rr []RR) {
if len(u.Question) == 0 {
panic("dns: empty question section")
}
u.Answer = make([]RR, len(rr))
for i, r := range rr {
u.Answer[i] = r
u.Answer[i].Header().Class = u.Question[0].Qclass
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
r.Header().Class = u.Question[0].Qclass
u.Answer = append(u.Answer, r)
}
}
// RRsetUsed sets the RRs in the prereq section to
// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
func (u *Msg) RRsetUsed(rr []RR) {
u.Answer = make([]RR, len(rr))
for i, r := range rr {
u.Answer[i] = r
u.Answer[i].Header().Class = ClassANY
u.Answer[i].Header().Ttl = 0
u.Answer[i].Header().Rdlength = 0
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
}
}
// RRsetNotUsed sets the RRs in the prereq section to
// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
func (u *Msg) RRsetNotUsed(rr []RR) {
u.Answer = make([]RR, len(rr))
for i, r := range rr {
u.Answer[i] = r
u.Answer[i].Header().Class = ClassNONE
u.Answer[i].Header().Rdlength = 0
u.Answer[i].Header().Ttl = 0
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}})
}
}
@ -60,35 +64,43 @@ func (u *Msg) Insert(rr []RR) {
if len(u.Question) == 0 {
panic("dns: empty question section")
}
u.Ns = make([]RR, len(rr))
for i, r := range rr {
u.Ns[i] = r
u.Ns[i].Header().Class = u.Question[0].Qclass
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
}
for _, r := range rr {
r.Header().Class = u.Question[0].Qclass
u.Ns = append(u.Ns, r)
}
}
// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
func (u *Msg) RemoveRRset(rr []RR) {
u.Ns = make([]RR, len(rr))
for i, r := range rr {
u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
}
for _, r := range rr {
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
}
}
// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3
func (u *Msg) RemoveName(rr []RR) {
u.Ns = make([]RR, len(rr))
for i, r := range rr {
u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
}
for _, r := range rr {
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
}
}
// Remove creates a dynamic update packet deletes RR from the RRSset, see RFC 2136 section 2.5.4
// Remove creates a dynamic update packet that deletes RR from an RRset, see RFC 2136 section 2.5.4
func (u *Msg) Remove(rr []RR) {
u.Ns = make([]RR, len(rr))
for i, r := range rr {
u.Ns[i] = r
u.Ns[i].Header().Class = ClassNONE
u.Ns[i].Header().Ttl = 0
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
}
for _, r := range rr {
r.Header().Class = ClassNONE
r.Header().Ttl = 0
u.Ns = append(u.Ns, r)
}
}
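All of these helpers used to overwrite the section they wrote to; with the append-based rewrite they compose, so one message can carry several kinds of update operations. A small sketch of that composition:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetUpdate("example.org.") // puts the zone into the question section

	rr, err := dns.NewRR("www.example.org. 300 IN A 192.0.2.1")
	if err != nil {
		panic(err)
	}
	// With the append-based helpers, Insert and RemoveRRset can now be
	// combined on one message without one wiping out the other.
	m.Insert([]dns.RR{rr})
	m.RemoveRRset([]dns.RR{rr})
	fmt.Println(len(m.Ns)) // 2
}
```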

22
vendor/github.com/miekg/dns/xfr.go generated vendored
View file

@ -23,14 +23,26 @@ type Transfer struct {
// Think we need a way to stop the transfer
// In performs an incoming transfer with the server in a.
// If you would like to set the source IP, or some other attribute
// of a Dialer for a Transfer, you can do so by specifying the attributes
// in the Transfer.Conn:
//
// d := net.Dialer{LocalAddr: transfer_source}
// con, err := d.Dial("tcp", master)
// dnscon := &dns.Conn{Conn:con}
// transfer = &dns.Transfer{Conn: dnscon}
// channel, err := transfer.In(message, master)
//
func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
timeout := dnsTimeout
if t.DialTimeout != 0 {
timeout = t.DialTimeout
}
t.Conn, err = DialTimeout("tcp", a, timeout)
if err != nil {
return nil, err
if t.Conn == nil {
t.Conn, err = DialTimeout("tcp", a, timeout)
if err != nil {
return nil, err
}
}
if err := t.WriteMsg(q); err != nil {
return nil, err
@ -150,8 +162,8 @@ func (t *Transfer) inIxfr(id uint16, c chan *Envelope) {
//
// ch := make(chan *dns.Envelope)
// tr := new(dns.Transfer)
// tr.Out(w, r, ch)
// c <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
// go tr.Out(w, r, ch)
// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
// close(ch)
// w.Hijack()
// // w.Close() // Client closes connection
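For the inbound direction, a minimal AXFR client looks roughly like this (the server address is a placeholder; with Transfer.Conn left nil, In dials it as before):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.")

	tr := new(dns.Transfer)
	env, err := tr.In(m, "192.0.2.53:53") // placeholder master address
	if err != nil {
		panic(err)
	}
	for e := range env {
		if e.Error != nil {
			panic(e.Error)
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
}
```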

3529
vendor/github.com/miekg/dns/zmsg.go generated vendored Normal file

File diff suppressed because it is too large

842
vendor/github.com/miekg/dns/ztypes.go generated vendored Normal file
View file

@ -0,0 +1,842 @@
// *** DO NOT MODIFY ***
// AUTOGENERATED BY go generate from types_generate.go
package dns
import (
"encoding/base64"
"net"
)
// TypeToRR is a map of constructors for each RR type.
var TypeToRR = map[uint16]func() RR{
TypeA: func() RR { return new(A) },
TypeAAAA: func() RR { return new(AAAA) },
TypeAFSDB: func() RR { return new(AFSDB) },
TypeANY: func() RR { return new(ANY) },
TypeCAA: func() RR { return new(CAA) },
TypeCDNSKEY: func() RR { return new(CDNSKEY) },
TypeCDS: func() RR { return new(CDS) },
TypeCERT: func() RR { return new(CERT) },
TypeCNAME: func() RR { return new(CNAME) },
TypeDHCID: func() RR { return new(DHCID) },
TypeDLV: func() RR { return new(DLV) },
TypeDNAME: func() RR { return new(DNAME) },
TypeDNSKEY: func() RR { return new(DNSKEY) },
TypeDS: func() RR { return new(DS) },
TypeEID: func() RR { return new(EID) },
TypeEUI48: func() RR { return new(EUI48) },
TypeEUI64: func() RR { return new(EUI64) },
TypeGID: func() RR { return new(GID) },
TypeGPOS: func() RR { return new(GPOS) },
TypeHINFO: func() RR { return new(HINFO) },
TypeHIP: func() RR { return new(HIP) },
TypeKEY: func() RR { return new(KEY) },
TypeKX: func() RR { return new(KX) },
TypeL32: func() RR { return new(L32) },
TypeL64: func() RR { return new(L64) },
TypeLOC: func() RR { return new(LOC) },
TypeLP: func() RR { return new(LP) },
TypeMB: func() RR { return new(MB) },
TypeMD: func() RR { return new(MD) },
TypeMF: func() RR { return new(MF) },
TypeMG: func() RR { return new(MG) },
TypeMINFO: func() RR { return new(MINFO) },
TypeMR: func() RR { return new(MR) },
TypeMX: func() RR { return new(MX) },
TypeNAPTR: func() RR { return new(NAPTR) },
TypeNID: func() RR { return new(NID) },
TypeNIMLOC: func() RR { return new(NIMLOC) },
TypeNINFO: func() RR { return new(NINFO) },
TypeNS: func() RR { return new(NS) },
TypeNSAPPTR: func() RR { return new(NSAPPTR) },
TypeNSEC: func() RR { return new(NSEC) },
TypeNSEC3: func() RR { return new(NSEC3) },
TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
TypeOPT: func() RR { return new(OPT) },
TypePTR: func() RR { return new(PTR) },
TypePX: func() RR { return new(PX) },
TypeRKEY: func() RR { return new(RKEY) },
TypeRP: func() RR { return new(RP) },
TypeRRSIG: func() RR { return new(RRSIG) },
TypeRT: func() RR { return new(RT) },
TypeSIG: func() RR { return new(SIG) },
TypeSMIMEA: func() RR { return new(SMIMEA) },
TypeSOA: func() RR { return new(SOA) },
TypeSPF: func() RR { return new(SPF) },
TypeSRV: func() RR { return new(SRV) },
TypeSSHFP: func() RR { return new(SSHFP) },
TypeTA: func() RR { return new(TA) },
TypeTALINK: func() RR { return new(TALINK) },
TypeTKEY: func() RR { return new(TKEY) },
TypeTLSA: func() RR { return new(TLSA) },
TypeTSIG: func() RR { return new(TSIG) },
TypeTXT: func() RR { return new(TXT) },
TypeUID: func() RR { return new(UID) },
TypeUINFO: func() RR { return new(UINFO) },
TypeURI: func() RR { return new(URI) },
TypeX25: func() RR { return new(X25) },
}
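TypeToRR is what the unpacking path consults to materialize a record for a numeric type code; it can be used directly as well:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Look up the constructor for a type code and build a fresh record,
	// just as the unpacking code does for whatever type it reads.
	newRR, ok := dns.TypeToRR[dns.TypeMX]
	if !ok {
		panic("unknown type")
	}
	rr := newRR()
	fmt.Printf("%T\n", rr) // *dns.MX
}
```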
// TypeToString is a map of strings for each RR type.
var TypeToString = map[uint16]string{
TypeA: "A",
TypeAAAA: "AAAA",
TypeAFSDB: "AFSDB",
TypeANY: "ANY",
TypeATMA: "ATMA",
TypeAXFR: "AXFR",
TypeCAA: "CAA",
TypeCDNSKEY: "CDNSKEY",
TypeCDS: "CDS",
TypeCERT: "CERT",
TypeCNAME: "CNAME",
TypeDHCID: "DHCID",
TypeDLV: "DLV",
TypeDNAME: "DNAME",
TypeDNSKEY: "DNSKEY",
TypeDS: "DS",
TypeEID: "EID",
TypeEUI48: "EUI48",
TypeEUI64: "EUI64",
TypeGID: "GID",
TypeGPOS: "GPOS",
TypeHINFO: "HINFO",
TypeHIP: "HIP",
TypeISDN: "ISDN",
TypeIXFR: "IXFR",
TypeKEY: "KEY",
TypeKX: "KX",
TypeL32: "L32",
TypeL64: "L64",
TypeLOC: "LOC",
TypeLP: "LP",
TypeMAILA: "MAILA",
TypeMAILB: "MAILB",
TypeMB: "MB",
TypeMD: "MD",
TypeMF: "MF",
TypeMG: "MG",
TypeMINFO: "MINFO",
TypeMR: "MR",
TypeMX: "MX",
TypeNAPTR: "NAPTR",
TypeNID: "NID",
TypeNIMLOC: "NIMLOC",
TypeNINFO: "NINFO",
TypeNS: "NS",
TypeNSEC: "NSEC",
TypeNSEC3: "NSEC3",
TypeNSEC3PARAM: "NSEC3PARAM",
TypeNULL: "NULL",
TypeNXT: "NXT",
TypeNone: "None",
TypeOPENPGPKEY: "OPENPGPKEY",
TypeOPT: "OPT",
TypePTR: "PTR",
TypePX: "PX",
TypeRKEY: "RKEY",
TypeRP: "RP",
TypeRRSIG: "RRSIG",
TypeRT: "RT",
TypeReserved: "Reserved",
TypeSIG: "SIG",
TypeSMIMEA: "SMIMEA",
TypeSOA: "SOA",
TypeSPF: "SPF",
TypeSRV: "SRV",
TypeSSHFP: "SSHFP",
TypeTA: "TA",
TypeTALINK: "TALINK",
TypeTKEY: "TKEY",
TypeTLSA: "TLSA",
TypeTSIG: "TSIG",
TypeTXT: "TXT",
TypeUID: "UID",
TypeUINFO: "UINFO",
TypeUNSPEC: "UNSPEC",
TypeURI: "URI",
TypeX25: "X25",
TypeNSAPPTR: "NSAP-PTR",
}
// Header() functions
func (rr *A) Header() *RR_Header { return &rr.Hdr }
func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *CDS) Header() *RR_Header { return &rr.Hdr }
func (rr *CERT) Header() *RR_Header { return &rr.Hdr }
func (rr *CNAME) Header() *RR_Header { return &rr.Hdr }
func (rr *DHCID) Header() *RR_Header { return &rr.Hdr }
func (rr *DLV) Header() *RR_Header { return &rr.Hdr }
func (rr *DNAME) Header() *RR_Header { return &rr.Hdr }
func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *DS) Header() *RR_Header { return &rr.Hdr }
func (rr *EID) Header() *RR_Header { return &rr.Hdr }
func (rr *EUI48) Header() *RR_Header { return &rr.Hdr }
func (rr *EUI64) Header() *RR_Header { return &rr.Hdr }
func (rr *GID) Header() *RR_Header { return &rr.Hdr }
func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
func (rr *KEY) Header() *RR_Header { return &rr.Hdr }
func (rr *KX) Header() *RR_Header { return &rr.Hdr }
func (rr *L32) Header() *RR_Header { return &rr.Hdr }
func (rr *L64) Header() *RR_Header { return &rr.Hdr }
func (rr *LOC) Header() *RR_Header { return &rr.Hdr }
func (rr *LP) Header() *RR_Header { return &rr.Hdr }
func (rr *MB) Header() *RR_Header { return &rr.Hdr }
func (rr *MD) Header() *RR_Header { return &rr.Hdr }
func (rr *MF) Header() *RR_Header { return &rr.Hdr }
func (rr *MG) Header() *RR_Header { return &rr.Hdr }
func (rr *MINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *MR) Header() *RR_Header { return &rr.Hdr }
func (rr *MX) Header() *RR_Header { return &rr.Hdr }
func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr }
func (rr *NID) Header() *RR_Header { return &rr.Hdr }
func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr }
func (rr *NINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *NS) Header() *RR_Header { return &rr.Hdr }
func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
func (rr *PX) Header() *RR_Header { return &rr.Hdr }
func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *RP) Header() *RR_Header { return &rr.Hdr }
func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr }
func (rr *RT) Header() *RR_Header { return &rr.Hdr }
func (rr *SIG) Header() *RR_Header { return &rr.Hdr }
func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr }
func (rr *SOA) Header() *RR_Header { return &rr.Hdr }
func (rr *SPF) Header() *RR_Header { return &rr.Hdr }
func (rr *SRV) Header() *RR_Header { return &rr.Hdr }
func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr }
func (rr *TA) Header() *RR_Header { return &rr.Hdr }
func (rr *TALINK) Header() *RR_Header { return &rr.Hdr }
func (rr *TKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *TLSA) Header() *RR_Header { return &rr.Hdr }
func (rr *TSIG) Header() *RR_Header { return &rr.Hdr }
func (rr *TXT) Header() *RR_Header { return &rr.Hdr }
func (rr *UID) Header() *RR_Header { return &rr.Hdr }
func (rr *UINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *URI) Header() *RR_Header { return &rr.Hdr }
func (rr *X25) Header() *RR_Header { return &rr.Hdr }
// len() functions
func (rr *A) len() int {
l := rr.Hdr.len()
l += net.IPv4len // A
return l
}
func (rr *AAAA) len() int {
l := rr.Hdr.len()
l += net.IPv6len // AAAA
return l
}
func (rr *AFSDB) len() int {
l := rr.Hdr.len()
l += 2 // Subtype
l += len(rr.Hostname) + 1
return l
}
func (rr *ANY) len() int {
l := rr.Hdr.len()
return l
}
func (rr *CAA) len() int {
l := rr.Hdr.len()
l += 1 // Flag
l += len(rr.Tag) + 1
l += len(rr.Value)
return l
}
func (rr *CERT) len() int {
l := rr.Hdr.len()
l += 2 // Type
l += 2 // KeyTag
l += 1 // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
return l
}
func (rr *CNAME) len() int {
l := rr.Hdr.len()
l += len(rr.Target) + 1
return l
}
func (rr *DHCID) len() int {
l := rr.Hdr.len()
l += base64.StdEncoding.DecodedLen(len(rr.Digest))
return l
}
func (rr *DNAME) len() int {
l := rr.Hdr.len()
l += len(rr.Target) + 1
return l
}
func (rr *DNSKEY) len() int {
l := rr.Hdr.len()
l += 2 // Flags
l += 1 // Protocol
l += 1 // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
func (rr *DS) len() int {
l := rr.Hdr.len()
l += 2 // KeyTag
l += 1 // Algorithm
l += 1 // DigestType
l += len(rr.Digest)/2 + 1
return l
}
func (rr *EID) len() int {
l := rr.Hdr.len()
l += len(rr.Endpoint)/2 + 1
return l
}
func (rr *EUI48) len() int {
l := rr.Hdr.len()
l += 6 // Address
return l
}
func (rr *EUI64) len() int {
l := rr.Hdr.len()
l += 8 // Address
return l
}
func (rr *GID) len() int {
l := rr.Hdr.len()
l += 4 // Gid
return l
}
func (rr *GPOS) len() int {
l := rr.Hdr.len()
l += len(rr.Longitude) + 1
l += len(rr.Latitude) + 1
l += len(rr.Altitude) + 1
return l
}
func (rr *HINFO) len() int {
l := rr.Hdr.len()
l += len(rr.Cpu) + 1
l += len(rr.Os) + 1
return l
}
func (rr *HIP) len() int {
l := rr.Hdr.len()
l += 1 // HitLength
l += 1 // PublicKeyAlgorithm
l += 2 // PublicKeyLength
l += len(rr.Hit)/2 + 1
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
for _, x := range rr.RendezvousServers {
l += len(x) + 1
}
return l
}
func (rr *KX) len() int {
l := rr.Hdr.len()
l += 2 // Preference
l += len(rr.Exchanger) + 1
return l
}
func (rr *L32) len() int {
l := rr.Hdr.len()
l += 2 // Preference
l += net.IPv4len // Locator32
return l
}
func (rr *L64) len() int {
l := rr.Hdr.len()
l += 2 // Preference
l += 8 // Locator64
return l
}
func (rr *LOC) len() int {
l := rr.Hdr.len()
l += 1 // Version
l += 1 // Size
l += 1 // HorizPre
l += 1 // VertPre
l += 4 // Latitude
l += 4 // Longitude
l += 4 // Altitude
return l
}
func (rr *LP) len() int {
l := rr.Hdr.len()
l += 2 // Preference
l += len(rr.Fqdn) + 1
return l
}
func (rr *MB) len() int {
l := rr.Hdr.len()
l += len(rr.Mb) + 1
return l
}
func (rr *MD) len() int {
l := rr.Hdr.len()
l += len(rr.Md) + 1
return l
}
func (rr *MF) len() int {
l := rr.Hdr.len()
l += len(rr.Mf) + 1
return l
}
func (rr *MG) len() int {
l := rr.Hdr.len()
l += len(rr.Mg) + 1
return l
}
func (rr *MINFO) len() int {
l := rr.Hdr.len()
l += len(rr.Rmail) + 1
l += len(rr.Email) + 1
return l
}
func (rr *MR) len() int {
l := rr.Hdr.len()
l += len(rr.Mr) + 1
return l
}
func (rr *MX) len() int {
l := rr.Hdr.len()
l += 2 // Preference
l += len(rr.Mx) + 1
return l
}
func (rr *NAPTR) len() int {
l := rr.Hdr.len()
l += 2 // Order
l += 2 // Preference
l += len(rr.Flags) + 1
l += len(rr.Service) + 1
l += len(rr.Regexp) + 1
l += len(rr.Replacement) + 1
return l
}
func (rr *NID) len() int {
l := rr.Hdr.len()
l += 2 // Preference
l += 8 // NodeID
return l
}
func (rr *NIMLOC) len() int {
l := rr.Hdr.len()
l += len(rr.Locator)/2 + 1
return l
}
func (rr *NINFO) len() int {
l := rr.Hdr.len()
for _, x := range rr.ZSData {
l += len(x) + 1
}
return l
}
func (rr *NS) len() int {
l := rr.Hdr.len()
l += len(rr.Ns) + 1
return l
}
func (rr *NSAPPTR) len() int {
l := rr.Hdr.len()
l += len(rr.Ptr) + 1
return l
}
func (rr *NSEC3PARAM) len() int {
l := rr.Hdr.len()
l += 1 // Hash
l += 1 // Flags
l += 2 // Iterations
l += 1 // SaltLength
l += len(rr.Salt)/2 + 1
return l
}
func (rr *OPENPGPKEY) len() int {
l := rr.Hdr.len()
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
func (rr *PTR) len() int {
l := rr.Hdr.len()
l += len(rr.Ptr) + 1
return l
}
func (rr *PX) len() int {
l := rr.Hdr.len()
l += 2 // Preference
l += len(rr.Map822) + 1
l += len(rr.Mapx400) + 1
return l
}
func (rr *RFC3597) len() int {
l := rr.Hdr.len()
l += len(rr.Rdata)/2 + 1
return l
}
func (rr *RKEY) len() int {
l := rr.Hdr.len()
l += 2 // Flags
l += 1 // Protocol
l += 1 // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
func (rr *RP) len() int {
l := rr.Hdr.len()
l += len(rr.Mbox) + 1
l += len(rr.Txt) + 1
return l
}
func (rr *RRSIG) len() int {
l := rr.Hdr.len()
l += 2 // TypeCovered
l += 1 // Algorithm
l += 1 // Labels
l += 4 // OrigTtl
l += 4 // Expiration
l += 4 // Inception
l += 2 // KeyTag
l += len(rr.SignerName) + 1
l += base64.StdEncoding.DecodedLen(len(rr.Signature))
return l
}
func (rr *RT) len() int {
l := rr.Hdr.len()
l += 2 // Preference
l += len(rr.Host) + 1
return l
}
func (rr *SMIMEA) len() int {
l := rr.Hdr.len()
l += 1 // Usage
l += 1 // Selector
l += 1 // MatchingType
l += len(rr.Certificate)/2 + 1
return l
}
func (rr *SOA) len() int {
l := rr.Hdr.len()
l += len(rr.Ns) + 1
l += len(rr.Mbox) + 1
l += 4 // Serial
l += 4 // Refresh
l += 4 // Retry
l += 4 // Expire
l += 4 // Minttl
return l
}
func (rr *SPF) len() int {
l := rr.Hdr.len()
for _, x := range rr.Txt {
l += len(x) + 1
}
return l
}
func (rr *SRV) len() int {
l := rr.Hdr.len()
l += 2 // Priority
l += 2 // Weight
l += 2 // Port
l += len(rr.Target) + 1
return l
}
func (rr *SSHFP) len() int {
l := rr.Hdr.len()
l += 1 // Algorithm
l += 1 // Type
l += len(rr.FingerPrint)/2 + 1
return l
}
func (rr *TA) len() int {
l := rr.Hdr.len()
l += 2 // KeyTag
l += 1 // Algorithm
l += 1 // DigestType
l += len(rr.Digest)/2 + 1
return l
}
func (rr *TALINK) len() int {
l := rr.Hdr.len()
l += len(rr.PreviousName) + 1
l += len(rr.NextName) + 1
return l
}
func (rr *TKEY) len() int {
l := rr.Hdr.len()
l += len(rr.Algorithm) + 1
l += 4 // Inception
l += 4 // Expiration
l += 2 // Mode
l += 2 // Error
l += 2 // KeySize
l += len(rr.Key) + 1
l += 2 // OtherLen
l += len(rr.OtherData) + 1
return l
}
func (rr *TLSA) len() int {
l := rr.Hdr.len()
l += 1 // Usage
l += 1 // Selector
l += 1 // MatchingType
l += len(rr.Certificate)/2 + 1
return l
}
func (rr *TSIG) len() int {
l := rr.Hdr.len()
l += len(rr.Algorithm) + 1
l += 6 // TimeSigned
l += 2 // Fudge
l += 2 // MACSize
l += len(rr.MAC)/2 + 1
l += 2 // OrigId
l += 2 // Error
l += 2 // OtherLen
l += len(rr.OtherData)/2 + 1
return l
}
func (rr *TXT) len() int {
l := rr.Hdr.len()
for _, x := range rr.Txt {
l += len(x) + 1
}
return l
}
func (rr *UID) len() int {
l := rr.Hdr.len()
l += 4 // Uid
return l
}
func (rr *UINFO) len() int {
l := rr.Hdr.len()
l += len(rr.Uinfo) + 1
return l
}
func (rr *URI) len() int {
l := rr.Hdr.len()
l += 2 // Priority
l += 2 // Weight
l += len(rr.Target)
return l
}
func (rr *X25) len() int {
l := rr.Hdr.len()
l += len(rr.PSDNAddress) + 1
return l
}
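These unexported len() methods estimate each record's uncompressed wire size; they surface indirectly through exported calls such as Msg.Len(), which the packing code uses for truncation and buffer-size decisions. For example:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeMX)
	rr, err := dns.NewRR("example.org. 3600 IN MX 10 mail.example.org.")
	if err != nil {
		panic(err)
	}
	m.Answer = append(m.Answer, rr)
	fmt.Println(m.Len()) // estimated wire size of the message in bytes
}
```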
// copy() functions
func (rr *A) copy() RR {
return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)}
}
func (rr *AAAA) copy() RR {
return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)}
}
func (rr *AFSDB) copy() RR {
return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname}
}
func (rr *ANY) copy() RR {
return &ANY{*rr.Hdr.copyHeader()}
}
func (rr *CAA) copy() RR {
return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value}
}
func (rr *CERT) copy() RR {
return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
}
func (rr *CNAME) copy() RR {
return &CNAME{*rr.Hdr.copyHeader(), rr.Target}
}
func (rr *DHCID) copy() RR {
return &DHCID{*rr.Hdr.copyHeader(), rr.Digest}
}
func (rr *DNAME) copy() RR {
return &DNAME{*rr.Hdr.copyHeader(), rr.Target}
}
func (rr *DNSKEY) copy() RR {
return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
}
func (rr *DS) copy() RR {
return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
}
func (rr *EID) copy() RR {
return &EID{*rr.Hdr.copyHeader(), rr.Endpoint}
}
func (rr *EUI48) copy() RR {
return &EUI48{*rr.Hdr.copyHeader(), rr.Address}
}
func (rr *EUI64) copy() RR {
return &EUI64{*rr.Hdr.copyHeader(), rr.Address}
}
func (rr *GID) copy() RR {
return &GID{*rr.Hdr.copyHeader(), rr.Gid}
}
func (rr *GPOS) copy() RR {
return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude}
}
func (rr *HINFO) copy() RR {
return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os}
}
func (rr *HIP) copy() RR {
RendezvousServers := make([]string, len(rr.RendezvousServers))
copy(RendezvousServers, rr.RendezvousServers)
return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
}
func (rr *KX) copy() RR {
return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger}
}
func (rr *L32) copy() RR {
return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)}
}
func (rr *L64) copy() RR {
return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64}
}
func (rr *LOC) copy() RR {
return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
}
func (rr *LP) copy() RR {
return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn}
}
func (rr *MB) copy() RR {
return &MB{*rr.Hdr.copyHeader(), rr.Mb}
}
func (rr *MD) copy() RR {
return &MD{*rr.Hdr.copyHeader(), rr.Md}
}
func (rr *MF) copy() RR {
return &MF{*rr.Hdr.copyHeader(), rr.Mf}
}
func (rr *MG) copy() RR {
return &MG{*rr.Hdr.copyHeader(), rr.Mg}
}
func (rr *MINFO) copy() RR {
return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email}
}
func (rr *MR) copy() RR {
return &MR{*rr.Hdr.copyHeader(), rr.Mr}
}
func (rr *MX) copy() RR {
return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx}
}
func (rr *NAPTR) copy() RR {
return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
}
func (rr *NID) copy() RR {
return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID}
}
func (rr *NIMLOC) copy() RR {
return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator}
}
func (rr *NINFO) copy() RR {
ZSData := make([]string, len(rr.ZSData))
copy(ZSData, rr.ZSData)
return &NINFO{*rr.Hdr.copyHeader(), ZSData}
}
func (rr *NS) copy() RR {
return &NS{*rr.Hdr.copyHeader(), rr.Ns}
}
func (rr *NSAPPTR) copy() RR {
return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr}
}
func (rr *NSEC) copy() RR {
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
copy(TypeBitMap, rr.TypeBitMap)
return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap}
}
func (rr *NSEC3) copy() RR {
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
copy(TypeBitMap, rr.TypeBitMap)
return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
}
func (rr *NSEC3PARAM) copy() RR {
return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
}
func (rr *OPENPGPKEY) copy() RR {
return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey}
}
func (rr *OPT) copy() RR {
Option := make([]EDNS0, len(rr.Option))
copy(Option, rr.Option)
return &OPT{*rr.Hdr.copyHeader(), Option}
}
func (rr *PTR) copy() RR {
return &PTR{*rr.Hdr.copyHeader(), rr.Ptr}
}
func (rr *PX) copy() RR {
return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400}
}
func (rr *RFC3597) copy() RR {
return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata}
}
func (rr *RKEY) copy() RR {
return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
}
func (rr *RP) copy() RR {
return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt}
}
func (rr *RRSIG) copy() RR {
return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
}
func (rr *RT) copy() RR {
return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host}
}
func (rr *SMIMEA) copy() RR {
return &SMIMEA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
}
func (rr *SOA) copy() RR {
return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
}
func (rr *SPF) copy() RR {
Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt)
return &SPF{*rr.Hdr.copyHeader(), Txt}
}
func (rr *SRV) copy() RR {
return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target}
}
func (rr *SSHFP) copy() RR {
return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint}
}
func (rr *TA) copy() RR {
return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
}
func (rr *TALINK) copy() RR {
return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName}
}
func (rr *TKEY) copy() RR {
return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
}
func (rr *TLSA) copy() RR {
return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
}
func (rr *TSIG) copy() RR {
return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
}
func (rr *TXT) copy() RR {
Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt)
return &TXT{*rr.Hdr.copyHeader(), Txt}
}
func (rr *UID) copy() RR {
return &UID{*rr.Hdr.copyHeader(), rr.Uid}
}
func (rr *UINFO) copy() RR {
return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo}
}
func (rr *URI) copy() RR {
return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target}
}
func (rr *X25) copy() RR {
return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress}
}
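The generated copy() methods back the exported dns.Copy, and slice-valued fields are duplicated rather than shared, as a quick check shows:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	orig, err := dns.NewRR(`example.org. 3600 IN TXT "v=spf1 -all"`)
	if err != nil {
		panic(err)
	}
	// Copy uses the generated copy() methods; TXT's Txt slice is
	// duplicated, so mutating the clone leaves the original untouched.
	clone := dns.Copy(orig).(*dns.TXT)
	clone.Txt[0] = "changed"
	fmt.Println(orig.(*dns.TXT).Txt[0]) // still "v=spf1 -all"
}
```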

View file

@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/).
The following components are included in this product:
goautoneg
http://bitbucket.org/ww/goautoneg
Copyright 2011, Open Knowledge Foundation Ltd.
See README.txt for license details.
perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein

View file

@ -1,53 +1 @@
# Overview
This is the [Prometheus](http://www.prometheus.io) telemetric
instrumentation [Go](http://golang.org) client library. It
enables authors to define process-space metrics for their servers and
expose them through a web service interface for extraction,
aggregation, and a whole slew of other post-processing techniques.
# Installing
$ go get github.com/prometheus/client_golang/prometheus
# Example
```go
package main
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
var (
indexed = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "my_company",
Subsystem: "indexer",
Name: "documents_indexed",
Help: "The number of documents indexed.",
})
size = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "my_company",
Subsystem: "storage",
Name: "documents_total_size_bytes",
Help: "The total size of all documents in the storage.",
})
)
func main() {
http.Handle("/metrics", prometheus.Handler())
indexed.Inc()
size.Set(5)
http.ListenAndServe(":8080", nil)
}
func init() {
prometheus.MustRegister(indexed)
prometheus.MustRegister(size)
}
```
# Documentation
[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).

View file

@ -15,15 +15,15 @@ package prometheus
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
// collection. See Registerer.Register.
//
// The stock metrics provided by this package (like Gauge, Counter, Summary) are
// also Collectors (which only ever collect one metric, namely itself). An
// implementer of Collector may, however, collect multiple metrics in a
// coordinated fashion and/or create metrics on the fly. Examples for collectors
// already implemented in this library are the metric vectors (i.e. collection
// of multiple instances of the same Metric but with different label values)
// like GaugeVec or SummaryVec, and the ExpvarCollector.
// The stock metrics provided by this package (Gauge, Counter, Summary,
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
// namely itself). An implementer of Collector may, however, collect multiple
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
// for collectors already implemented in this library are the metric vectors
// (i.e. collection of multiple instances of the same Metric but with different
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
@ -37,39 +37,39 @@ type Collector interface {
// executing this method, it must send an invalid descriptor (created
// with NewInvalidDesc) to signal the error to the registry.
Describe(chan<- *Desc)
// Collect is called by Prometheus when collecting metrics. The
// implementation sends each collected metric via the provided channel
// and returns once the last metric has been sent. The descriptor of
// each sent metric is one of those returned by Describe. Returned
// metrics that share the same descriptor must differ in their variable
// label values. This method may be called concurrently and must
// therefore be implemented in a concurrency safe way. Blocking occurs
// at the expense of total performance of rendering all registered
// metrics. Ideally, Collector implementations support concurrent
// readers.
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The
// descriptor of each sent metric is one of those returned by
// Describe. Returned metrics that share the same descriptor must differ
// in their variable label values. This method may be called
// concurrently and must therefore be implemented in a concurrency safe
// way. Blocking occurs at the expense of total performance of rendering
// all registered metrics. Ideally, Collector implementations support
// concurrent readers.
Collect(chan<- Metric)
}
// SelfCollector implements Collector for a single Metric so that the
// Metric collects itself. Add it as an anonymous field to a struct that
// implements Metric, and call Init with the Metric itself as an argument.
type SelfCollector struct {
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
type selfCollector struct {
self Metric
}
// Init provides the SelfCollector with a reference to the metric it is supposed
// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *SelfCollector) Init(self Metric) {
func (c *selfCollector) init(self Metric) {
c.self = self
}
// Describe implements Collector.
func (c *SelfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Describe(ch chan<- *Desc) {
ch <- c.self.Desc()
}
// Collect implements Collector.
func (c *SelfCollector) Collect(ch chan<- Metric) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
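With SelfCollector gone from the exported surface, code outside this package that wants self-describing metrics implements Collector directly, typically emitting const metrics. A sketch (metric name and value are invented):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// myCollector shows the pattern that replaces embedding SelfCollector
// outside this package: implement Describe and Collect directly and
// emit throw-away metrics with MustNewConstMetric.
type myCollector struct {
	desc *prometheus.Desc
}

func (c *myCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *myCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
}

func main() {
	c := &myCollector{desc: prometheus.NewDesc(
		"my_static_value", "An invented example value.", nil, nil,
	)}
	prometheus.MustRegister(c)
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
```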

View file

@ -35,6 +35,9 @@ type Counter interface {
// Prometheus metric. Do not use it for regular handling of a
// Prometheus counter (as it can be used to break the contract of
// monotonically increasing values).
//
// Deprecated: Use NewConstMetric to create a counter for an external
// value. A Counter should never be set.
Set(float64)
// Inc increments the counter by 1.
Inc()
@ -55,7 +58,7 @@ func NewCounter(opts CounterOpts) Counter {
opts.ConstLabels,
)
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
result.Init(result) // Init self-collection.
result.init(result) // Init self-collection.
return result
}
@ -79,7 +82,7 @@ func (c *counter) Add(v float64) {
// CounterVec embeds MetricVec. See there for a full list of methods with
// detailed documentation.
type CounterVec struct {
MetricVec
*MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@ -93,19 +96,15 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels,
)
return &CounterVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
newMetric: func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,
valType: CounterValue,
labelPairs: makeLabelPairs(desc, lvs),
}}
result.Init(result) // Init self-collection.
return result
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,
valType: CounterValue,
labelPairs: makeLabelPairs(desc, lvs),
}}
result.init(result) // Init self-collection.
return result
}),
}
}
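Usage is unchanged by the embedding switch: a CounterVec still hands out one child counter per label combination, created lazily on first use. For instance:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	hdFailures := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "hd_errors_total",
			Help: "Number of hard-disk errors.",
		},
		[]string{"device"},
	)
	prometheus.MustRegister(hdFailures)
	// The child counter for this label value is created on first use.
	hdFailures.WithLabelValues("/dev/sda").Inc()
	fmt.Println("incremented")
}
```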

View file

@ -1,3 +1,16 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (

View file

@ -11,18 +11,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus provides embeddable metric primitives for servers and
// standardized exposition of telemetry through a web services interface.
// Package prometheus provides metrics primitives to instrument code for
// monitoring. It also offers a registry for metrics. Sub-packages allow to
// expose the registered metrics via HTTP (package promhttp) or push them to a
// Pushgateway (package push).
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
// To expose metrics registered with the Prometheus registry, an HTTP server
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
// A Basic Example
//
// http.Handle("/metrics", prometheus.Handler())
//
// As a starting point a very basic usage example:
// As a starting point, a very basic usage example:
//
// package main
//
@ -30,6 +29,7 @@
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var (
@ -37,75 +37,145 @@
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// })
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// })
// hdFailures = prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// )
// )
//
// func init() {
// // Metrics have to be registered to be exposed:
// prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures)
// }
//
// func main() {
// cpuTemp.Set(65.3)
// hdFailures.Inc()
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
// http.Handle("/metrics", prometheus.Handler())
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":8080", nil)
// }
//
//
// This is a complete program that exports two metrics, a Gauge and a Counter.
// It also exports some stats about the HTTP usage of the /metrics
// endpoint. (See the Handler function for more detail.)
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
//
// Two more advanced metric types are the Summary and Histogram. A more
// thorough description of metric types can be found in the prometheus docs:
// Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
// Prometheus server not to assume anything about its type.
//
// In addition to the fundamental metric types Gauge, Counter, Summary,
// Histogram, and Untyped, a very important part of the Prometheus data model is
// the partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// and HistogramVec.
// HistogramVec, and UntypedVec.
//
// Those are all the parts needed for basic usage. Detailed documentation and
// examples are provided below.
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
// SummaryVec, HistogramVec, and UntypedVec are not.
//
// Everything else this package offers is essentially for "power users" only. A
// few pointers to "power user features":
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
// HistogramOpts, or UntypedOpts.
//
// All the various ...Opts structs have a ConstLabels field for labels that
// never change their value (which is only useful under special circumstances,
// see documentation of the Opts type).
// Custom Collectors and constant Metrics
//
// The Untyped metric behaves like a Gauge, but signals the Prometheus server
// not to assume anything about its type.
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
// glance, a custom Collector seems handy to bundle Metrics for common
// registration (with the prime example of the different metric vectors above,
// which bundle all the metrics of the same name but with different labels).
//
// Functions to fine-tune how the metric registry works: EnableCollectChecks,
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
// There is a more involved use case, too: If you already have metrics
// available, created outside of the Prometheus context, you don't need the
// interface of the various Metric types. You essentially want to mirror the
// existing numbers into Prometheus Metrics during collection. Your own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances.
//
// For custom metric collection, there are two entry points: Custom Metric
// implementations and custom Collector implementations. A Metric is the
// fundamental unit in the Prometheus data model: a sample at a point in time
// together with its meta-data (like its fully-qualified name and any number of
// pairs of label name and label value) that knows how to marshal itself into a
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
// gets registered with the Prometheus registry and manages the collection of
// one or more Metrics. Many parts of this package are building blocks for
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
// metrics under the hood, and by Collectors to describe the Metrics to be
// collected, but only to be dealt with by users if they implement their own
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
// in handy. Other useful components for Metric and Collector implementation
// include: LabelPairSorter to sort the DTO version of label pairs,
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
// collection time, MetricVec to bundle custom Metrics into a metric vector
// Collector, SelfCollector to make a custom Metric collect itself.
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
// metrics) as examples that are used in this package itself.
//
// A good example for a custom Collector is the ExpVarCollector included in this
// package, which exports variables exported via the "expvar" package as
// Prometheus metrics.
// If you just need to call a function to get a single float value to collect as
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
// Advanced Uses of the Registry
//
// While MustRegister is by far the most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might
// cause. As suggested by the name, MustRegister panics if an error occurs. With
// the Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data
// model. Inconsistencies are ideally detected at registration time, not at
// collect time. The former will usually be detected at start-up time of a
// program, while the latter will only happen at scrape time, possibly not even
// on the first scrape if the inconsistency only becomes relevant later. That is
// the main reason why a Collector and a Metric have to describe themselves to
// the registry.
//
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegistry variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in
// the same way on a custom registry as the global functions Register and
// Unregister on the default registry.
//
// There are a number of uses for custom registries: You can use registries
// with special properties, see NewPedanticRegistry. You can avoid global state,
// as it is imposed by the DefaultRegistry. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
// Also note that the DefaultRegistry comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
// HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp
// sub-package. (The top-level functions in the prometheus package are
// deprecated.)
//
// Pushing to the Pushgateway
//
// Functions for pushing to the Pushgateway can be found in the push sub-package.
//
// Other Means of Exposition
//
// More ways of exposing metrics can easily be added. Sending metrics to
// Graphite would be an example that will soon be implemented.
package prometheus
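As a hedged illustration of the approach described above (this sketch is not part of the vendored file, and the queueLength helper is hypothetical), a minimal custom Collector that mirrors one number into a throw-away Metric could look like this:

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors an externally maintained number into Prometheus.
type queueCollector struct {
	desc *prometheus.Desc
}

// Describe sends the only Desc this Collector will ever produce.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect creates a throw-away Metric on the fly, as described above.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueLength())
}

// queueLength is a hypothetical stand-in for the real data source.
func queueLength() float64 { return 42 }

func main() {
	prometheus.MustRegister(&queueCollector{
		desc: prometheus.NewDesc("queue_length", "Current queue length.", nil, nil),
	})
}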

View file

@ -18,21 +18,21 @@ import (
"expvar"
)
// ExpvarCollector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the ExpvarCollector is inherently
// slow. Thus, the ExpvarCollector is probably great for experiments and
// prototyping, but you should seriously consider a more direct implementation of
// Prometheus metrics for monitoring production systems.
//
// Use NewExpvarCollector to create new instances.
type ExpvarCollector struct {
type expvarCollector struct {
exports map[string]*Desc
}
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
// to be registered with the Prometheus registry.
// NewExpvarCollector returns a newly allocated expvar Collector that still has
// to be registered with a Prometheus registry.
//
// An expvar Collector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
// for experiments and prototyping, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
//
// The exports map has the following meaning:
//
@ -59,21 +59,21 @@ type ExpvarCollector struct {
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
return &ExpvarCollector{
func NewExpvarCollector(exports map[string]*Desc) Collector {
return &expvarCollector{
exports: exports,
}
}
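A hedged usage sketch for the exports map (the expvar name "open_files" is illustrative, not taken from this repository):

collector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
	"open_files": prometheus.NewDesc(
		"open_files",
		"Number of open files, mirrored from expvar.",
		nil, nil,
	),
})
prometheus.MustRegister(collector)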
// Describe implements Collector.
func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
func (e *expvarCollector) Describe(ch chan<- *Desc) {
for _, desc := range e.exports {
ch <- desc
}
}
// Collect implements Collector.
func (e *ExpvarCollector) Collect(ch chan<- Metric) {
func (e *expvarCollector) Collect(ch chan<- Metric) {
for name, desc := range e.exports {
var m Metric
expVar := expvar.Get(name)

View file

@ -58,7 +58,7 @@ func NewGauge(opts GaugeOpts) Gauge {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
MetricVec
*MetricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@ -72,13 +72,9 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
newMetric: func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
}),
}
}
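A minimal usage sketch for the constructor above (metric and label names are illustrative):

opsQueued := prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "ops_queued",
		Help: "Number of operations queued, partitioned by user and type.",
	},
	[]string{"user", "type"},
)
prometheus.MustRegister(opsQueued)
opsQueued.WithLabelValues("alice", "delete").Inc()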

View file

@ -17,7 +17,7 @@ type goCollector struct {
// NewGoCollector returns a collector which exports metrics about the current
// go process.
func NewGoCollector() *goCollector {
func NewGoCollector() Collector {
return &goCollector{
goroutines: NewGauge(GaugeOpts{
Namespace: "go",

View file

@ -51,11 +51,11 @@ type Histogram interface {
// bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le"
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var (
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a
// network service. Most likely, however, you will be required to define
// buckets customized to your use case.
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
errBucketLabelNotAllowed = fmt.Errorf(
@ -210,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
// Finally we know the final length of h.upperBounds and can make counts.
h.counts = make([]uint64, len(h.upperBounds))
h.Init(h) // Init self-collection.
h.init(h) // Init self-collection.
return h
}
@ -222,7 +222,7 @@ type histogram struct {
sumBits uint64
count uint64
SelfCollector
selfCollector
// Note that there is no mutex required.
desc *Desc
@ -287,7 +287,7 @@ func (h *histogram) Write(out *dto.Metric) error {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
MetricVec
*MetricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@ -301,13 +301,9 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
newMetric: func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
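Where DefBuckets does not fit, a histogram with custom buckets can be sketched as follows (all values illustrative; LinearBuckets and ExponentialBuckets are the helpers provided for this):

reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "request_duration_seconds",
	Help:    "Request duration in seconds.",
	Buckets: prometheus.LinearBuckets(0.05, 0.05, 10), // 50ms to 500ms in 50ms steps
})
prometheus.MustRegister(reqDur)
reqDur.Observe(0.27)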

View file

@ -15,14 +15,114 @@ package prometheus
import (
"bufio"
"bytes"
"compress/gzip"
"fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/prometheus/common/expfmt"
)
// TODO(beorn7): Remove this whole file. It is a partial mirror of
// promhttp/http.go (to avoid circular import chains) where everything HTTP
// related should live. The functions here are just for avoiding
// breakage. Everything is deprecated.
const (
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
var bufPool sync.Pool
func getBuf() *bytes.Buffer {
buf := bufPool.Get()
if buf == nil {
return &bytes.Buffer{}
}
return buf.(*bytes.Buffer)
}
func giveBuf(buf *bytes.Buffer) {
buf.Reset()
bufPool.Put(buf)
}
// Handler returns an HTTP handler for the DefaultGatherer. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead
// (which is not instrumented).
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := DefaultGatherer.Gather()
if err != nil {
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
contentType := expfmt.Negotiate(req.Header)
buf := getBuf()
defer giveBuf(buf)
writer, encoding := decorateWriter(req, buf)
enc := expfmt.NewEncoder(writer, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
if closer, ok := writer.(io.Closer); ok {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
header.Set(contentTypeHeader, string(contentType))
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
w.Write(buf.Bytes())
})
}
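Since the handlers in this file are deprecated, here is a hedged sketch of the preferred exposition path via the promhttp sub-package (address and pattern are illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// promhttp.Handler serves the DefaultGatherer without the
	// InstrumentHandler issues described below.
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}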
// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part := strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
}
return writer, ""
}
var instLabels = []string{"method", "code"}
type nower interface {
@ -58,7 +158,7 @@ func nowSeries(t ...time.Time) nower {
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
// Note that InstrumentHandler has several issues:
// Deprecated: InstrumentHandler has several issues:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
@ -73,8 +173,8 @@ func nowSeries(t ...time.Time) nower {
// performing such writes.
//
// Upcoming versions of this package will provide ways of instrumenting HTTP
// handlers that are more flexible and have fewer issues. Consider this function
// DEPRECATED and prefer direct instrumentation in the meantime.
// handlers that are more flexible and have fewer issues. Please prefer direct
// instrumentation in the meantime.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
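A hedged sketch of the direct instrumentation recommended above, in place of InstrumentHandler (metric and label names are illustrative; the usual net/http and prometheus imports are assumed):

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests, partitioned by status code and method.",
	},
	[]string{"code", "method"},
)

func instrumentedHandler(w http.ResponseWriter, r *http.Request) {
	// ... serve the request and determine the status code yourself ...
	httpReqs.WithLabelValues("200", r.Method).Inc()
}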
@ -82,6 +182,9 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun
// InstrumentHandlerFunc wraps the given function for instrumentation. It
// otherwise works in the same way as InstrumentHandler (and shares the same
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
@ -117,6 +220,9 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
// and all its fields are set to the equally named fields in the provided
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
@ -125,6 +231,9 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
// the same issues) but provides more flexibility (at the cost of a more complex
// call syntax). See InstrumentHandlerWithOpts for details how the provided
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
// as InstrumentHandler is.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec(
CounterOpts{

View file

@ -22,10 +22,8 @@ import (
const separatorByte byte = 255
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementers of Metric in this package inclued Gauge, Counter,
// Untyped, and Summary. Users can implement their own Metric types, but that
// should be rarely needed. See the example for SelfCollector, which is also an
// example for a user-implemented Metric.
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
// Histogram, Summary, and Untyped.
type Metric interface {
// Desc returns the descriptor for the Metric. This method idempotently
// returns the same descriptor throughout the lifetime of the
@ -36,21 +34,23 @@ type Metric interface {
// Write encodes the Metric into a "Metric" Protocol Buffer data
// transmission object.
//
// Implementers of custom Metric types must observe concurrency safety
// as reads of this metric may occur at any time, and any blocking
// occurs at the expense of total performance of rendering all
// registered metrics. Ideally Metric implementations should support
// concurrent readers.
// Metric implementations must observe concurrency safety as reads of
// this metric may occur at any time, and any blocking occurs at the
// expense of total performance of rendering all registered
// metrics. Ideally, Metric implementations should support concurrent
// readers.
//
// The Prometheus client library attempts to minimize memory allocations
// and will provide a pre-existing reset dto.Metric pointer. Prometheus
// may recycle the dto.Metric proto message, so Metric implementations
// should just populate the provided dto.Metric and then should not keep
// any reference to it.
//
// While populating dto.Metric, labels must be sorted lexicographically.
// (Implementers may find LabelPairSorter useful for that.)
// While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is
// recommended to sort labels lexicographically. (Implementers may find
// LabelPairSorter useful for that.) Callers of Write should still make
// sure of sorting if they depend on it.
Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The
// signature of this method should be changed to "Write() (*dto.Metric,
// error)".
}
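A sketch of the Write contract from the caller's side, using a const metric (dto is github.com/prometheus/client_model/go; the metric name is illustrative):

desc := prometheus.NewDesc("example_total", "An example counter.", nil, nil)
m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 7)

var out dto.Metric
if err := m.Write(&out); err != nil {
	panic(err)
}
// out now carries the sample value and its (sorted) label pairs.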
// Opts bundles the options for creating most Metric types. Each metric

View file

@ -28,7 +28,7 @@ type processCollector struct {
// NewProcessCollector returns a collector which exports the current state of
// process metrics including cpu, memory and file descriptor usage as well as
// the process start time for the given process id under the given namespace.
func NewProcessCollector(pid int, namespace string) *processCollector {
func NewProcessCollector(pid int, namespace string) Collector {
return NewProcessCollectorPIDFn(
func() (int, error) { return pid, nil },
namespace,
@ -43,7 +43,7 @@ func NewProcessCollector(pid int, namespace string) *processCollector {
func NewProcessCollectorPIDFn(
pidFn func() (int, error),
namespace string,
) *processCollector {
) Collector {
c := processCollector{
pidFn: pidFn,
collectFn: func(chan<- Metric) {},

View file

@ -1,65 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package prometheus
// Push triggers a metric collection by the default registry and pushes all
// collected metrics to the Pushgateway specified by url. See the Pushgateway
// documentation for detailed implications of the job and instance
// parameter. instance can be left empty. You can use just host:port or ip:port
// as url, in which case 'http://' is added automatically. You can also include
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and instance will
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
// to push to the Pushgateway.)
func Push(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "PUT")
}
// PushAdd works like Push, but only previously pushed metrics with the same
// name (and the same job and instance) will be replaced. (It uses HTTP method
// 'POST' to push to the Pushgateway.)
func PushAdd(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "POST")
}
// PushCollectors works like Push, but it does not collect from the default
// registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "PUT", collectors...)
}
// PushAddCollectors works like PushAdd, but it does not collect from the
// default registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "POST", collectors...)
}
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
r := newRegistry()
for _, collector := range collectors {
if _, err := r.Register(collector); err != nil {
return err
}
}
return r.Push(job, instance, url, method)
}

File diff suppressed because it is too large

View file

@ -53,8 +53,8 @@ type Summary interface {
Observe(float64)
}
// DefObjectives are the default Summary quantile values.
var (
// DefObjectives are the default Summary quantile values.
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
errQuantileLabelNotAllowed = fmt.Errorf(
@ -139,11 +139,11 @@ type SummaryOpts struct {
BufCap uint32
}
// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
// method of perk/quantile is actually not working as advertised - and it might
// be unfixable, as the underlying algorithm is apparently not capable of
// merging summaries in the first place. To avoid using Merge, we are currently
// adding observations to _each_ age bucket, i.e. the effort to add a sample is
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
// summaries in the first place. To avoid using Merge, we are currently adding
// observations to _each_ age bucket, i.e. the effort to add a sample is
// essentially multiplied by the number of age buckets. When rotating age
// buckets, we empty the previous head stream. On scrape time, we simply take
// the quantiles from the head stream (no merging required). Result: More effort
@ -227,12 +227,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
}
sort.Float64s(s.sortedObjectives)
s.Init(s) // Init self-collection.
s.init(s) // Init self-collection.
return s
}
type summary struct {
SelfCollector
selfCollector
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
mtx sync.Mutex // Protects every other moving part.
@ -390,7 +390,7 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
MetricVec
*MetricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@ -404,13 +404,9 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
newMetric: func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}

View file

@ -56,7 +56,7 @@ func NewUntyped(opts UntypedOpts) Untyped {
// labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct {
MetricVec
*MetricVec
}
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
@ -70,13 +70,9 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
opts.ConstLabels,
)
return &UntypedVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
newMetric: func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
}),
}
}

View file

@ -48,7 +48,7 @@ type value struct {
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
SelfCollector
selfCollector
desc *Desc
valType ValueType
@ -68,7 +68,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin
valBits: math.Float64bits(val),
labelPairs: makeLabelPairs(desc, labelValues),
}
result.Init(result)
result.init(result)
return result
}
@ -113,7 +113,7 @@ func (v *value) Write(out *dto.Metric) error {
// library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc.
type valueFunc struct {
SelfCollector
selfCollector
desc *Desc
valType ValueType
@ -134,7 +134,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
function: function,
labelPairs: makeLabelPairs(desc, nil),
}
result.Init(result)
result.init(result)
return result
}

View file

@ -16,6 +16,8 @@ package prometheus
import (
"fmt"
"sync"
"github.com/prometheus/common/model"
)
// MetricVec is a Collector to bundle metrics of the same name that
@ -25,10 +27,31 @@ import (
// provided in this package.
type MetricVec struct {
mtx sync.RWMutex // Protects the children.
children map[uint64]Metric
children map[uint64][]metricWithLabelValues
desc *Desc
newMetric func(labelValues ...string) Metric
newMetric func(labelValues ...string) Metric
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
hashAddByte func(h uint64, b byte) uint64
}
// newMetricVec returns an initialized MetricVec. The concrete value is
// returned for embedding into another struct.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
return &MetricVec{
children: map[uint64][]metricWithLabelValues{},
desc: desc,
newMetric: newMetric,
hashAdd: hashAdd,
hashAddByte: hashAddByte,
}
}
// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
values []string
metric Metric
}
// Describe implements Collector. The length of the returned slice
@ -42,8 +65,10 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
m.mtx.RLock()
defer m.mtx.RUnlock()
for _, metric := range m.children {
ch <- metric
for _, metrics := range m.children {
for _, metric := range metrics {
ch <- metric.metric
}
}
}
@ -77,16 +102,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
return nil, err
}
m.mtx.RLock()
metric, ok := m.children[h]
m.mtx.RUnlock()
if ok {
return metric, nil
}
m.mtx.Lock()
defer m.mtx.Unlock()
return m.getOrCreateMetric(h, lvs...), nil
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
@ -107,20 +123,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
return nil, err
}
m.mtx.RLock()
metric, ok := m.children[h]
m.mtx.RUnlock()
if ok {
return metric, nil
}
lvs := make([]string, len(labels))
for i, label := range m.desc.variableLabels {
lvs[i] = labels[label]
}
m.mtx.Lock()
defer m.mtx.Unlock()
return m.getOrCreateMetric(h, lvs...), nil
return m.getOrCreateMetricWithLabels(h, labels), nil
}
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
@ -168,11 +171,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
if err != nil {
return false
}
if _, ok := m.children[h]; !ok {
return false
}
delete(m.children, h)
return true
return m.deleteByHashWithLabelValues(h, lvs)
}
// Delete deletes the metric where the variable labels are the same as those
@ -193,10 +192,50 @@ func (m *MetricVec) Delete(labels Labels) bool {
if err != nil {
return false
}
if _, ok := m.children[h]; !ok {
return m.deleteByHashWithLabels(h, labels)
}
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
metrics, ok := m.children[h]
if !ok {
return false
}
delete(m.children, h)
i := m.findMetricWithLabelValues(metrics, lvs)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true
}
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use labels to select a metric and
// remove only that metric.
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
metrics, ok := m.children[h]
if !ok {
return false
}
i := m.findMetricWithLabels(metrics, labels)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true
}
@ -216,7 +255,8 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
}
h := hashNew()
for _, val := range vals {
h = hashAdd(h, val)
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
@ -231,19 +271,134 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label)
}
h = hashAdd(h, val)
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
metric, ok := m.children[hash]
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithLabelValues(hash, lvs)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithLabelValues(hash, lvs)
if !ok {
// Copy labelValues. Otherwise, they would be allocated even if we don't go
// down this code path.
copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
metric = m.newMetric(copiedLabelValues...)
m.children[hash] = metric
// Copy to avoid allocation in case we don't go down this code path.
copiedLVs := make([]string, len(lvs))
copy(copiedLVs, lvs)
metric = m.newMetric(copiedLVs...)
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
}
return metric
}
// getOrCreateMetricWithLabels retrieves the metric by hash and labels or
// creates it and returns the new one.
//
// This function holds the mutex.
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithLabels(hash, labels)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithLabels(hash, labels)
if !ok {
lvs := m.extractLabelValues(labels)
metric = m.newMetric(lvs...)
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
}
return metric
}
// getMetricWithLabelValues gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// getMetricWithLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
for i, metric := range metrics {
if m.matchLabelValues(metric.values, lvs) {
return i
}
}
return len(metrics)
}
// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
for i, metric := range metrics {
if m.matchLabels(metric.values, labels) {
return i
}
}
return len(metrics)
}
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
if len(values) != len(lvs) {
return false
}
for i, v := range values {
if v != lvs[i] {
return false
}
}
return true
}
func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
if len(labels) != len(values) {
return false
}
for i, k := range m.desc.variableLabels {
if values[i] != labels[k] {
return false
}
}
return true
}
func (m *MetricVec) extractLabelValues(labels Labels) []string {
labelValues := make([]string, len(labels))
for i, k := range m.desc.variableLabels {
labelValues[i] = labels[k]
}
return labelValues
}
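The collision handling above is transparent to callers of the vector types built on MetricVec; a usage sketch (names illustrative):

requests := prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "api_requests_total", Help: "API requests."},
	[]string{"endpoint"},
)
requests.WithLabelValues("/users").Inc()                      // creates the child on first use
requests.With(prometheus.Labels{"endpoint": "/users"}).Add(2) // same child, addressed by label map
requests.DeleteLabelValues("/users")                          // removes exactly that child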

View file

@ -29,9 +29,6 @@ const (
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
// fmtJSON2 is hidden as it is deprecated.
fmtJSON2 Format = `application/json; version=0.0.2`
)
const (

View file

@ -14,7 +14,6 @@
package expfmt
import (
"bytes"
"fmt"
"io"
"math"
@ -26,9 +25,12 @@ import (
// MetricFamilyToText converts a MetricFamily proto message into text format and
// writes the resulting lines to 'out'. It returns the number of bytes written
// and any error encountered. This function does not perform checks on the
// content of the metric and label names, i.e. invalid metric or label names
// and any error encountered. The output will have the same order as the input;
// no further sorting is performed. Furthermore, this function assumes the input
// is already sanitized and does not perform any sanity checks. If the input
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
var written int
@ -285,21 +287,17 @@ func labelPairsToText(
return written, nil
}
var (
escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
)
// escapeString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
func escapeString(v string, includeDoubleQuote bool) string {
result := bytes.NewBuffer(make([]byte, 0, len(v)))
for _, c := range v {
switch {
case c == '\\':
result.WriteString(`\\`)
case includeDoubleQuote && c == '"':
result.WriteString(`\"`)
case c == '\n':
result.WriteString(`\n`)
default:
result.WriteRune(c)
}
if includeDoubleQuote {
return escapeWithDoubleQuote.Replace(v)
}
return result.String()
return escape.Replace(v)
}
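The replacer-based escaping above can be sketched standalone (the quoteLabelValue helper is illustrative, not part of this package):

var escaper = strings.NewReplacer(`\`, `\\`, "\n", `\n`, `"`, `\"`)

func quoteLabelValue(v string) string {
	// `a"b` becomes `"a\"b"`; a literal newline becomes the two characters \n.
	return `"` + escaper.Replace(v) + `"`
}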

View file

@ -47,7 +47,7 @@ func (e ParseError) Error() string {
}
// TextParser is used to parse the simple and flat text-based exchange format. Its
// nil value is ready to use.
// zero value is ready to use.
type TextParser struct {
metricFamiliesByName map[string]*dto.MetricFamily
buf *bufio.Reader // Where the parsed input is read through.

View file

@ -0,0 +1,89 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package log
import (
"fmt"
"os"
"golang.org/x/sys/windows/svc/eventlog"
"github.com/Sirupsen/logrus"
)
func init() {
setEventlogFormatter = func(name string, debugAsInfo bool) error {
if name == "" {
return fmt.Errorf("missing name parameter")
}
fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
origLogger.Errorf("can't connect logger to eventlog: %v", err)
return err
}
origLogger.Formatter = fmter
return nil
}
}
type eventlogger struct {
log *eventlog.Log
debugAsInfo bool
wrap logrus.Formatter
}
func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
logHandle, err := eventlog.Open(name)
if err != nil {
return nil, err
}
return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
}
func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
data, err := s.wrap.Format(e)
if err != nil {
fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
return data, err
}
switch e.Level {
case logrus.PanicLevel:
fallthrough
case logrus.FatalLevel:
fallthrough
case logrus.ErrorLevel:
err = s.log.Error(102, e.Message)
case logrus.WarnLevel:
err = s.log.Warning(101, e.Message)
case logrus.InfoLevel:
err = s.log.Info(100, e.Message)
case logrus.DebugLevel:
if s.debugAsInfo {
err = s.log.Info(100, e.Message)
}
default:
err = s.log.Info(100, e.Message)
}
if err != nil {
fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
}
return data, err
}

View file

@ -16,20 +16,23 @@ package log
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
)
type levelFlag struct{}
type levelFlag string
// String implements flag.Value.
func (f levelFlag) String() string {
return origLogger.Level.String()
return fmt.Sprintf("%q", string(f))
}
// Set implements flag.Value.
@ -45,20 +48,23 @@ func (f levelFlag) Set(level string) error {
// setSyslogFormatter is nil if the target architecture does not support syslog.
var setSyslogFormatter func(string, string) error
// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
var setEventlogFormatter func(string, bool) error
func setJSONFormatter() {
origLogger.Formatter = &logrus.JSONFormatter{}
}
type logFormatFlag struct{ uri string }
type logFormatFlag url.URL
// String implements flag.Value.
func (f logFormatFlag) String() string {
return f.uri
u := url.URL(f)
return fmt.Sprintf("%q", u.String())
}
// Set implements flag.Value.
func (f logFormatFlag) Set(format string) error {
f.uri = format
u, err := url.Parse(format)
if err != nil {
return err
@ -79,13 +85,23 @@ func (f logFormatFlag) Set(format string) error {
appname := u.Query().Get("appname")
facility := u.Query().Get("local")
return setSyslogFormatter(appname, facility)
case "eventlog":
if setEventlogFormatter == nil {
return fmt.Errorf("system does not support eventlog")
}
name := u.Query().Get("name")
debugAsInfo := false
debugAsInfoRaw := u.Query().Get("debugAsInfo")
if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
debugAsInfo = parsedDebugAsInfo
}
return setEventlogFormatter(name, debugAsInfo)
case "stdout":
origLogger.Out = os.Stdout
case "stderr":
origLogger.Out = os.Stderr
default:
return fmt.Errorf("unsupported logger %s", u.Opaque)
return fmt.Errorf("unsupported logger %q", u.Opaque)
}
return nil
}
@ -99,10 +115,19 @@ func init() {
// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
// flag.Parse() to make the logging flags take effect.
func AddFlags(fs *flag.FlagSet) {
fs.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal].")
fs.Var(logFormatFlag{}, "log.format", "If set use a syslog logger or JSON logging. Example: logger:syslog?appname=bob&local=7 or logger:stdout?json=true. Defaults to stderr.")
fs.Var(
levelFlag(origLogger.Level.String()),
"log.level",
"Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
)
fs.Var(
logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}),
"log.format",
`Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`,
)
}
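A usage sketch: since the init function above already registers these flags on flag.CommandLine, a plain flag.Parse() is enough (flag values shown are examples):

package main

import (
	"flag"

	"github.com/prometheus/common/log"
)

func main() {
	flag.Parse() // e.g. -log.level=debug -log.format="logger:stdout?json=true"
	log.Info("logging configured")
}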
// Logger is the interface for loggers used in the Prometheus components.
type Logger interface {
Debug(...interface{})
Debugln(...interface{})
@ -227,10 +252,26 @@ func (l logger) sourced() *logrus.Entry {
var origLogger = logrus.New()
var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
// Base returns the default Logger, which logs to os.Stderr unless configured otherwise via the log.format flag.
func Base() Logger {
return baseLogger
}
// NewLogger returns a new Logger logging to out.
func NewLogger(w io.Writer) Logger {
l := logrus.New()
l.Out = w
return logger{entry: logrus.NewEntry(l)}
}
// NewNopLogger returns a logger that discards all log messages.
func NewNopLogger() Logger {
l := logrus.New()
l.Out = ioutil.Discard
return logger{entry: logrus.NewEntry(l)}
}
// With adds a field to the logger.
func With(key string, value interface{}) Logger {
return baseLogger.With(key, value)
}
@ -240,7 +281,7 @@ func Debug(args ...interface{}) {
baseLogger.sourced().Debug(args...)
}
// Debug logs a message at level Debug on the standard logger.
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
baseLogger.sourced().Debugln(args...)
}
@ -255,7 +296,7 @@ func Info(args ...interface{}) {
baseLogger.sourced().Info(args...)
}
// Info logs a message at level Info on the standard logger.
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
baseLogger.sourced().Infoln(args...)
}
@ -270,7 +311,7 @@ func Warn(args ...interface{}) {
baseLogger.sourced().Warn(args...)
}
// Warn logs a message at level Warn on the standard logger.
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
baseLogger.sourced().Warnln(args...)
}
@ -285,7 +326,7 @@ func Error(args ...interface{}) {
baseLogger.sourced().Error(args...)
}
// Error logs a message at level Error on the standard logger.
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
baseLogger.sourced().Errorln(args...)
}
@ -300,7 +341,7 @@ func Fatal(args ...interface{}) {
baseLogger.sourced().Fatal(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
baseLogger.sourced().Fatalln(args...)
}

View file

@ -9,13 +9,15 @@ package leveldb
import (
"encoding/binary"
"fmt"
"io"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/storage"
)
// ErrBatchCorrupted records reason of batch corruption.
// ErrBatchCorrupted records the reason for batch corruption. This error will
// be wrapped with errors.ErrCorrupted.
type ErrBatchCorrupted struct {
Reason string
}
@ -29,8 +31,9 @@ func newErrBatchCorrupted(reason string) error {
}
const (
batchHdrLen = 8 + 4
batchGrowRec = 3000
batchHeaderLen = 8 + 4
batchGrowRec = 3000
batchBufioSize = 16
)
// BatchReplay wraps basic batch operations.
@ -39,34 +42,46 @@ type BatchReplay interface {
Delete(key []byte)
}
type batchIndex struct {
keyType keyType
keyPos, keyLen int
valuePos, valueLen int
}
func (index batchIndex) k(data []byte) []byte {
return data[index.keyPos : index.keyPos+index.keyLen]
}
func (index batchIndex) v(data []byte) []byte {
if index.valueLen != 0 {
return data[index.valuePos : index.valuePos+index.valueLen]
}
return nil
}
func (index batchIndex) kv(data []byte) (key, value []byte) {
return index.k(data), index.v(data)
}
// Batch is a write batch.
type Batch struct {
data []byte
rLen, bLen int
seq uint64
sync bool
data []byte
index []batchIndex
// internalLen is the sum of the key/value pair lengths, plus 8 bytes per internal key.
internalLen int
}
func (b *Batch) grow(n int) {
off := len(b.data)
if off == 0 {
off = batchHdrLen
if b.data != nil {
b.data = b.data[:off]
}
}
if cap(b.data)-off < n {
if b.data == nil {
b.data = make([]byte, off, off+n)
} else {
odata := b.data
div := 1
if b.rLen > batchGrowRec {
div = b.rLen / batchGrowRec
}
b.data = make([]byte, off, off+n+(off-batchHdrLen)/div)
copy(b.data, odata)
o := len(b.data)
if cap(b.data)-o < n {
div := 1
if len(b.index) > batchGrowRec {
div = len(b.index) / batchGrowRec
}
ndata := make([]byte, o, o+n+o/div)
copy(ndata, b.data)
b.data = ndata
}
}
@ -76,32 +91,36 @@ func (b *Batch) appendRec(kt keyType, key, value []byte) {
n += binary.MaxVarintLen32 + len(value)
}
b.grow(n)
off := len(b.data)
data := b.data[:off+n]
data[off] = byte(kt)
off++
off += binary.PutUvarint(data[off:], uint64(len(key)))
copy(data[off:], key)
off += len(key)
index := batchIndex{keyType: kt}
o := len(b.data)
data := b.data[:o+n]
data[o] = byte(kt)
o++
o += binary.PutUvarint(data[o:], uint64(len(key)))
index.keyPos = o
index.keyLen = len(key)
o += copy(data[o:], key)
if kt == keyTypeVal {
off += binary.PutUvarint(data[off:], uint64(len(value)))
copy(data[off:], value)
off += len(value)
o += binary.PutUvarint(data[o:], uint64(len(value)))
index.valuePos = o
index.valueLen = len(value)
o += copy(data[o:], value)
}
b.data = data[:off]
b.rLen++
// Include 8-byte ikey header
b.bLen += len(key) + len(value) + 8
b.data = data[:o]
b.index = append(b.index, index)
b.internalLen += index.keyLen + index.valueLen + 8
}
// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns.
// It is safe to modify the contents of the argument after Put returns but not
// before.
func (b *Batch) Put(key, value []byte) {
b.appendRec(keyTypeVal, key, value)
}
// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns.
// It is safe to modify the contents of the argument after Delete returns but
// not before.
func (b *Batch) Delete(key []byte) {
b.appendRec(keyTypeDel, key, nil)
}
@ -111,7 +130,7 @@ func (b *Batch) Delete(key []byte) {
// The returned slice is not its own copy, so the contents should not be
// modified.
func (b *Batch) Dump() []byte {
return b.encode()
return b.data
}
// Load loads given slice into the batch. Previous contents of the batch
@ -119,144 +138,212 @@ func (b *Batch) Dump() []byte {
// The given slice will not be copied and will be used as batch buffer, so
// it is not safe to modify the contents of the slice.
func (b *Batch) Load(data []byte) error {
return b.decode(0, data)
return b.decode(data, -1)
}
// Replay replays batch contents.
func (b *Batch) Replay(r BatchReplay) error {
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
switch kt {
for _, index := range b.index {
switch index.keyType {
case keyTypeVal:
r.Put(key, value)
r.Put(index.k(b.data), index.v(b.data))
case keyTypeDel:
r.Delete(key)
r.Delete(index.k(b.data))
}
return nil
})
}
return nil
}
// Len returns number of records in the batch.
func (b *Batch) Len() int {
return b.rLen
return len(b.index)
}
// Reset resets the batch.
func (b *Batch) Reset() {
b.data = b.data[:0]
b.seq = 0
b.rLen = 0
b.bLen = 0
b.sync = false
b.index = b.index[:0]
b.internalLen = 0
}
func (b *Batch) init(sync bool) {
b.sync = sync
}
func (b *Batch) append(p *Batch) {
if p.rLen > 0 {
b.grow(len(p.data) - batchHdrLen)
b.data = append(b.data, p.data[batchHdrLen:]...)
b.rLen += p.rLen
b.bLen += p.bLen
}
if p.sync {
b.sync = true
}
}
// size returns sums of key/value pair length plus 8-bytes ikey.
func (b *Batch) size() int {
return b.bLen
}
func (b *Batch) encode() []byte {
b.grow(0)
binary.LittleEndian.PutUint64(b.data, b.seq)
binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))
return b.data
}
func (b *Batch) decode(prevSeq uint64, data []byte) error {
if len(data) < batchHdrLen {
return newErrBatchCorrupted("too short")
}
b.seq = binary.LittleEndian.Uint64(data)
if b.seq < prevSeq {
return newErrBatchCorrupted("invalid sequence number")
}
b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
if b.rLen < 0 {
return newErrBatchCorrupted("invalid records length")
}
// No need to be precise at this point, it won't be used anyway
b.bLen = len(data) - batchHdrLen
b.data = data
return nil
}
func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
off := batchHdrLen
for i := 0; i < b.rLen; i++ {
if off >= len(b.data) {
return newErrBatchCorrupted("invalid records length")
}
kt := keyType(b.data[off])
if kt > keyTypeVal {
panic(kt)
return newErrBatchCorrupted("bad record: invalid type")
}
off++
x, n := binary.Uvarint(b.data[off:])
off += n
if n <= 0 || off+int(x) > len(b.data) {
return newErrBatchCorrupted("bad record: invalid key length")
}
key := b.data[off : off+int(x)]
off += int(x)
var value []byte
if kt == keyTypeVal {
x, n := binary.Uvarint(b.data[off:])
off += n
if n <= 0 || off+int(x) > len(b.data) {
return newErrBatchCorrupted("bad record: invalid value length")
}
value = b.data[off : off+int(x)]
off += int(x)
}
if err := f(i, kt, key, value); err != nil {
func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error {
for i, index := range b.index {
if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil {
return err
}
}
return nil
}
func (b *Batch) memReplay(to *memdb.DB) error {
var ikScratch []byte
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Put(ikScratch, value)
})
func (b *Batch) append(p *Batch) {
ob := len(b.data)
oi := len(b.index)
b.data = append(b.data, p.data...)
b.index = append(b.index, p.index...)
b.internalLen += p.internalLen
// Updating index offset.
if ob != 0 {
for ; oi < len(b.index); oi++ {
index := &b.index[oi]
index.keyPos += ob
if index.valueLen != 0 {
index.valuePos += ob
}
}
}
}
func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
if err := b.decode(prevSeq, data); err != nil {
func (b *Batch) decode(data []byte, expectedLen int) error {
b.data = data
b.index = b.index[:0]
b.internalLen = 0
err := decodeBatch(data, func(i int, index batchIndex) error {
b.index = append(b.index, index)
b.internalLen += index.keyLen + index.valueLen + 8
return nil
})
if err != nil {
return err
}
return b.memReplay(to)
if expectedLen >= 0 && len(b.index) != expectedLen {
return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index)))
}
return nil
}
func (b *Batch) revertMemReplay(to *memdb.DB) error {
var ikScratch []byte
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Delete(ikScratch)
})
func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error {
var ik []byte
for i, index := range b.index {
ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
if err := mdb.Put(ik, index.v(b.data)); err != nil {
return err
}
}
return nil
}
func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error {
var ik []byte
for i, index := range b.index {
ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
if err := mdb.Delete(ik); err != nil {
return err
}
}
return nil
}
func newBatch() interface{} {
return &Batch{}
}
func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error {
var index batchIndex
for i, o := 0, 0; o < len(data); i++ {
// Key type.
index.keyType = keyType(data[o])
if index.keyType > keyTypeVal {
return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType)))
}
o++
// Key.
x, n := binary.Uvarint(data[o:])
o += n
if n <= 0 || o+int(x) > len(data) {
return newErrBatchCorrupted("bad record: invalid key length")
}
index.keyPos = o
index.keyLen = int(x)
o += index.keyLen
// Value.
if index.keyType == keyTypeVal {
x, n = binary.Uvarint(data[o:])
o += n
if n <= 0 || o+int(x) > len(data) {
return newErrBatchCorrupted("bad record: invalid value length")
}
index.valuePos = o
index.valueLen = int(x)
o += index.valueLen
} else {
index.valuePos = 0
index.valueLen = 0
}
if err := fn(i, index); err != nil {
return err
}
}
return nil
}
func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) {
seq, batchLen, err = decodeBatchHeader(data)
if err != nil {
return 0, 0, err
}
if seq < expectSeq {
return 0, 0, newErrBatchCorrupted("invalid sequence number")
}
data = data[batchHeaderLen:]
var ik []byte
var decodedLen int
err = decodeBatch(data, func(i int, index batchIndex) error {
if i >= batchLen {
return newErrBatchCorrupted("invalid records length")
}
ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType)
if err := mdb.Put(ik, index.v(data)); err != nil {
return err
}
decodedLen++
return nil
})
if err == nil && decodedLen != batchLen {
err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen))
}
return
}
func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte {
dst = ensureBuffer(dst, batchHeaderLen)
binary.LittleEndian.PutUint64(dst, seq)
binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen))
return dst
}
func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) {
if len(data) < batchHeaderLen {
return 0, 0, newErrBatchCorrupted("too short")
}
seq = binary.LittleEndian.Uint64(data)
batchLen = int(binary.LittleEndian.Uint32(data[8:]))
if batchLen < 0 {
return 0, 0, newErrBatchCorrupted("invalid records length")
}
return
}
func batchesLen(batches []*Batch) int {
batchLen := 0
for _, batch := range batches {
batchLen += batch.Len()
}
return batchLen
}
func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error {
if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil {
return err
}
for _, batch := range batches {
if _, err := wr.Write(batch.data); err != nil {
return err
}
}
return nil
}
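A hedged sketch of the public Batch API whose encoding the functions above implement (keys and values are illustrative, and db is assumed to be an already opened *leveldb.DB):

batch := new(leveldb.Batch)
batch.Put([]byte("foo"), []byte("value"))
batch.Put([]byte("bar"), []byte("another value"))
batch.Delete([]byte("baz"))
if err := db.Write(batch, nil); err != nil {
	// handle the write error
}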

View file

@ -16,7 +16,7 @@ import (
)
// Cacher provides an interface to implement caching functionality.
// An implementation must be goroutine-safe.
// An implementation must be safe for concurrent use.
type Cacher interface {
// Capacity returns cache capacity.
Capacity() int
@ -511,18 +511,12 @@ func (r *Cache) EvictAll() {
}
}
// Close closes the 'cache map' and releases all 'cache node'.
// Close closes the 'cache map' and forcefully releases all 'cache node'.
func (r *Cache) Close() error {
r.mu.Lock()
if !r.closed {
r.closed = true
if r.cacher != nil {
if err := r.cacher.Close(); err != nil {
return err
}
}
h := (*mNode)(r.mHead)
h.initBuckets()
@ -541,10 +535,37 @@ func (r *Cache) Close() error {
for _, f := range n.onDel {
f()
}
n.onDel = nil
}
}
}
r.mu.Unlock()
// Avoid deadlock.
if r.cacher != nil {
if err := r.cacher.Close(); err != nil {
return err
}
}
return nil
}
// CloseWeak closes the 'cache map' and evicts all 'cache node' from the cacher,
// but unlike Close it doesn't forcefully release the 'cache node'.
func (r *Cache) CloseWeak() error {
r.mu.Lock()
if !r.closed {
r.closed = true
}
r.mu.Unlock()
// Avoid deadlock.
if r.cacher != nil {
r.cacher.EvictAll()
if err := r.cacher.Close(); err != nil {
return err
}
}
return nil
}

View file

@ -6,7 +6,9 @@
package leveldb
import "github.com/syndtr/goleveldb/leveldb/comparer"
import (
"github.com/syndtr/goleveldb/leveldb/comparer"
)
type iComparer struct {
ucmp comparer.Comparer
@ -33,12 +35,12 @@ func (icmp *iComparer) Name() string {
}
func (icmp *iComparer) Compare(a, b []byte) int {
x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey())
x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey())
if x == 0 {
if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
x = -1
return -1
} else if m < n {
x = 1
return 1
}
}
return x
@ -46,30 +48,20 @@ func (icmp *iComparer) Compare(a, b []byte) int {
func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
dst = icmp.ucmp.Separator(dst, ua, ub)
if dst == nil {
return nil
dst = icmp.uSeparator(dst, ua, ub)
if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
// Append earliest possible number.
return append(dst, keyMaxNumBytes...)
}
if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
dst = append(dst, keyMaxNumBytes...)
} else {
// Does not rule out that n may be longer than len(ub).
dst = append(dst, a[len(a)-8:]...)
}
return dst
return nil
}
func (icmp *iComparer) Successor(dst, b []byte) []byte {
ub := internalKey(b).ukey()
dst = icmp.ucmp.Successor(dst, ub)
if dst == nil {
return nil
dst = icmp.uSuccessor(dst, ub)
if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
// Append earliest possible number.
return append(dst, keyMaxNumBytes...)
}
if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
dst = append(dst, keyMaxNumBytes...)
} else {
// Does not rule out that n may be longer than len(ub).
dst = append(dst, b[len(b)-8:]...)
}
return dst
return nil
}

View file

@ -53,14 +53,13 @@ type DB struct {
aliveSnaps, aliveIters int32
// Write.
writeC chan *Batch
batchPool sync.Pool
writeMergeC chan writeMerge
writeMergedC chan bool
writeLockC chan struct{}
writeAckC chan error
writeDelay time.Duration
writeDelayN int
journalC chan *Batch
journalAckC chan error
tr *Transaction
// Compaction.
@ -94,12 +93,11 @@ func openDB(s *session) (*DB, error) {
// Snapshot
snapsList: list.New(),
// Write
writeC: make(chan *Batch),
batchPool: sync.Pool{New: newBatch},
writeMergeC: make(chan writeMerge),
writeMergedC: make(chan bool),
writeLockC: make(chan struct{}, 1),
writeAckC: make(chan error),
journalC: make(chan *Batch),
journalAckC: make(chan error),
// Compaction
tcompCmdC: make(chan cCmd),
tcompPauseC: make(chan chan<- struct{}),
@ -144,10 +142,10 @@ func openDB(s *session) (*DB, error) {
if readOnly {
db.SetReadOnly()
} else {
db.closeW.Add(3)
db.closeW.Add(2)
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
// go db.jWriter()
}
s.logf("db@open done T·%v", time.Since(start))
@ -162,10 +160,10 @@ func openDB(s *session) (*DB, error) {
// os.ErrExist error.
//
// Open will return an error with type of ErrCorrupted if corruption
// detected in the DB. Corrupted DB can be recovered with Recover
// function.
// detected in the DB. Use errors.IsCorrupted to test whether an error is
// due to corruption. Corrupted DB can be recovered with Recover function.
//
// The returned DB instance is goroutine-safe.
// The returned DB instance is safe for concurrent use.
// The DB must be closed after use, by calling Close method.
func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
s, err := newSession(stor, o)
@ -202,13 +200,13 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
// os.ErrExist error.
//
// OpenFile uses standard file-system backed storage implementation as
// desribed in the leveldb/storage package.
// described in the leveldb/storage package.
//
// OpenFile will return an error with type of ErrCorrupted if corruption
// detected in the DB. Corrupted DB can be recovered with Recover
// function.
// detected in the DB. Use errors.IsCorrupted to test whether an error is
// due to corruption. Corrupted DB can be recovered with Recover function.
//
// The returned DB instance is goroutine-safe.
// The returned DB instance is safe for concurrent use.
// The DB must be closed after use, by calling Close method.
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path, o.GetReadOnly())
@ -229,7 +227,7 @@ func OpenFile(path string, o *opt.Options) (db *DB, err error) {
// The DB must already exist or it will return an error.
// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
//
// The returned DB instance is goroutine-safe.
// The returned DB instance is safe for concurrent use.
// The DB must be closed after use, by calling Close method.
func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
s, err := newSession(stor, o)
@ -255,10 +253,10 @@ func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
// The DB must already exist or it will return an error.
// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
//
// RecoverFile uses standard file-system backed storage implementation as desribed
// RecoverFile uses standard file-system backed storage implementation as described
// in the leveldb/storage package.
//
// The returned DB instance is goroutine-safe.
// The returned DB instance is safe for concurrent use.
// The DB must be closed after use, by calling Close method.
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path, false)
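
The reworked doc comments point callers at errors.IsCorrupted for classifying failures. A hedged usage sketch against the goleveldb API vendored here (the path is a placeholder):

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
)

func main() {
	path := "/tmp/demo.db" // placeholder path
	db, err := leveldb.OpenFile(path, nil)
	if errors.IsCorrupted(err) {
		// Corruption detected: recover, as the doc comments suggest.
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close() // the returned DB is safe for concurrent use until Close
}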
@ -504,10 +502,11 @@ func (db *DB) recoverJournal() error {
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer = db.s.o.GetWriteBuffer()
jr *journal.Reader
mdb = memdb.New(db.s.icmp, writeBuffer)
buf = &util.Buffer{}
batch = &Batch{}
jr *journal.Reader
mdb = memdb.New(db.s.icmp, writeBuffer)
buf = &util.Buffer{}
batchSeq uint64
batchLen int
)
for _, fd := range fds {
@ -526,7 +525,7 @@ func (db *DB) recoverJournal() error {
}
// Flush memdb and remove obsolete journal file.
if !ofd.Nil() {
if !ofd.Zero() {
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
fr.Close()
@ -569,7 +568,8 @@ func (db *DB) recoverJournal() error {
fr.Close()
return errors.SetFd(err, fd)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
if err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
@ -581,7 +581,7 @@ func (db *DB) recoverJournal() error {
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
db.seq = batchSeq + uint64(batchLen)
// Flush it if large enough.
if mdb.Size() >= writeBuffer {
@ -624,7 +624,7 @@ func (db *DB) recoverJournal() error {
}
// Remove the last obsolete journal file.
if !ofd.Nil() {
if !ofd.Zero() {
db.s.stor.Remove(ofd)
}
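
decodeBatchToMem hands back the decoded batch's starting sequence and length directly, so recovery can advance db.seq without retaining a Batch value. A sketch of that bookkeeping in which the decoder itself is a stand-in, not the real implementation:

package main

import "fmt"

type entry struct{ key, value string }

// decodeBatchToMem is a stand-in for the real decoder: it replays the
// batch into a memdb and reports the batch's starting sequence and length.
func decodeBatchToMem(entries []entry, expectSeq uint64, mdb map[string]string) (batchSeq uint64, batchLen int, err error) {
	batchSeq = expectSeq // the real decoder reads this from the batch header
	for _, e := range entries {
		mdb[e.key] = e.value
	}
	return batchSeq, len(entries), nil
}

func main() {
	mdb := map[string]string{}
	var seq uint64 = 42
	batchSeq, batchLen, err := decodeBatchToMem([]entry{{"a", "1"}, {"b", "2"}}, seq, mdb)
	if err != nil {
		panic(err)
	}
	// Save sequence number, mirroring the diff:
	seq = batchSeq + uint64(batchLen)
	fmt.Println(seq, mdb) // 44 map[a:1 b:2]
}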
@ -661,9 +661,10 @@ func (db *DB) recoverJournalRO() error {
db.logf("journal@recovery RO·Mode F·%d", len(fds))
var (
jr *journal.Reader
buf = &util.Buffer{}
batch = &Batch{}
jr *journal.Reader
buf = &util.Buffer{}
batchSeq uint64
batchLen int
)
for _, fd := range fds {
@ -703,7 +704,8 @@ func (db *DB) recoverJournalRO() error {
fr.Close()
return errors.SetFd(err, fd)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
if err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
@ -715,7 +717,7 @@ func (db *DB) recoverJournalRO() error {
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
db.seq = batchSeq + uint64(batchLen)
}
fr.Close()
@ -856,7 +858,7 @@ func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
// NewIterator returns an iterator for the latest snapshot of the
// underlying DB.
// The returned iterator is not goroutine-safe, but it is safe to use
// The returned iterator is not safe for concurrent use, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with modifying its
// underlying DB. The resultant key/value pairs are guaranteed to be
@ -1062,6 +1064,8 @@ func (db *DB) Close() error {
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
db.journal = nil
db.journalWriter = nil
}
if db.writeDelayN > 0 {
@ -1077,15 +1081,11 @@ func (db *DB) Close() error {
if err1 := db.closer.Close(); err == nil {
err = err1
}
db.closer = nil
}
// NIL'ing pointers.
db.s = nil
db.mem = nil
db.frozenMem = nil
db.journal = nil
db.journalWriter = nil
db.closer = nil
// Clear memdbs.
db.clearMems()
return err
}
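
Close now nils journal and journalWriter immediately after closing them and clears the memdbs through a helper, so a repeated Close sees nil handles and does nothing. A sketch of that defensive pattern with a stand-in journal type:

package main

import "fmt"

type journal struct{ name string }

func (j *journal) Close() error {
	fmt.Println("closed", j.name)
	return nil
}

type db struct {
	journal       *journal
	journalWriter *journal
}

// Close releases each resource and nils the pointer right away, so a
// repeated Close is a harmless no-op instead of a double-close.
func (d *db) Close() error {
	if d.journal != nil {
		d.journal.Close()
		d.journalWriter.Close()
		d.journal = nil
		d.journalWriter = nil
	}
	return nil
}

func main() {
	d := &db{journal: &journal{"journal"}, journalWriter: &journal{"writer"}}
	d.Close()
	d.Close() // second call: nothing left to close
}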

View file

@ -96,7 +96,7 @@ noerr:
default:
goto haserr
}
case _, _ = <-db.closeC:
case <-db.closeC:
return
}
}
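
Every occurrence of "case _, _ = <-db.closeC:" becomes "case <-db.closeC:". The two forms behave identically, both fire once closeC is closed, and the short form is the idiomatic spelling when neither the received value nor the ok flag is used. A sketch of the shutdown idiom:

package main

import (
	"fmt"
	"time"
)

func worker(closeC <-chan struct{}) {
	for {
		select {
		case <-time.After(10 * time.Millisecond):
			fmt.Println("tick")
		case <-closeC: // fires once closeC is closed; value and ok flag are irrelevant
			fmt.Println("exiting")
			return
		}
	}
}

func main() {
	closeC := make(chan struct{})
	done := make(chan struct{})
	go func() {
		worker(closeC)
		close(done)
	}()
	time.Sleep(25 * time.Millisecond)
	close(closeC) // broadcast shutdown to every receiver
	<-done
}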
@ -113,7 +113,7 @@ haserr:
goto hasperr
default:
}
case _, _ = <-db.closeC:
case <-db.closeC:
return
}
}
@ -126,7 +126,7 @@ hasperr:
case db.writeLockC <- struct{}{}:
// Hold write lock, so that write won't pass-through.
db.compWriteLocking = true
case _, _ = <-db.closeC:
case <-db.closeC:
if db.compWriteLocking {
// We should release the lock or Close will hang.
<-db.writeLockC
@ -195,7 +195,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
db.logf("%s exiting (persistent error %q)", name, perr)
db.compactionExitTransact()
}
case _, _ = <-db.closeC:
case <-db.closeC:
db.logf("%s exiting", name)
db.compactionExitTransact()
}
@ -224,7 +224,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
}
select {
case <-backoffT.C:
case _, _ = <-db.closeC:
case <-db.closeC:
db.logf("%s exiting", name)
db.compactionExitTransact()
}
@ -288,7 +288,7 @@ func (db *DB) memCompaction() {
case <-db.compPerErrC:
close(resumeC)
resumeC = nil
case _, _ = <-db.closeC:
case <-db.closeC:
return
}
@ -337,7 +337,7 @@ func (db *DB) memCompaction() {
select {
case <-resumeC:
close(resumeC)
case _, _ = <-db.closeC:
case <-db.closeC:
return
}
}
@ -378,7 +378,7 @@ func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
select {
case ch := <-b.db.tcompPauseC:
b.db.pauseCompaction(ch)
case _, _ = <-b.db.closeC:
case <-b.db.closeC:
b.db.compactionExitTransact()
default:
}
@ -643,7 +643,7 @@ func (db *DB) tableNeedCompaction() bool {
func (db *DB) pauseCompaction(ch chan<- struct{}) {
select {
case ch <- struct{}{}:
case _, _ = <-db.closeC:
case <-db.closeC:
db.compactionExitTransact()
}
}
@ -697,14 +697,14 @@ func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
case compC <- cAuto{ch}:
case err = <-db.compErrC:
return
case _, _ = <-db.closeC:
case <-db.closeC:
return ErrClosed
}
// Wait cmd.
select {
case err = <-ch:
case err = <-db.compErrC:
case _, _ = <-db.closeC:
case <-db.closeC:
return ErrClosed
}
return err
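
compTriggerWait is a two-phase select: first deliver the command unless a standing compaction error or shutdown preempts it, then wait on the same three outcomes for the reply. A condensed, runnable sketch in which cAuto and the channel roles follow the diff and everything else is an assumption:

package main

import (
	"errors"
	"fmt"
)

var ErrClosed = errors.New("leveldb: closed")

type cAuto struct{ ackC chan<- error }

func compTriggerWait(compC chan<- interface{}, compErrC <-chan error, closeC <-chan struct{}) error {
	ch := make(chan error)
	// Send cmd.
	select {
	case compC <- cAuto{ch}:
	case err := <-compErrC:
		return err
	case <-closeC:
		return ErrClosed
	}
	// Wait cmd.
	select {
	case err := <-ch:
		return err
	case err := <-compErrC:
		return err
	case <-closeC:
		return ErrClosed
	}
}

func main() {
	compC := make(chan interface{})
	compErrC := make(chan error)
	closeC := make(chan struct{})
	go func() {
		cmd := (<-compC).(cAuto)
		cmd.ackC <- nil // compaction finished cleanly
	}()
	fmt.Println(compTriggerWait(compC, compErrC, closeC))
}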
@ -719,14 +719,14 @@ func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (e
case compC <- cRange{level, min, max, ch}:
case err := <-db.compErrC:
return err
case _, _ = <-db.closeC:
case <-db.closeC:
return ErrClosed
}
// Wait cmd.
select {
case err = <-ch:
case err = <-db.compErrC:
case _, _ = <-db.closeC:
case <-db.closeC:
return ErrClosed
}
return err
@ -758,7 +758,7 @@ func (db *DB) mCompaction() {
default:
panic("leveldb: unknown command")
}
case _, _ = <-db.closeC:
case <-db.closeC:
return
}
}
@ -791,7 +791,7 @@ func (db *DB) tCompaction() {
case ch := <-db.tcompPauseC:
db.pauseCompaction(ch)
continue
case _, _ = <-db.closeC:
case <-db.closeC:
return
default:
}
@ -806,7 +806,7 @@ func (db *DB) tCompaction() {
case ch := <-db.tcompPauseC:
db.pauseCompaction(ch)
continue
case _, _ = <-db.closeC:
case <-db.closeC:
return
}
}

View file

@ -59,7 +59,7 @@ func (db *DB) releaseSnapshot(se *snapshotElement) {
}
}
// Gets minimum sequence that not being snapshoted.
// Gets the minimum sequence that is not being snapshotted.
func (db *DB) minSeq() uint64 {
db.snapsMu.Lock()
defer db.snapsMu.Unlock()
@ -131,7 +131,7 @@ func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error)
}
// NewIterator returns an iterator for the snapshot of the underlying DB.
// The returned iterator is not goroutine-safe, but it is safe to use
// The returned iterator is not safe for concurrent use, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with modifying its
// underlying DB. The resultant key/value pairs are guaranteed to be

Some files were not shown because too many files have changed in this diff.