mirror of
https://github.com/prometheus/prometheus.git
synced 2025-03-05 20:59:13 -08:00
commit
1c3531af4e
|
@ -1,3 +1,11 @@
|
||||||
|
## 1.2.2 / 2016-10-28
|
||||||
|
|
||||||
|
* [BUGFIX] UI: Deal properly with aborted requests.
|
||||||
|
* [BUGFIX] UI: Decode URL query parameters properly.
|
||||||
|
* [BUGFIX] Storage: Deal better with data corruption (non-monotonic timestamps).
|
||||||
|
* [BUGFIX] Remote storage: Re-add accidentally removed timeout flag.
|
||||||
|
* [BUGFIX] Updated a number of vendored packages to pick up upstream bug fixes.
|
||||||
|
|
||||||
## 1.2.1 / 2016-10-10
|
## 1.2.1 / 2016-10-10
|
||||||
|
|
||||||
* [BUGFIX] Count chunk evictions properly so that the server doesn't
|
* [BUGFIX] Count chunk evictions properly so that the server doesn't
|
||||||
|
|
|
@ -163,7 +163,7 @@ func (node *BinaryExpr) String() string {
|
||||||
|
|
||||||
matching := ""
|
matching := ""
|
||||||
vm := node.VectorMatching
|
vm := node.VectorMatching
|
||||||
if vm != nil && len(vm.MatchingLabels) > 0 {
|
if vm != nil && (len(vm.MatchingLabels) > 0 || vm.On) {
|
||||||
if vm.On {
|
if vm.On {
|
||||||
matching = fmt.Sprintf(" ON(%s)", vm.MatchingLabels)
|
matching = fmt.Sprintf(" ON(%s)", vm.MatchingLabels)
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -59,6 +59,10 @@ func TestExprString(t *testing.T) {
|
||||||
inputs := []struct {
|
inputs := []struct {
|
||||||
in, out string
|
in, out string
|
||||||
}{
|
}{
|
||||||
|
{
|
||||||
|
in: `sum(task:errors:rate10s{job="s"}) BY ()`,
|
||||||
|
out: `sum(task:errors:rate10s{job="s"})`,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
in: `sum(task:errors:rate10s{job="s"}) BY (code)`,
|
in: `sum(task:errors:rate10s{job="s"}) BY (code)`,
|
||||||
},
|
},
|
||||||
|
@ -77,6 +81,9 @@ func TestExprString(t *testing.T) {
|
||||||
{
|
{
|
||||||
in: `count_values("value", task:errors:rate10s{job="s"})`,
|
in: `count_values("value", task:errors:rate10s{job="s"})`,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
in: `a - ON() c`,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
in: `a - ON(b) c`,
|
in: `a - ON(b) c`,
|
||||||
},
|
},
|
||||||
|
@ -92,6 +99,10 @@ func TestExprString(t *testing.T) {
|
||||||
{
|
{
|
||||||
in: `a - IGNORING(b) c`,
|
in: `a - IGNORING(b) c`,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
in: `a - IGNORING() c`,
|
||||||
|
out: `a - c`,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
in: `up > BOOL 0`,
|
in: `up > BOOL 0`,
|
||||||
},
|
},
|
||||||
|
|
10
vendor/github.com/asaskevich/govalidator/patterns.go
generated
vendored
10
vendor/github.com/asaskevich/govalidator/patterns.go
generated
vendored
|
@ -30,7 +30,15 @@ const (
|
||||||
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
|
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
|
||||||
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
|
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
|
||||||
DNSName string = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62})*$`
|
DNSName string = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62})*$`
|
||||||
URL string = `^((ftp|https?):\/\/)?(\S+(:\S*)?@)?((([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(([a-zA-Z0-9]([a-zA-Z0-9-]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|((www\.)?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))(:(\d{1,5}))?((\/|\?|#)[^\s]*)?$`
|
IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
|
||||||
|
URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
|
||||||
|
URLUsername string = `(\S+(:\S*)?@)`
|
||||||
|
Hostname string = ``
|
||||||
|
URLPath string = `((\/|\?|#)[^\s]*)`
|
||||||
|
URLPort string = `(:(\d{1,5}))`
|
||||||
|
URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
|
||||||
|
URLSubdomain string = `((www\.)|([a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*))`
|
||||||
|
URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))` + URLPort + `?` + URLPath + `?$`
|
||||||
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
|
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
|
||||||
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
|
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
|
||||||
UnixPath string = `^((?:\/[a-zA-Z0-9\.\:]+(?:_[a-zA-Z0-9\:\.]+)*(?:\-[\:a-zA-Z0-9\.]+)*)+\/?)$`
|
UnixPath string = `^((?:\/[a-zA-Z0-9\.\:]+(?:_[a-zA-Z0-9\:\.]+)*(?:\-[\:a-zA-Z0-9\.]+)*)+\/?)$`
|
||||||
|
|
6
vendor/github.com/asaskevich/govalidator/validator.go
generated
vendored
6
vendor/github.com/asaskevich/govalidator/validator.go
generated
vendored
|
@ -496,6 +496,12 @@ func IsIPv6(str string) bool {
|
||||||
return ip != nil && strings.Contains(str, ":")
|
return ip != nil && strings.Contains(str, ":")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsCIDR check if the string is an valid CIDR notiation (IPV4 & IPV6)
|
||||||
|
func IsCIDR(str string) bool {
|
||||||
|
_, _, err := net.ParseCIDR(str)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
// IsMAC check if a string is valid MAC address.
|
// IsMAC check if a string is valid MAC address.
|
||||||
// Possible MAC formats:
|
// Possible MAC formats:
|
||||||
// 01:23:45:67:89:ab
|
// 01:23:45:67:89:ab
|
||||||
|
|
112
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
112
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
|
@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
// int32, int64, uint32, uint64, bool, and enum
|
||||||
// protocol buffer types.
|
// protocol buffer types.
|
||||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||||
// x, n already 0
|
|
||||||
for shift := uint(0); shift < 64; shift += 7 {
|
for shift := uint(0); shift < 64; shift += 7 {
|
||||||
if n >= len(buf) {
|
if n >= len(buf) {
|
||||||
return 0, 0
|
return 0, 0
|
||||||
|
@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||||
return 0, 0
|
return 0, 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||||
// This is the format for the
|
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
|
||||||
// protocol buffer types.
|
|
||||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
|
||||||
// x, err already 0
|
|
||||||
|
|
||||||
i := p.index
|
i := p.index
|
||||||
l := len(p.buf)
|
l := len(p.buf)
|
||||||
|
|
||||||
|
@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||||
|
// This is the format for the
|
||||||
|
// int32, int64, uint32, uint64, bool, and enum
|
||||||
|
// protocol buffer types.
|
||||||
|
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||||
|
i := p.index
|
||||||
|
buf := p.buf
|
||||||
|
|
||||||
|
if i >= len(buf) {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
} else if buf[i] < 0x80 {
|
||||||
|
p.index++
|
||||||
|
return uint64(buf[i]), nil
|
||||||
|
} else if len(buf)-i < 10 {
|
||||||
|
return p.decodeVarintSlow()
|
||||||
|
}
|
||||||
|
|
||||||
|
var b uint64
|
||||||
|
// we already checked the first byte
|
||||||
|
x = uint64(buf[i]) - 0x80
|
||||||
|
i++
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 7
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 7
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 14
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 14
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 21
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 21
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 28
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 28
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 35
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 35
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 42
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 42
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 49
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 49
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 56
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 56
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 63
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
// x -= 0x80 << 63 // Always zero.
|
||||||
|
|
||||||
|
return 0, errOverflow
|
||||||
|
|
||||||
|
done:
|
||||||
|
p.index = i
|
||||||
|
return x, nil
|
||||||
|
}
|
||||||
|
|
||||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||||
// This is the format for the
|
// This is the format for the
|
||||||
// fixed64, sfixed64, and double protocol buffer types.
|
// fixed64, sfixed64, and double protocol buffer types.
|
||||||
|
@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
|
||||||
// Buffer and places the decoded result in pb. If the struct
|
// Buffer and places the decoded result in pb. If the struct
|
||||||
// underlying pb does not match the data in the buffer, the results can be
|
// underlying pb does not match the data in the buffer, the results can be
|
||||||
// unpredictable.
|
// unpredictable.
|
||||||
|
//
|
||||||
|
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||||
func (p *Buffer) Unmarshal(pb Message) error {
|
func (p *Buffer) Unmarshal(pb Message) error {
|
||||||
// If the object can unmarshal itself, let it.
|
// If the object can unmarshal itself, let it.
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
if u, ok := pb.(Unmarshaler); ok {
|
||||||
|
|
14
vendor/github.com/golang/protobuf/proto/encode.go
generated
vendored
14
vendor/github.com/golang/protobuf/proto/encode.go
generated
vendored
|
@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) {
|
||||||
}
|
}
|
||||||
p := NewBuffer(nil)
|
p := NewBuffer(nil)
|
||||||
err := p.Marshal(pb)
|
err := p.Marshal(pb)
|
||||||
var state errorState
|
|
||||||
if err != nil && !state.shouldContinue(err, nil) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if p.buf == nil && err == nil {
|
if p.buf == nil && err == nil {
|
||||||
// Return a non-nil slice on success.
|
// Return a non-nil slice on success.
|
||||||
return []byte{}, nil
|
return []byte{}, nil
|
||||||
|
@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error {
|
||||||
// Can the object marshal itself?
|
// Can the object marshal itself?
|
||||||
if m, ok := pb.(Marshaler); ok {
|
if m, ok := pb.(Marshaler); ok {
|
||||||
data, err := m.Marshal()
|
data, err := m.Marshal()
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
p.buf = append(p.buf, data...)
|
p.buf = append(p.buf, data...)
|
||||||
return nil
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
t, base, err := getbase(pb)
|
t, base, err := getbase(pb)
|
||||||
|
@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if collectStats {
|
if collectStats {
|
||||||
stats.Encode++
|
(stats).Encode++ // Parens are to work around a goimports bug.
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(p.buf) > maxMarshalSize {
|
if len(p.buf) > maxMarshalSize {
|
||||||
|
@ -309,7 +302,7 @@ func Size(pb Message) (n int) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if collectStats {
|
if collectStats {
|
||||||
stats.Size++
|
(stats).Size++ // Parens are to work around a goimports bug.
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
|
@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
|
||||||
if p.isMarshaler {
|
if p.isMarshaler {
|
||||||
m := structPointer_Interface(structp, p.stype).(Marshaler)
|
m := structPointer_Interface(structp, p.stype).(Marshaler)
|
||||||
data, _ := m.Marshal()
|
data, _ := m.Marshal()
|
||||||
n += len(p.tagcode)
|
|
||||||
n += sizeRawBytes(data)
|
n += sizeRawBytes(data)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
8
vendor/github.com/golang/protobuf/proto/equal.go
generated
vendored
8
vendor/github.com/golang/protobuf/proto/equal.go
generated
vendored
|
@ -54,13 +54,17 @@ Equality is defined in this way:
|
||||||
in a proto3 .proto file, fields are not "set"; specifically,
|
in a proto3 .proto file, fields are not "set"; specifically,
|
||||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||||
- Two repeated fields are equal iff their lengths are the same,
|
- Two repeated fields are equal iff their lengths are the same,
|
||||||
and their corresponding elements are equal (a "bytes" field,
|
and their corresponding elements are equal. Note a "bytes" field,
|
||||||
although represented by []byte, is not a repeated field)
|
although represented by []byte, is not a repeated field and the
|
||||||
|
rule for the scalar fields described above applies.
|
||||||
- Two unset fields are equal.
|
- Two unset fields are equal.
|
||||||
- Two unknown field sets are equal if their current
|
- Two unknown field sets are equal if their current
|
||||||
encoded state is equal.
|
encoded state is equal.
|
||||||
- Two extension sets are equal iff they have corresponding
|
- Two extension sets are equal iff they have corresponding
|
||||||
elements that are pairwise equal.
|
elements that are pairwise equal.
|
||||||
|
- Two map fields are equal iff their lengths are the same,
|
||||||
|
and they contain the same set of elements. Zero-length map
|
||||||
|
fields are equal.
|
||||||
- Every other combination of things are not equal.
|
- Every other combination of things are not equal.
|
||||||
|
|
||||||
The return value is undefined if a and b are not protocol buffers.
|
The return value is undefined if a and b are not protocol buffers.
|
||||||
|
|
484
vendor/github.com/golang/protobuf/proto/pointer_reflect.go
generated
vendored
484
vendor/github.com/golang/protobuf/proto/pointer_reflect.go
generated
vendored
|
@ -1,484 +0,0 @@
|
||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// +build appengine js
|
|
||||||
|
|
||||||
// This file contains an implementation of proto field accesses using package reflect.
|
|
||||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
|
||||||
// be used on App Engine.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A structPointer is a pointer to a struct.
|
|
||||||
type structPointer struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
|
||||||
// The reflect value must itself be a pointer to a struct.
|
|
||||||
func toStructPointer(v reflect.Value) structPointer {
|
|
||||||
return structPointer{v}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNil reports whether p is nil.
|
|
||||||
func structPointer_IsNil(p structPointer) bool {
|
|
||||||
return p.v.IsNil()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Interface returns the struct pointer as an interface value.
|
|
||||||
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
|
|
||||||
return p.v.Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
// A field identifies a field in a struct, accessible from a structPointer.
|
|
||||||
// In this implementation, a field is identified by the sequence of field indices
|
|
||||||
// passed to reflect's FieldByIndex.
|
|
||||||
type field []int
|
|
||||||
|
|
||||||
// toField returns a field equivalent to the given reflect field.
|
|
||||||
func toField(f *reflect.StructField) field {
|
|
||||||
return f.Index
|
|
||||||
}
|
|
||||||
|
|
||||||
// invalidField is an invalid field identifier.
|
|
||||||
var invalidField = field(nil)
|
|
||||||
|
|
||||||
// IsValid reports whether the field identifier is valid.
|
|
||||||
func (f field) IsValid() bool { return f != nil }
|
|
||||||
|
|
||||||
// field returns the given field in the struct as a reflect value.
|
|
||||||
func structPointer_field(p structPointer, f field) reflect.Value {
|
|
||||||
// Special case: an extension map entry with a value of type T
|
|
||||||
// passes a *T to the struct-handling code with a zero field,
|
|
||||||
// expecting that it will be treated as equivalent to *struct{ X T },
|
|
||||||
// which has the same memory layout. We have to handle that case
|
|
||||||
// specially, because reflect will panic if we call FieldByIndex on a
|
|
||||||
// non-struct.
|
|
||||||
if f == nil {
|
|
||||||
return p.v.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
return p.v.Elem().FieldByIndex(f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ifield returns the given field in the struct as an interface value.
|
|
||||||
func structPointer_ifield(p structPointer, f field) interface{} {
|
|
||||||
return structPointer_field(p, f).Addr().Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bytes returns the address of a []byte field in the struct.
|
|
||||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
|
||||||
return structPointer_ifield(p, f).(*[]byte)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
|
||||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
|
||||||
return structPointer_ifield(p, f).(*[][]byte)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bool returns the address of a *bool field in the struct.
|
|
||||||
func structPointer_Bool(p structPointer, f field) **bool {
|
|
||||||
return structPointer_ifield(p, f).(**bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BoolVal returns the address of a bool field in the struct.
|
|
||||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
|
||||||
return structPointer_ifield(p, f).(*bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BoolSlice returns the address of a []bool field in the struct.
|
|
||||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
|
||||||
return structPointer_ifield(p, f).(*[]bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the address of a *string field in the struct.
|
|
||||||
func structPointer_String(p structPointer, f field) **string {
|
|
||||||
return structPointer_ifield(p, f).(**string)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringVal returns the address of a string field in the struct.
|
|
||||||
func structPointer_StringVal(p structPointer, f field) *string {
|
|
||||||
return structPointer_ifield(p, f).(*string)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringSlice returns the address of a []string field in the struct.
|
|
||||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
|
||||||
return structPointer_ifield(p, f).(*[]string)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extensions returns the address of an extension map field in the struct.
|
|
||||||
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
|
|
||||||
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtMap returns the address of an extension map field in the struct.
|
|
||||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
|
||||||
return structPointer_ifield(p, f).(*map[int32]Extension)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAt returns the reflect.Value for a pointer to a field in the struct.
|
|
||||||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
|
|
||||||
return structPointer_field(p, f).Addr()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStructPointer writes a *struct field in the struct.
|
|
||||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
|
||||||
structPointer_field(p, f).Set(q.v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStructPointer reads a *struct field in the struct.
|
|
||||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
|
||||||
return structPointer{structPointer_field(p, f)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StructPointerSlice the address of a []*struct field in the struct.
|
|
||||||
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
|
|
||||||
return structPointerSlice{structPointer_field(p, f)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A structPointerSlice represents the address of a slice of pointers to structs
|
|
||||||
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
|
|
||||||
type structPointerSlice struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p structPointerSlice) Len() int { return p.v.Len() }
|
|
||||||
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
|
|
||||||
func (p structPointerSlice) Append(q structPointer) {
|
|
||||||
p.v.Set(reflect.Append(p.v, q.v))
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
int32Type = reflect.TypeOf(int32(0))
|
|
||||||
uint32Type = reflect.TypeOf(uint32(0))
|
|
||||||
float32Type = reflect.TypeOf(float32(0))
|
|
||||||
int64Type = reflect.TypeOf(int64(0))
|
|
||||||
uint64Type = reflect.TypeOf(uint64(0))
|
|
||||||
float64Type = reflect.TypeOf(float64(0))
|
|
||||||
)
|
|
||||||
|
|
||||||
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
|
|
||||||
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
|
|
||||||
type word32 struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNil reports whether p is nil.
|
|
||||||
func word32_IsNil(p word32) bool {
|
|
||||||
return p.v.IsNil()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets p to point at a newly allocated word with bits set to x.
|
|
||||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
|
||||||
t := p.v.Type().Elem()
|
|
||||||
switch t {
|
|
||||||
case int32Type:
|
|
||||||
if len(o.int32s) == 0 {
|
|
||||||
o.int32s = make([]int32, uint32PoolSize)
|
|
||||||
}
|
|
||||||
o.int32s[0] = int32(x)
|
|
||||||
p.v.Set(reflect.ValueOf(&o.int32s[0]))
|
|
||||||
o.int32s = o.int32s[1:]
|
|
||||||
return
|
|
||||||
case uint32Type:
|
|
||||||
if len(o.uint32s) == 0 {
|
|
||||||
o.uint32s = make([]uint32, uint32PoolSize)
|
|
||||||
}
|
|
||||||
o.uint32s[0] = x
|
|
||||||
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
|
|
||||||
o.uint32s = o.uint32s[1:]
|
|
||||||
return
|
|
||||||
case float32Type:
|
|
||||||
if len(o.float32s) == 0 {
|
|
||||||
o.float32s = make([]float32, uint32PoolSize)
|
|
||||||
}
|
|
||||||
o.float32s[0] = math.Float32frombits(x)
|
|
||||||
p.v.Set(reflect.ValueOf(&o.float32s[0]))
|
|
||||||
o.float32s = o.float32s[1:]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// must be enum
|
|
||||||
p.v.Set(reflect.New(t))
|
|
||||||
p.v.Elem().SetInt(int64(int32(x)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get gets the bits pointed at by p, as a uint32.
|
|
||||||
func word32_Get(p word32) uint32 {
|
|
||||||
elem := p.v.Elem()
|
|
||||||
switch elem.Kind() {
|
|
||||||
case reflect.Int32:
|
|
||||||
return uint32(elem.Int())
|
|
||||||
case reflect.Uint32:
|
|
||||||
return uint32(elem.Uint())
|
|
||||||
case reflect.Float32:
|
|
||||||
return math.Float32bits(float32(elem.Float()))
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
|
|
||||||
func structPointer_Word32(p structPointer, f field) word32 {
|
|
||||||
return word32{structPointer_field(p, f)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A word32Val represents a field of type int32, uint32, float32, or enum.
|
|
||||||
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
|
|
||||||
type word32Val struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets *p to x.
|
|
||||||
func word32Val_Set(p word32Val, x uint32) {
|
|
||||||
switch p.v.Type() {
|
|
||||||
case int32Type:
|
|
||||||
p.v.SetInt(int64(x))
|
|
||||||
return
|
|
||||||
case uint32Type:
|
|
||||||
p.v.SetUint(uint64(x))
|
|
||||||
return
|
|
||||||
case float32Type:
|
|
||||||
p.v.SetFloat(float64(math.Float32frombits(x)))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// must be enum
|
|
||||||
p.v.SetInt(int64(int32(x)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get gets the bits pointed at by p, as a uint32.
|
|
||||||
func word32Val_Get(p word32Val) uint32 {
|
|
||||||
elem := p.v
|
|
||||||
switch elem.Kind() {
|
|
||||||
case reflect.Int32:
|
|
||||||
return uint32(elem.Int())
|
|
||||||
case reflect.Uint32:
|
|
||||||
return uint32(elem.Uint())
|
|
||||||
case reflect.Float32:
|
|
||||||
return math.Float32bits(float32(elem.Float()))
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
|
|
||||||
func structPointer_Word32Val(p structPointer, f field) word32Val {
|
|
||||||
return word32Val{structPointer_field(p, f)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A word32Slice is a slice of 32-bit values.
|
|
||||||
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
|
|
||||||
type word32Slice struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p word32Slice) Append(x uint32) {
|
|
||||||
n, m := p.v.Len(), p.v.Cap()
|
|
||||||
if n < m {
|
|
||||||
p.v.SetLen(n + 1)
|
|
||||||
} else {
|
|
||||||
t := p.v.Type().Elem()
|
|
||||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
|
||||||
}
|
|
||||||
elem := p.v.Index(n)
|
|
||||||
switch elem.Kind() {
|
|
||||||
case reflect.Int32:
|
|
||||||
elem.SetInt(int64(int32(x)))
|
|
||||||
case reflect.Uint32:
|
|
||||||
elem.SetUint(uint64(x))
|
|
||||||
case reflect.Float32:
|
|
||||||
elem.SetFloat(float64(math.Float32frombits(x)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p word32Slice) Len() int {
|
|
||||||
return p.v.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p word32Slice) Index(i int) uint32 {
|
|
||||||
elem := p.v.Index(i)
|
|
||||||
switch elem.Kind() {
|
|
||||||
case reflect.Int32:
|
|
||||||
return uint32(elem.Int())
|
|
||||||
case reflect.Uint32:
|
|
||||||
return uint32(elem.Uint())
|
|
||||||
case reflect.Float32:
|
|
||||||
return math.Float32bits(float32(elem.Float()))
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
|
|
||||||
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
|
|
||||||
return word32Slice{structPointer_field(p, f)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// word64 is like word32 but for 64-bit values.
|
|
||||||
type word64 struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
|
||||||
t := p.v.Type().Elem()
|
|
||||||
switch t {
|
|
||||||
case int64Type:
|
|
||||||
if len(o.int64s) == 0 {
|
|
||||||
o.int64s = make([]int64, uint64PoolSize)
|
|
||||||
}
|
|
||||||
o.int64s[0] = int64(x)
|
|
||||||
p.v.Set(reflect.ValueOf(&o.int64s[0]))
|
|
||||||
o.int64s = o.int64s[1:]
|
|
||||||
return
|
|
||||||
case uint64Type:
|
|
||||||
if len(o.uint64s) == 0 {
|
|
||||||
o.uint64s = make([]uint64, uint64PoolSize)
|
|
||||||
}
|
|
||||||
o.uint64s[0] = x
|
|
||||||
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
|
|
||||||
o.uint64s = o.uint64s[1:]
|
|
||||||
return
|
|
||||||
case float64Type:
|
|
||||||
if len(o.float64s) == 0 {
|
|
||||||
o.float64s = make([]float64, uint64PoolSize)
|
|
||||||
}
|
|
||||||
o.float64s[0] = math.Float64frombits(x)
|
|
||||||
p.v.Set(reflect.ValueOf(&o.float64s[0]))
|
|
||||||
o.float64s = o.float64s[1:]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
func word64_IsNil(p word64) bool {
|
|
||||||
return p.v.IsNil()
|
|
||||||
}
|
|
||||||
|
|
||||||
func word64_Get(p word64) uint64 {
|
|
||||||
elem := p.v.Elem()
|
|
||||||
switch elem.Kind() {
|
|
||||||
case reflect.Int64:
|
|
||||||
return uint64(elem.Int())
|
|
||||||
case reflect.Uint64:
|
|
||||||
return elem.Uint()
|
|
||||||
case reflect.Float64:
|
|
||||||
return math.Float64bits(elem.Float())
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
func structPointer_Word64(p structPointer, f field) word64 {
|
|
||||||
return word64{structPointer_field(p, f)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// word64Val is like word32Val but for 64-bit values.
|
|
||||||
type word64Val struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
|
|
||||||
switch p.v.Type() {
|
|
||||||
case int64Type:
|
|
||||||
p.v.SetInt(int64(x))
|
|
||||||
return
|
|
||||||
case uint64Type:
|
|
||||||
p.v.SetUint(x)
|
|
||||||
return
|
|
||||||
case float64Type:
|
|
||||||
p.v.SetFloat(math.Float64frombits(x))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
func word64Val_Get(p word64Val) uint64 {
|
|
||||||
elem := p.v
|
|
||||||
switch elem.Kind() {
|
|
||||||
case reflect.Int64:
|
|
||||||
return uint64(elem.Int())
|
|
||||||
case reflect.Uint64:
|
|
||||||
return elem.Uint()
|
|
||||||
case reflect.Float64:
|
|
||||||
return math.Float64bits(elem.Float())
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
func structPointer_Word64Val(p structPointer, f field) word64Val {
|
|
||||||
return word64Val{structPointer_field(p, f)}
|
|
||||||
}
|
|
||||||
|
|
||||||
type word64Slice struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p word64Slice) Append(x uint64) {
|
|
||||||
n, m := p.v.Len(), p.v.Cap()
|
|
||||||
if n < m {
|
|
||||||
p.v.SetLen(n + 1)
|
|
||||||
} else {
|
|
||||||
t := p.v.Type().Elem()
|
|
||||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
|
||||||
}
|
|
||||||
elem := p.v.Index(n)
|
|
||||||
switch elem.Kind() {
|
|
||||||
case reflect.Int64:
|
|
||||||
elem.SetInt(int64(int64(x)))
|
|
||||||
case reflect.Uint64:
|
|
||||||
elem.SetUint(uint64(x))
|
|
||||||
case reflect.Float64:
|
|
||||||
elem.SetFloat(float64(math.Float64frombits(x)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p word64Slice) Len() int {
|
|
||||||
return p.v.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p word64Slice) Index(i int) uint64 {
|
|
||||||
elem := p.v.Index(i)
|
|
||||||
switch elem.Kind() {
|
|
||||||
case reflect.Int64:
|
|
||||||
return uint64(elem.Int())
|
|
||||||
case reflect.Uint64:
|
|
||||||
return uint64(elem.Uint())
|
|
||||||
case reflect.Float64:
|
|
||||||
return math.Float64bits(float64(elem.Float()))
|
|
||||||
}
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
|
|
||||||
return word64Slice{structPointer_field(p, f)}
|
|
||||||
}
|
|
10
vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
10
vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
|
@ -844,7 +844,15 @@ func RegisterType(x Message, name string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// MessageName returns the fully-qualified proto name for the given message type.
|
// MessageName returns the fully-qualified proto name for the given message type.
|
||||||
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
|
func MessageName(x Message) string {
|
||||||
|
type xname interface {
|
||||||
|
XXX_MessageName() string
|
||||||
|
}
|
||||||
|
if m, ok := x.(xname); ok {
|
||||||
|
return m.XXX_MessageName()
|
||||||
|
}
|
||||||
|
return revProtoTypes[reflect.TypeOf(x)]
|
||||||
|
}
|
||||||
|
|
||||||
// MessageType returns the message type (pointer to struct) for a named message.
|
// MessageType returns the message type (pointer to struct) for a named message.
|
||||||
func MessageType(name string) reflect.Type { return protoTypes[name] }
|
func MessageType(name string) reflect.Type { return protoTypes[name] }
|
||||||
|
|
6
vendor/github.com/golang/protobuf/proto/text_parser.go
generated
vendored
6
vendor/github.com/golang/protobuf/proto/text_parser.go
generated
vendored
|
@ -792,12 +792,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||||
case reflect.Bool:
|
case reflect.Bool:
|
||||||
// Either "true", "false", 1 or 0.
|
// true/1/t/True or false/f/0/False.
|
||||||
switch tok.value {
|
switch tok.value {
|
||||||
case "true", "1":
|
case "true", "1", "t", "True":
|
||||||
fv.SetBool(true)
|
fv.SetBool(true)
|
||||||
return nil
|
return nil
|
||||||
case "false", "0":
|
case "false", "0", "f", "False":
|
||||||
fv.SetBool(false)
|
fv.SetBool(false)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
16
vendor/github.com/hashicorp/consul/api/README.md
generated
vendored
16
vendor/github.com/hashicorp/consul/api/README.md
generated
vendored
|
@ -4,12 +4,12 @@ Consul API client
|
||||||
This package provides the `api` package which attempts to
|
This package provides the `api` package which attempts to
|
||||||
provide programmatic access to the full Consul API.
|
provide programmatic access to the full Consul API.
|
||||||
|
|
||||||
Currently, all of the Consul APIs included in version 0.3 are supported.
|
Currently, all of the Consul APIs included in version 0.6.0 are supported.
|
||||||
|
|
||||||
Documentation
|
Documentation
|
||||||
=============
|
=============
|
||||||
|
|
||||||
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api)
|
The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
|
||||||
|
|
||||||
Usage
|
Usage
|
||||||
=====
|
=====
|
||||||
|
@ -17,13 +17,18 @@ Usage
|
||||||
Below is an example of using the Consul client:
|
Below is an example of using the Consul client:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
// Get a new client, with KV endpoints
|
// Get a new client
|
||||||
client, _ := api.NewClient(api.DefaultConfig())
|
client, err := api.NewClient(api.DefaultConfig())
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get a handle to the KV API
|
||||||
kv := client.KV()
|
kv := client.KV()
|
||||||
|
|
||||||
// PUT a new KV pair
|
// PUT a new KV pair
|
||||||
p := &api.KVPair{Key: "foo", Value: []byte("test")}
|
p := &api.KVPair{Key: "foo", Value: []byte("test")}
|
||||||
_, err := kv.Put(p, nil)
|
_, err = kv.Put(p, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
@ -36,4 +41,3 @@ if err != nil {
|
||||||
fmt.Printf("KV: %v", pair)
|
fmt.Printf("KV: %v", pair)
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
96
vendor/github.com/hashicorp/consul/api/agent.go
generated
vendored
96
vendor/github.com/hashicorp/consul/api/agent.go
generated
vendored
|
@ -23,6 +23,7 @@ type AgentService struct {
|
||||||
Tags []string
|
Tags []string
|
||||||
Port int
|
Port int
|
||||||
Address string
|
Address string
|
||||||
|
EnableTagOverride bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// AgentMember represents a cluster member known to the agent
|
// AgentMember represents a cluster member known to the agent
|
||||||
|
@ -47,6 +48,7 @@ type AgentServiceRegistration struct {
|
||||||
Tags []string `json:",omitempty"`
|
Tags []string `json:",omitempty"`
|
||||||
Port int `json:",omitempty"`
|
Port int `json:",omitempty"`
|
||||||
Address string `json:",omitempty"`
|
Address string `json:",omitempty"`
|
||||||
|
EnableTagOverride bool `json:",omitempty"`
|
||||||
Check *AgentServiceCheck
|
Check *AgentServiceCheck
|
||||||
Checks AgentServiceChecks
|
Checks AgentServiceChecks
|
||||||
}
|
}
|
||||||
|
@ -60,16 +62,25 @@ type AgentCheckRegistration struct {
|
||||||
AgentServiceCheck
|
AgentServiceCheck
|
||||||
}
|
}
|
||||||
|
|
||||||
// AgentServiceCheck is used to create an associated
|
// AgentServiceCheck is used to define a node or service level check
|
||||||
// check for a service
|
|
||||||
type AgentServiceCheck struct {
|
type AgentServiceCheck struct {
|
||||||
Script string `json:",omitempty"`
|
Script string `json:",omitempty"`
|
||||||
|
DockerContainerID string `json:",omitempty"`
|
||||||
|
Shell string `json:",omitempty"` // Only supported for Docker.
|
||||||
Interval string `json:",omitempty"`
|
Interval string `json:",omitempty"`
|
||||||
Timeout string `json:",omitempty"`
|
Timeout string `json:",omitempty"`
|
||||||
TTL string `json:",omitempty"`
|
TTL string `json:",omitempty"`
|
||||||
HTTP string `json:",omitempty"`
|
HTTP string `json:",omitempty"`
|
||||||
TCP string `json:",omitempty"`
|
TCP string `json:",omitempty"`
|
||||||
Status string `json:",omitempty"`
|
Status string `json:",omitempty"`
|
||||||
|
|
||||||
|
// In Consul 0.7 and later, checks that are associated with a service
|
||||||
|
// may also contain this optional DeregisterCriticalServiceAfter field,
|
||||||
|
// which is a timeout in the same Go time format as Interval and TTL. If
|
||||||
|
// a check is in the critical state for more than this configured value,
|
||||||
|
// then its associated service (and all of its associated checks) will
|
||||||
|
// automatically be deregistered.
|
||||||
|
DeregisterCriticalServiceAfter string `json:",omitempty"`
|
||||||
}
|
}
|
||||||
type AgentServiceChecks []*AgentServiceCheck
|
type AgentServiceChecks []*AgentServiceCheck
|
||||||
|
|
||||||
|
@ -194,23 +205,43 @@ func (a *Agent) ServiceDeregister(serviceID string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PassTTL is used to set a TTL check to the passing state
|
// PassTTL is used to set a TTL check to the passing state.
|
||||||
|
//
|
||||||
|
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
|
||||||
|
// The client interface will be removed in 0.8 or changed to use
|
||||||
|
// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
|
||||||
func (a *Agent) PassTTL(checkID, note string) error {
|
func (a *Agent) PassTTL(checkID, note string) error {
|
||||||
return a.UpdateTTL(checkID, note, "pass")
|
return a.updateTTL(checkID, note, "pass")
|
||||||
}
|
}
|
||||||
|
|
||||||
// WarnTTL is used to set a TTL check to the warning state
|
// WarnTTL is used to set a TTL check to the warning state.
|
||||||
|
//
|
||||||
|
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
|
||||||
|
// The client interface will be removed in 0.8 or changed to use
|
||||||
|
// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
|
||||||
func (a *Agent) WarnTTL(checkID, note string) error {
|
func (a *Agent) WarnTTL(checkID, note string) error {
|
||||||
return a.UpdateTTL(checkID, note, "warn")
|
return a.updateTTL(checkID, note, "warn")
|
||||||
}
|
}
|
||||||
|
|
||||||
// FailTTL is used to set a TTL check to the failing state
|
// FailTTL is used to set a TTL check to the failing state.
|
||||||
|
//
|
||||||
|
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
|
||||||
|
// The client interface will be removed in 0.8 or changed to use
|
||||||
|
// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
|
||||||
func (a *Agent) FailTTL(checkID, note string) error {
|
func (a *Agent) FailTTL(checkID, note string) error {
|
||||||
return a.UpdateTTL(checkID, note, "fail")
|
return a.updateTTL(checkID, note, "fail")
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateTTL is used to update the TTL of a check
|
// updateTTL is used to update the TTL of a check. This is the internal
|
||||||
func (a *Agent) UpdateTTL(checkID, note, status string) error {
|
// method that uses the old API that's present in Consul versions prior to
|
||||||
|
// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed
|
||||||
|
// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below,
|
||||||
|
// but keep the old Pass/Warn/Fail methods using the old API under the hood.
|
||||||
|
//
|
||||||
|
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
|
||||||
|
// The client interface will be removed in 0.8 and the server endpoints will
|
||||||
|
// be removed in 0.9.
|
||||||
|
func (a *Agent) updateTTL(checkID, note, status string) error {
|
||||||
switch status {
|
switch status {
|
||||||
case "pass":
|
case "pass":
|
||||||
case "warn":
|
case "warn":
|
||||||
|
@ -229,6 +260,51 @@ func (a *Agent) UpdateTTL(checkID, note, status string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// checkUpdate is the payload for a PUT for a check update.
|
||||||
|
type checkUpdate struct {
|
||||||
|
// Status is one of the api.Health* states: HealthPassing
|
||||||
|
// ("passing"), HealthWarning ("warning"), or HealthCritical
|
||||||
|
// ("critical").
|
||||||
|
Status string
|
||||||
|
|
||||||
|
// Output is the information to post to the UI for operators as the
|
||||||
|
// output of the process that decided to hit the TTL check. This is
|
||||||
|
// different from the note field that's associated with the check
|
||||||
|
// itself.
|
||||||
|
Output string
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateTTL is used to update the TTL of a check. This uses the newer API
|
||||||
|
// that was introduced in Consul 0.6.4 and later. We translate the old status
|
||||||
|
// strings for compatibility (though a newer version of Consul will still be
|
||||||
|
// required to use this API).
|
||||||
|
func (a *Agent) UpdateTTL(checkID, output, status string) error {
|
||||||
|
switch status {
|
||||||
|
case "pass", HealthPassing:
|
||||||
|
status = HealthPassing
|
||||||
|
case "warn", HealthWarning:
|
||||||
|
status = HealthWarning
|
||||||
|
case "fail", HealthCritical:
|
||||||
|
status = HealthCritical
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("Invalid status: %s", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID)
|
||||||
|
r := a.c.newRequest("PUT", endpoint)
|
||||||
|
r.obj = &checkUpdate{
|
||||||
|
Status: status,
|
||||||
|
Output: output,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// CheckRegister is used to register a new check with
|
// CheckRegister is used to register a new check with
|
||||||
// the local agent
|
// the local agent
|
||||||
func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
|
func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
|
||||||
|
|
181
vendor/github.com/hashicorp/consul/api/api.go
generated
vendored
181
vendor/github.com/hashicorp/consul/api/api.go
generated
vendored
|
@ -3,9 +3,11 @@ package api
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -14,6 +16,8 @@ import (
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/go-cleanhttp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// QueryOptions are used to parameterize a query
|
// QueryOptions are used to parameterize a query
|
||||||
|
@ -36,12 +40,18 @@ type QueryOptions struct {
|
||||||
WaitIndex uint64
|
WaitIndex uint64
|
||||||
|
|
||||||
// WaitTime is used to bound the duration of a wait.
|
// WaitTime is used to bound the duration of a wait.
|
||||||
// Defaults to that of the Config, but can be overriden.
|
// Defaults to that of the Config, but can be overridden.
|
||||||
WaitTime time.Duration
|
WaitTime time.Duration
|
||||||
|
|
||||||
// Token is used to provide a per-request ACL token
|
// Token is used to provide a per-request ACL token
|
||||||
// which overrides the agent's default token.
|
// which overrides the agent's default token.
|
||||||
Token string
|
Token string
|
||||||
|
|
||||||
|
// Near is used to provide a node name that will sort the results
|
||||||
|
// in ascending order based on the estimated round trip time from
|
||||||
|
// that node. Setting this to "_agent" will use the agent's node
|
||||||
|
// for the sort.
|
||||||
|
Near string
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteOptions are used to parameterize a write
|
// WriteOptions are used to parameterize a write
|
||||||
|
@ -70,6 +80,9 @@ type QueryMeta struct {
|
||||||
|
|
||||||
// How long did the request take
|
// How long did the request take
|
||||||
RequestTime time.Duration
|
RequestTime time.Duration
|
||||||
|
|
||||||
|
// Is address translation enabled for HTTP responses on this agent
|
||||||
|
AddressTranslationEnabled bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteMeta is used to return meta data about a write
|
// WriteMeta is used to return meta data about a write
|
||||||
|
@ -114,12 +127,58 @@ type Config struct {
|
||||||
Token string
|
Token string
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultConfig returns a default configuration for the client
|
// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
|
||||||
|
// Consul using TLS.
|
||||||
|
type TLSConfig struct {
|
||||||
|
// Address is the optional address of the Consul server. The port, if any
|
||||||
|
// will be removed from here and this will be set to the ServerName of the
|
||||||
|
// resulting config.
|
||||||
|
Address string
|
||||||
|
|
||||||
|
// CAFile is the optional path to the CA certificate used for Consul
|
||||||
|
// communication, defaults to the system bundle if not specified.
|
||||||
|
CAFile string
|
||||||
|
|
||||||
|
// CertFile is the optional path to the certificate for Consul
|
||||||
|
// communication. If this is set then you need to also set KeyFile.
|
||||||
|
CertFile string
|
||||||
|
|
||||||
|
// KeyFile is the optional path to the private key for Consul communication.
|
||||||
|
// If this is set then you need to also set CertFile.
|
||||||
|
KeyFile string
|
||||||
|
|
||||||
|
// InsecureSkipVerify if set to true will disable TLS host verification.
|
||||||
|
InsecureSkipVerify bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultConfig returns a default configuration for the client. By default this
|
||||||
|
// will pool and reuse idle connections to Consul. If you have a long-lived
|
||||||
|
// client object, this is the desired behavior and should make the most efficient
|
||||||
|
// use of the connections to Consul. If you don't reuse a client object , which
|
||||||
|
// is not recommended, then you may notice idle connections building up over
|
||||||
|
// time. To avoid this, use the DefaultNonPooledConfig() instead.
|
||||||
func DefaultConfig() *Config {
|
func DefaultConfig() *Config {
|
||||||
|
return defaultConfig(cleanhttp.DefaultPooledTransport)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultNonPooledConfig returns a default configuration for the client which
|
||||||
|
// does not pool connections. This isn't a recommended configuration because it
|
||||||
|
// will reconnect to Consul on every request, but this is useful to avoid the
|
||||||
|
// accumulation of idle connections if you make many client objects during the
|
||||||
|
// lifetime of your application.
|
||||||
|
func DefaultNonPooledConfig() *Config {
|
||||||
|
return defaultConfig(cleanhttp.DefaultTransport)
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultConfig returns the default configuration for the client, using the
|
||||||
|
// given function to make the transport.
|
||||||
|
func defaultConfig(transportFn func() *http.Transport) *Config {
|
||||||
config := &Config{
|
config := &Config{
|
||||||
Address: "127.0.0.1:8500",
|
Address: "127.0.0.1:8500",
|
||||||
Scheme: "http",
|
Scheme: "http",
|
||||||
HttpClient: http.DefaultClient,
|
HttpClient: &http.Client{
|
||||||
|
Transport: transportFn(),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" {
|
if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" {
|
||||||
|
@ -164,17 +223,70 @@ func DefaultConfig() *Config {
|
||||||
}
|
}
|
||||||
|
|
||||||
if !doVerify {
|
if !doVerify {
|
||||||
config.HttpClient.Transport = &http.Transport{
|
tlsClientConfig, err := SetupTLSConfig(&TLSConfig{
|
||||||
TLSClientConfig: &tls.Config{
|
|
||||||
InsecureSkipVerify: true,
|
InsecureSkipVerify: true,
|
||||||
},
|
})
|
||||||
|
|
||||||
|
// We don't expect this to fail given that we aren't
|
||||||
|
// parsing any of the input, but we panic just in case
|
||||||
|
// since this doesn't have an error return.
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
transport := transportFn()
|
||||||
|
transport.TLSClientConfig = tlsClientConfig
|
||||||
|
config.HttpClient.Transport = transport
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return config
|
return config
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
|
||||||
|
// Consul using TLS.
|
||||||
|
func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
|
||||||
|
tlsClientConfig := &tls.Config{
|
||||||
|
InsecureSkipVerify: tlsConfig.InsecureSkipVerify,
|
||||||
|
}
|
||||||
|
|
||||||
|
if tlsConfig.Address != "" {
|
||||||
|
server := tlsConfig.Address
|
||||||
|
hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]")
|
||||||
|
if hasPort {
|
||||||
|
var err error
|
||||||
|
server, _, err = net.SplitHostPort(server)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tlsClientConfig.ServerName = server
|
||||||
|
}
|
||||||
|
|
||||||
|
if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
|
||||||
|
tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tlsConfig.CAFile != "" {
|
||||||
|
data, err := ioutil.ReadFile(tlsConfig.CAFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read CA file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
caPool := x509.NewCertPool()
|
||||||
|
if !caPool.AppendCertsFromPEM(data) {
|
||||||
|
return nil, fmt.Errorf("failed to parse CA certificate")
|
||||||
|
}
|
||||||
|
tlsClientConfig.RootCAs = caPool
|
||||||
|
}
|
||||||
|
|
||||||
|
return tlsClientConfig, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Client provides a client to the Consul API
|
// Client provides a client to the Consul API
|
||||||
type Client struct {
|
type Client struct {
|
||||||
config Config
|
config Config
|
||||||
|
@ -198,12 +310,12 @@ func NewClient(config *Config) (*Client, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 {
|
if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 {
|
||||||
config.HttpClient = &http.Client{
|
trans := cleanhttp.DefaultTransport()
|
||||||
Transport: &http.Transport{
|
trans.Dial = func(_, _ string) (net.Conn, error) {
|
||||||
Dial: func(_, _ string) (net.Conn, error) {
|
|
||||||
return net.Dial("unix", parts[1])
|
return net.Dial("unix", parts[1])
|
||||||
},
|
}
|
||||||
},
|
config.HttpClient = &http.Client{
|
||||||
|
Transport: trans,
|
||||||
}
|
}
|
||||||
config.Address = parts[1]
|
config.Address = parts[1]
|
||||||
}
|
}
|
||||||
|
@ -221,6 +333,7 @@ type request struct {
|
||||||
url *url.URL
|
url *url.URL
|
||||||
params url.Values
|
params url.Values
|
||||||
body io.Reader
|
body io.Reader
|
||||||
|
header http.Header
|
||||||
obj interface{}
|
obj interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -246,13 +359,38 @@ func (r *request) setQueryOptions(q *QueryOptions) {
|
||||||
r.params.Set("wait", durToMsec(q.WaitTime))
|
r.params.Set("wait", durToMsec(q.WaitTime))
|
||||||
}
|
}
|
||||||
if q.Token != "" {
|
if q.Token != "" {
|
||||||
r.params.Set("token", q.Token)
|
r.header.Set("X-Consul-Token", q.Token)
|
||||||
|
}
|
||||||
|
if q.Near != "" {
|
||||||
|
r.params.Set("near", q.Near)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// durToMsec converts a duration to a millisecond specified string
|
// durToMsec converts a duration to a millisecond specified string. If the
|
||||||
|
// user selected a positive value that rounds to 0 ms, then we will use 1 ms
|
||||||
|
// so they get a short delay, otherwise Consul will translate the 0 ms into
|
||||||
|
// a huge default delay.
|
||||||
func durToMsec(dur time.Duration) string {
|
func durToMsec(dur time.Duration) string {
|
||||||
return fmt.Sprintf("%dms", dur/time.Millisecond)
|
ms := dur / time.Millisecond
|
||||||
|
if dur > 0 && ms == 0 {
|
||||||
|
ms = 1
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%dms", ms)
|
||||||
|
}
|
||||||
|
|
||||||
|
// serverError is a string we look for to detect 500 errors.
|
||||||
|
const serverError = "Unexpected response code: 500"
|
||||||
|
|
||||||
|
// IsServerError returns true for 500 errors from the Consul servers, these are
|
||||||
|
// usually retryable at a later time.
|
||||||
|
func IsServerError(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO (slackpad) - Make a real error type here instead of using
|
||||||
|
// a string check.
|
||||||
|
return strings.Contains(err.Error(), serverError)
|
||||||
}
|
}
|
||||||
|
|
||||||
// setWriteOptions is used to annotate the request with
|
// setWriteOptions is used to annotate the request with
|
||||||
|
@ -265,7 +403,7 @@ func (r *request) setWriteOptions(q *WriteOptions) {
|
||||||
r.params.Set("dc", q.Datacenter)
|
r.params.Set("dc", q.Datacenter)
|
||||||
}
|
}
|
||||||
if q.Token != "" {
|
if q.Token != "" {
|
||||||
r.params.Set("token", q.Token)
|
r.header.Set("X-Consul-Token", q.Token)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -292,6 +430,7 @@ func (r *request) toHTTP() (*http.Request, error) {
|
||||||
req.URL.Host = r.url.Host
|
req.URL.Host = r.url.Host
|
||||||
req.URL.Scheme = r.url.Scheme
|
req.URL.Scheme = r.url.Scheme
|
||||||
req.Host = r.url.Host
|
req.Host = r.url.Host
|
||||||
|
req.Header = r.header
|
||||||
|
|
||||||
// Setup auth
|
// Setup auth
|
||||||
if r.config.HttpAuth != nil {
|
if r.config.HttpAuth != nil {
|
||||||
|
@ -312,6 +451,7 @@ func (c *Client) newRequest(method, path string) *request {
|
||||||
Path: path,
|
Path: path,
|
||||||
},
|
},
|
||||||
params: make(map[string][]string),
|
params: make(map[string][]string),
|
||||||
|
header: make(http.Header),
|
||||||
}
|
}
|
||||||
if c.config.Datacenter != "" {
|
if c.config.Datacenter != "" {
|
||||||
r.params.Set("dc", c.config.Datacenter)
|
r.params.Set("dc", c.config.Datacenter)
|
||||||
|
@ -320,7 +460,7 @@ func (c *Client) newRequest(method, path string) *request {
|
||||||
r.params.Set("wait", durToMsec(r.config.WaitTime))
|
r.params.Set("wait", durToMsec(r.config.WaitTime))
|
||||||
}
|
}
|
||||||
if c.config.Token != "" {
|
if c.config.Token != "" {
|
||||||
r.params.Set("token", r.config.Token)
|
r.header.Set("X-Consul-Token", r.config.Token)
|
||||||
}
|
}
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
|
@ -405,6 +545,15 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
|
||||||
default:
|
default:
|
||||||
q.KnownLeader = false
|
q.KnownLeader = false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Parse X-Consul-Translate-Addresses
|
||||||
|
switch header.Get("X-Consul-Translate-Addresses") {
|
||||||
|
case "true":
|
||||||
|
q.AddressTranslationEnabled = true
|
||||||
|
default:
|
||||||
|
q.AddressTranslationEnabled = false
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
4
vendor/github.com/hashicorp/consul/api/catalog.go
generated
vendored
4
vendor/github.com/hashicorp/consul/api/catalog.go
generated
vendored
|
@ -3,16 +3,19 @@ package api
|
||||||
type Node struct {
|
type Node struct {
|
||||||
Node string
|
Node string
|
||||||
Address string
|
Address string
|
||||||
|
TaggedAddresses map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
type CatalogService struct {
|
type CatalogService struct {
|
||||||
Node string
|
Node string
|
||||||
Address string
|
Address string
|
||||||
|
TaggedAddresses map[string]string
|
||||||
ServiceID string
|
ServiceID string
|
||||||
ServiceName string
|
ServiceName string
|
||||||
ServiceAddress string
|
ServiceAddress string
|
||||||
ServiceTags []string
|
ServiceTags []string
|
||||||
ServicePort int
|
ServicePort int
|
||||||
|
ServiceEnableTagOverride bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type CatalogNode struct {
|
type CatalogNode struct {
|
||||||
|
@ -23,6 +26,7 @@ type CatalogNode struct {
|
||||||
type CatalogRegistration struct {
|
type CatalogRegistration struct {
|
||||||
Node string
|
Node string
|
||||||
Address string
|
Address string
|
||||||
|
TaggedAddresses map[string]string
|
||||||
Datacenter string
|
Datacenter string
|
||||||
Service *AgentService
|
Service *AgentService
|
||||||
Check *AgentCheck
|
Check *AgentCheck
|
||||||
|
|
66
vendor/github.com/hashicorp/consul/api/coordinate.go
generated
vendored
Normal file
66
vendor/github.com/hashicorp/consul/api/coordinate.go
generated
vendored
Normal file
|
@ -0,0 +1,66 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/hashicorp/serf/coordinate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CoordinateEntry represents a node and its associated network coordinate.
|
||||||
|
type CoordinateEntry struct {
|
||||||
|
Node string
|
||||||
|
Coord *coordinate.Coordinate
|
||||||
|
}
|
||||||
|
|
||||||
|
// CoordinateDatacenterMap represents a datacenter and its associated WAN
|
||||||
|
// nodes and their associates coordinates.
|
||||||
|
type CoordinateDatacenterMap struct {
|
||||||
|
Datacenter string
|
||||||
|
Coordinates []CoordinateEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
// Coordinate can be used to query the coordinate endpoints
|
||||||
|
type Coordinate struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Coordinate returns a handle to the coordinate endpoints
|
||||||
|
func (c *Client) Coordinate() *Coordinate {
|
||||||
|
return &Coordinate{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Datacenters is used to return the coordinates of all the servers in the WAN
|
||||||
|
// pool.
|
||||||
|
func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
|
||||||
|
_, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out []*CoordinateDatacenterMap
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nodes is used to return the coordinates of all the nodes in the LAN pool.
|
||||||
|
func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/coordinate/nodes")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out []*CoordinateEntry
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
22
vendor/github.com/hashicorp/consul/api/health.go
generated
vendored
22
vendor/github.com/hashicorp/consul/api/health.go
generated
vendored
|
@ -4,6 +4,15 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// HealthAny is special, and is used as a wild card,
|
||||||
|
// not as a specific state.
|
||||||
|
HealthAny = "any"
|
||||||
|
HealthPassing = "passing"
|
||||||
|
HealthWarning = "warning"
|
||||||
|
HealthCritical = "critical"
|
||||||
|
)
|
||||||
|
|
||||||
// HealthCheck is used to represent a single check
|
// HealthCheck is used to represent a single check
|
||||||
type HealthCheck struct {
|
type HealthCheck struct {
|
||||||
Node string
|
Node string
|
||||||
|
@ -85,7 +94,7 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions)
|
||||||
r.params.Set("tag", tag)
|
r.params.Set("tag", tag)
|
||||||
}
|
}
|
||||||
if passingOnly {
|
if passingOnly {
|
||||||
r.params.Set("passing", "1")
|
r.params.Set(HealthPassing, "1")
|
||||||
}
|
}
|
||||||
rtt, resp, err := requireOK(h.c.doRequest(r))
|
rtt, resp, err := requireOK(h.c.doRequest(r))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -104,15 +113,14 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions)
|
||||||
return out, qm, nil
|
return out, qm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// State is used to retreive all the checks in a given state.
|
// State is used to retrieve all the checks in a given state.
|
||||||
// The wildcard "any" state can also be used for all checks.
|
// The wildcard "any" state can also be used for all checks.
|
||||||
func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
|
func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
|
||||||
switch state {
|
switch state {
|
||||||
case "any":
|
case HealthAny:
|
||||||
case "warning":
|
case HealthWarning:
|
||||||
case "critical":
|
case HealthCritical:
|
||||||
case "passing":
|
case HealthPassing:
|
||||||
case "unknown":
|
|
||||||
default:
|
default:
|
||||||
return nil, nil, fmt.Errorf("Unsupported state: %v", state)
|
return nil, nil, fmt.Errorf("Unsupported state: %v", state)
|
||||||
}
|
}
|
||||||
|
|
183
vendor/github.com/hashicorp/consul/api/kv.go
generated
vendored
183
vendor/github.com/hashicorp/consul/api/kv.go
generated
vendored
|
@ -11,18 +11,77 @@ import (
|
||||||
|
|
||||||
// KVPair is used to represent a single K/V entry
|
// KVPair is used to represent a single K/V entry
|
||||||
type KVPair struct {
|
type KVPair struct {
|
||||||
|
// Key is the name of the key. It is also part of the URL path when accessed
|
||||||
|
// via the API.
|
||||||
Key string
|
Key string
|
||||||
|
|
||||||
|
// CreateIndex holds the index corresponding the creation of this KVPair. This
|
||||||
|
// is a read-only field.
|
||||||
CreateIndex uint64
|
CreateIndex uint64
|
||||||
|
|
||||||
|
// ModifyIndex is used for the Check-And-Set operations and can also be fed
|
||||||
|
// back into the WaitIndex of the QueryOptions in order to perform blocking
|
||||||
|
// queries.
|
||||||
ModifyIndex uint64
|
ModifyIndex uint64
|
||||||
|
|
||||||
|
// LockIndex holds the index corresponding to a lock on this key, if any. This
|
||||||
|
// is a read-only field.
|
||||||
LockIndex uint64
|
LockIndex uint64
|
||||||
|
|
||||||
|
// Flags are any user-defined flags on the key. It is up to the implementer
|
||||||
|
// to check these values, since Consul does not treat them specially.
|
||||||
Flags uint64
|
Flags uint64
|
||||||
|
|
||||||
|
// Value is the value for the key. This can be any value, but it will be
|
||||||
|
// base64 encoded upon transport.
|
||||||
Value []byte
|
Value []byte
|
||||||
|
|
||||||
|
// Session is a string representing the ID of the session. Any other
|
||||||
|
// interactions with this key over the same session must specify the same
|
||||||
|
// session ID.
|
||||||
Session string
|
Session string
|
||||||
}
|
}
|
||||||
|
|
||||||
// KVPairs is a list of KVPair objects
|
// KVPairs is a list of KVPair objects
|
||||||
type KVPairs []*KVPair
|
type KVPairs []*KVPair
|
||||||
|
|
||||||
|
// KVOp constants give possible operations available in a KVTxn.
|
||||||
|
type KVOp string
|
||||||
|
|
||||||
|
const (
|
||||||
|
KVSet KVOp = "set"
|
||||||
|
KVDelete = "delete"
|
||||||
|
KVDeleteCAS = "delete-cas"
|
||||||
|
KVDeleteTree = "delete-tree"
|
||||||
|
KVCAS = "cas"
|
||||||
|
KVLock = "lock"
|
||||||
|
KVUnlock = "unlock"
|
||||||
|
KVGet = "get"
|
||||||
|
KVGetTree = "get-tree"
|
||||||
|
KVCheckSession = "check-session"
|
||||||
|
KVCheckIndex = "check-index"
|
||||||
|
)
|
||||||
|
|
||||||
|
// KVTxnOp defines a single operation inside a transaction.
|
||||||
|
type KVTxnOp struct {
|
||||||
|
Verb string
|
||||||
|
Key string
|
||||||
|
Value []byte
|
||||||
|
Flags uint64
|
||||||
|
Index uint64
|
||||||
|
Session string
|
||||||
|
}
|
||||||
|
|
||||||
|
// KVTxnOps defines a set of operations to be performed inside a single
|
||||||
|
// transaction.
|
||||||
|
type KVTxnOps []*KVTxnOp
|
||||||
|
|
||||||
|
// KVTxnResponse has the outcome of a transaction.
|
||||||
|
type KVTxnResponse struct {
|
||||||
|
Results []*KVPair
|
||||||
|
Errors TxnErrors
|
||||||
|
}
|
||||||
|
|
||||||
// KV is used to manipulate the K/V API
|
// KV is used to manipulate the K/V API
|
||||||
type KV struct {
|
type KV struct {
|
||||||
c *Client
|
c *Client
|
||||||
|
@ -33,7 +92,8 @@ func (c *Client) KV() *KV {
|
||||||
return &KV{c}
|
return &KV{c}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get is used to lookup a single key
|
// Get is used to lookup a single key. The returned pointer
|
||||||
|
// to the KVPair will be nil if the key does not exist.
|
||||||
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
|
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
|
||||||
resp, qm, err := k.getInternal(key, nil, q)
|
resp, qm, err := k.getInternal(key, nil, q)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -143,7 +203,7 @@ func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||||
return k.put(p.Key, params, p.Value, q)
|
return k.put(p.Key, params, p.Value, q)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Acquire is used for a lock acquisiiton operation. The Key,
|
// Acquire is used for a lock acquisition operation. The Key,
|
||||||
// Flags, Value and Session are respected. Returns true
|
// Flags, Value and Session are respected. Returns true
|
||||||
// on success or false on failures.
|
// on success or false on failures.
|
||||||
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||||
|
@ -238,3 +298,122 @@ func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOption
|
||||||
res := strings.Contains(string(buf.Bytes()), "true")
|
res := strings.Contains(string(buf.Bytes()), "true")
|
||||||
return res, qm, nil
|
return res, qm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TxnOp is the internal format we send to Consul. It's not specific to KV,
|
||||||
|
// though currently only KV operations are supported.
|
||||||
|
type TxnOp struct {
|
||||||
|
KV *KVTxnOp
|
||||||
|
}
|
||||||
|
|
||||||
|
// TxnOps is a list of transaction operations.
|
||||||
|
type TxnOps []*TxnOp
|
||||||
|
|
||||||
|
// TxnResult is the internal format we receive from Consul.
|
||||||
|
type TxnResult struct {
|
||||||
|
KV *KVPair
|
||||||
|
}
|
||||||
|
|
||||||
|
// TxnResults is a list of TxnResult objects.
|
||||||
|
type TxnResults []*TxnResult
|
||||||
|
|
||||||
|
// TxnError is used to return information about an operation in a transaction.
|
||||||
|
type TxnError struct {
|
||||||
|
OpIndex int
|
||||||
|
What string
|
||||||
|
}
|
||||||
|
|
||||||
|
// TxnErrors is a list of TxnError objects.
|
||||||
|
type TxnErrors []*TxnError
|
||||||
|
|
||||||
|
// TxnResponse is the internal format we receive from Consul.
|
||||||
|
type TxnResponse struct {
|
||||||
|
Results TxnResults
|
||||||
|
Errors TxnErrors
|
||||||
|
}
|
||||||
|
|
||||||
|
// Txn is used to apply multiple KV operations in a single, atomic transaction.
|
||||||
|
//
|
||||||
|
// Note that Go will perform the required base64 encoding on the values
|
||||||
|
// automatically because the type is a byte slice. Transactions are defined as a
|
||||||
|
// list of operations to perform, using the KVOp constants and KVTxnOp structure
|
||||||
|
// to define operations. If any operation fails, none of the changes are applied
|
||||||
|
// to the state store. Note that this hides the internal raw transaction interface
|
||||||
|
// and munges the input and output types into KV-specific ones for ease of use.
|
||||||
|
// If there are more non-KV operations in the future we may break out a new
|
||||||
|
// transaction API client, but it will be easy to keep this KV-specific variant
|
||||||
|
// supported.
|
||||||
|
//
|
||||||
|
// Even though this is generally a write operation, we take a QueryOptions input
|
||||||
|
// and return a QueryMeta output. If the transaction contains only read ops, then
|
||||||
|
// Consul will fast-path it to a different endpoint internally which supports
|
||||||
|
// consistency controls, but not blocking. If there are write operations then
|
||||||
|
// the request will always be routed through raft and any consistency settings
|
||||||
|
// will be ignored.
|
||||||
|
//
|
||||||
|
// Here's an example:
|
||||||
|
//
|
||||||
|
// ops := KVTxnOps{
|
||||||
|
// &KVTxnOp{
|
||||||
|
// Verb: KVLock,
|
||||||
|
// Key: "test/lock",
|
||||||
|
// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
|
||||||
|
// Value: []byte("hello"),
|
||||||
|
// },
|
||||||
|
// &KVTxnOp{
|
||||||
|
// Verb: KVGet,
|
||||||
|
// Key: "another/key",
|
||||||
|
// },
|
||||||
|
// }
|
||||||
|
// ok, response, _, err := kv.Txn(&ops, nil)
|
||||||
|
//
|
||||||
|
// If there is a problem making the transaction request then an error will be
|
||||||
|
// returned. Otherwise, the ok value will be true if the transaction succeeded
|
||||||
|
// or false if it was rolled back. The response is a structured return value which
|
||||||
|
// will have the outcome of the transaction. Its Results member will have entries
|
||||||
|
// for each operation. Deleted keys will have a nil entry in the, and to save
|
||||||
|
// space, the Value of each key in the Results will be nil unless the operation
|
||||||
|
// is a KVGet. If the transaction was rolled back, the Errors member will have
|
||||||
|
// entries referencing the index of the operation that failed along with an error
|
||||||
|
// message.
|
||||||
|
func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
|
||||||
|
r := k.c.newRequest("PUT", "/v1/txn")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
|
||||||
|
// Convert into the internal format since this is an all-KV txn.
|
||||||
|
ops := make(TxnOps, 0, len(txn))
|
||||||
|
for _, kvOp := range txn {
|
||||||
|
ops = append(ops, &TxnOp{KV: kvOp})
|
||||||
|
}
|
||||||
|
r.obj = ops
|
||||||
|
rtt, resp, err := k.c.doRequest(r)
|
||||||
|
if err != nil {
|
||||||
|
return false, nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
|
||||||
|
var txnResp TxnResponse
|
||||||
|
if err := decodeBody(resp, &txnResp); err != nil {
|
||||||
|
return false, nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert from the internal format.
|
||||||
|
kvResp := KVTxnResponse{
|
||||||
|
Errors: txnResp.Errors,
|
||||||
|
}
|
||||||
|
for _, result := range txnResp.Results {
|
||||||
|
kvResp.Results = append(kvResp.Results, result.KV)
|
||||||
|
}
|
||||||
|
return resp.StatusCode == http.StatusOK, &kvResp, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if _, err := io.Copy(&buf, resp.Body); err != nil {
|
||||||
|
return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
|
||||||
|
}
|
||||||
|
return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
|
||||||
|
}
|
||||||
|
|
58
vendor/github.com/hashicorp/consul/api/lock.go
generated
vendored
58
vendor/github.com/hashicorp/consul/api/lock.go
generated
vendored
|
@ -22,9 +22,16 @@ const (
|
||||||
|
|
||||||
// DefaultLockRetryTime is how long we wait after a failed lock acquisition
|
// DefaultLockRetryTime is how long we wait after a failed lock acquisition
|
||||||
// before attempting to do the lock again. This is so that once a lock-delay
|
// before attempting to do the lock again. This is so that once a lock-delay
|
||||||
// is in affect, we do not hot loop retrying the acquisition.
|
// is in effect, we do not hot loop retrying the acquisition.
|
||||||
DefaultLockRetryTime = 5 * time.Second
|
DefaultLockRetryTime = 5 * time.Second
|
||||||
|
|
||||||
|
// DefaultMonitorRetryTime is how long we wait after a failed monitor check
|
||||||
|
// of a lock (500 response code). This allows the monitor to ride out brief
|
||||||
|
// periods of unavailability, subject to the MonitorRetries setting in the
|
||||||
|
// lock options which is by default set to 0, disabling this feature. This
|
||||||
|
// affects locks and semaphores.
|
||||||
|
DefaultMonitorRetryTime = 2 * time.Second
|
||||||
|
|
||||||
// LockFlagValue is a magic flag we set to indicate a key
|
// LockFlagValue is a magic flag we set to indicate a key
|
||||||
// is being used for a lock. It is used to detect a potential
|
// is being used for a lock. It is used to detect a potential
|
||||||
// conflict with a semaphore.
|
// conflict with a semaphore.
|
||||||
|
@ -49,7 +56,7 @@ var (
|
||||||
)
|
)
|
||||||
|
|
||||||
// Lock is used to implement client-side leader election. It is follows the
|
// Lock is used to implement client-side leader election. It is follows the
|
||||||
// algorithm as described here: https://consul.io/docs/guides/leader-election.html.
|
// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
|
||||||
type Lock struct {
|
type Lock struct {
|
||||||
c *Client
|
c *Client
|
||||||
opts *LockOptions
|
opts *LockOptions
|
||||||
|
@ -65,8 +72,13 @@ type LockOptions struct {
|
||||||
Key string // Must be set and have write permissions
|
Key string // Must be set and have write permissions
|
||||||
Value []byte // Optional, value to associate with the lock
|
Value []byte // Optional, value to associate with the lock
|
||||||
Session string // Optional, created if not specified
|
Session string // Optional, created if not specified
|
||||||
SessionName string // Optional, defaults to DefaultLockSessionName
|
SessionOpts *SessionEntry // Optional, options to use when creating a session
|
||||||
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
|
SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given)
|
||||||
|
SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given)
|
||||||
|
MonitorRetries int // Optional, defaults to 0 which means no retries
|
||||||
|
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
|
||||||
|
LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime
|
||||||
|
LockTryOnce bool // Optional, defaults to false which means try forever
|
||||||
}
|
}
|
||||||
|
|
||||||
// LockKey returns a handle to a lock struct which can be used
|
// LockKey returns a handle to a lock struct which can be used
|
||||||
|
@ -96,6 +108,12 @@ func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
|
||||||
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
|
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if opts.MonitorRetryTime == 0 {
|
||||||
|
opts.MonitorRetryTime = DefaultMonitorRetryTime
|
||||||
|
}
|
||||||
|
if opts.LockWaitTime == 0 {
|
||||||
|
opts.LockWaitTime = DefaultLockWaitTime
|
||||||
|
}
|
||||||
l := &Lock{
|
l := &Lock{
|
||||||
c: c,
|
c: c,
|
||||||
opts: opts,
|
opts: opts,
|
||||||
|
@ -146,9 +164,11 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
||||||
// Setup the query options
|
// Setup the query options
|
||||||
kv := l.c.KV()
|
kv := l.c.KV()
|
||||||
qOpts := &QueryOptions{
|
qOpts := &QueryOptions{
|
||||||
WaitTime: DefaultLockWaitTime,
|
WaitTime: l.opts.LockWaitTime,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
attempts := 0
|
||||||
WAIT:
|
WAIT:
|
||||||
// Check if we should quit
|
// Check if we should quit
|
||||||
select {
|
select {
|
||||||
|
@ -157,6 +177,17 @@ WAIT:
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Handle the one-shot mode.
|
||||||
|
if l.opts.LockTryOnce && attempts > 0 {
|
||||||
|
elapsed := time.Now().Sub(start)
|
||||||
|
if elapsed > qOpts.WaitTime {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
qOpts.WaitTime -= elapsed
|
||||||
|
}
|
||||||
|
attempts++
|
||||||
|
|
||||||
// Look for an existing lock, blocking until not taken
|
// Look for an existing lock, blocking until not taken
|
||||||
pair, meta, err := kv.Get(l.opts.Key, qOpts)
|
pair, meta, err := kv.Get(l.opts.Key, qOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -299,10 +330,13 @@ func (l *Lock) Destroy() error {
|
||||||
// createSession is used to create a new managed session
|
// createSession is used to create a new managed session
|
||||||
func (l *Lock) createSession() (string, error) {
|
func (l *Lock) createSession() (string, error) {
|
||||||
session := l.c.Session()
|
session := l.c.Session()
|
||||||
se := &SessionEntry{
|
se := l.opts.SessionOpts
|
||||||
|
if se == nil {
|
||||||
|
se = &SessionEntry{
|
||||||
Name: l.opts.SessionName,
|
Name: l.opts.SessionName,
|
||||||
TTL: l.opts.SessionTTL,
|
TTL: l.opts.SessionTTL,
|
||||||
}
|
}
|
||||||
|
}
|
||||||
id, _, err := session.Create(se, nil)
|
id, _, err := session.Create(se, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
@ -327,8 +361,20 @@ func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
|
||||||
kv := l.c.KV()
|
kv := l.c.KV()
|
||||||
opts := &QueryOptions{RequireConsistent: true}
|
opts := &QueryOptions{RequireConsistent: true}
|
||||||
WAIT:
|
WAIT:
|
||||||
|
retries := l.opts.MonitorRetries
|
||||||
|
RETRY:
|
||||||
pair, meta, err := kv.Get(l.opts.Key, opts)
|
pair, meta, err := kv.Get(l.opts.Key, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// If configured we can try to ride out a brief Consul unavailability
|
||||||
|
// by doing retries. Note that we have to attempt the retry in a non-
|
||||||
|
// blocking fashion so that we have a clean place to reset the retry
|
||||||
|
// counter if service is restored.
|
||||||
|
if retries > 0 && IsServerError(err) {
|
||||||
|
time.Sleep(l.opts.MonitorRetryTime)
|
||||||
|
retries--
|
||||||
|
opts.WaitIndex = 0
|
||||||
|
goto RETRY
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if pair != nil && pair.Session == session {
|
if pair != nil && pair.Session == session {
|
||||||
|
|
81
vendor/github.com/hashicorp/consul/api/operator.go
generated
vendored
Normal file
81
vendor/github.com/hashicorp/consul/api/operator.go
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
// Operator can be used to perform low-level operator tasks for Consul.
|
||||||
|
type Operator struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Operator returns a handle to the operator endpoints.
|
||||||
|
func (c *Client) Operator() *Operator {
|
||||||
|
return &Operator{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RaftServer has information about a server in the Raft configuration.
|
||||||
|
type RaftServer struct {
|
||||||
|
// ID is the unique ID for the server. These are currently the same
|
||||||
|
// as the address, but they will be changed to a real GUID in a future
|
||||||
|
// release of Consul.
|
||||||
|
ID string
|
||||||
|
|
||||||
|
// Node is the node name of the server, as known by Consul, or this
|
||||||
|
// will be set to "(unknown)" otherwise.
|
||||||
|
Node string
|
||||||
|
|
||||||
|
// Address is the IP:port of the server, used for Raft communications.
|
||||||
|
Address string
|
||||||
|
|
||||||
|
// Leader is true if this server is the current cluster leader.
|
||||||
|
Leader bool
|
||||||
|
|
||||||
|
// Voter is true if this server has a vote in the cluster. This might
|
||||||
|
// be false if the server is staging and still coming online, or if
|
||||||
|
// it's a non-voting server, which will be added in a future release of
|
||||||
|
// Consul.
|
||||||
|
Voter bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// RaftConfigration is returned when querying for the current Raft configuration.
|
||||||
|
type RaftConfiguration struct {
|
||||||
|
// Servers has the list of servers in the Raft configuration.
|
||||||
|
Servers []*RaftServer
|
||||||
|
|
||||||
|
// Index has the Raft index of this configuration.
|
||||||
|
Index uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// RaftGetConfiguration is used to query the current Raft peer set.
|
||||||
|
func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
|
||||||
|
r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
_, resp, err := requireOK(op.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out RaftConfiguration
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft
|
||||||
|
// quorum but no longer known to Serf or the catalog) by address in the form of
|
||||||
|
// "IP:port".
|
||||||
|
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
|
||||||
|
r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
|
||||||
|
// TODO (slackpad) Currently we made address a query parameter. Once
|
||||||
|
// IDs are in place this will be DELETE /v1/operator/raft/peer/<id>.
|
||||||
|
r.params.Set("address", string(address))
|
||||||
|
|
||||||
|
_, resp, err := requireOK(op.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
194
vendor/github.com/hashicorp/consul/api/prepared_query.go
generated
vendored
Normal file
194
vendor/github.com/hashicorp/consul/api/prepared_query.go
generated
vendored
Normal file
|
@ -0,0 +1,194 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
// QueryDatacenterOptions sets options about how we fail over if there are no
|
||||||
|
// healthy nodes in the local datacenter.
|
||||||
|
type QueryDatacenterOptions struct {
|
||||||
|
// NearestN is set to the number of remote datacenters to try, based on
|
||||||
|
// network coordinates.
|
||||||
|
NearestN int
|
||||||
|
|
||||||
|
// Datacenters is a fixed list of datacenters to try after NearestN. We
|
||||||
|
// never try a datacenter multiple times, so those are subtracted from
|
||||||
|
// this list before proceeding.
|
||||||
|
Datacenters []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryDNSOptions controls settings when query results are served over DNS.
|
||||||
|
type QueryDNSOptions struct {
|
||||||
|
// TTL is the time to live for the served DNS results.
|
||||||
|
TTL string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceQuery is used to query for a set of healthy nodes offering a specific
|
||||||
|
// service.
|
||||||
|
type ServiceQuery struct {
|
||||||
|
// Service is the service to query.
|
||||||
|
Service string
|
||||||
|
|
||||||
|
// Near allows baking in the name of a node to automatically distance-
|
||||||
|
// sort from. The magic "_agent" value is supported, which sorts near
|
||||||
|
// the agent which initiated the request by default.
|
||||||
|
Near string
|
||||||
|
|
||||||
|
// Failover controls what we do if there are no healthy nodes in the
|
||||||
|
// local datacenter.
|
||||||
|
Failover QueryDatacenterOptions
|
||||||
|
|
||||||
|
// If OnlyPassing is true then we will only include nodes with passing
|
||||||
|
// health checks (critical AND warning checks will cause a node to be
|
||||||
|
// discarded)
|
||||||
|
OnlyPassing bool
|
||||||
|
|
||||||
|
// Tags are a set of required and/or disallowed tags. If a tag is in
|
||||||
|
// this list it must be present. If the tag is preceded with "!" then
|
||||||
|
// it is disallowed.
|
||||||
|
Tags []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryTemplate carries the arguments for creating a templated query.
|
||||||
|
type QueryTemplate struct {
|
||||||
|
// Type specifies the type of the query template. Currently only
|
||||||
|
// "name_prefix_match" is supported. This field is required.
|
||||||
|
Type string
|
||||||
|
|
||||||
|
// Regexp allows specifying a regex pattern to match against the name
|
||||||
|
// of the query being executed.
|
||||||
|
Regexp string
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrepatedQueryDefinition defines a complete prepared query.
|
||||||
|
type PreparedQueryDefinition struct {
|
||||||
|
// ID is this UUID-based ID for the query, always generated by Consul.
|
||||||
|
ID string
|
||||||
|
|
||||||
|
// Name is an optional friendly name for the query supplied by the
|
||||||
|
// user. NOTE - if this feature is used then it will reduce the security
|
||||||
|
// of any read ACL associated with this query/service since this name
|
||||||
|
// can be used to locate nodes with supplying any ACL.
|
||||||
|
Name string
|
||||||
|
|
||||||
|
// Session is an optional session to tie this query's lifetime to. If
|
||||||
|
// this is omitted then the query will not expire.
|
||||||
|
Session string
|
||||||
|
|
||||||
|
// Token is the ACL token used when the query was created, and it is
|
||||||
|
// used when a query is subsequently executed. This token, or a token
|
||||||
|
// with management privileges, must be used to change the query later.
|
||||||
|
Token string
|
||||||
|
|
||||||
|
// Service defines a service query (leaving things open for other types
|
||||||
|
// later).
|
||||||
|
Service ServiceQuery
|
||||||
|
|
||||||
|
// DNS has options that control how the results of this query are
|
||||||
|
// served over DNS.
|
||||||
|
DNS QueryDNSOptions
|
||||||
|
|
||||||
|
// Template is used to pass through the arguments for creating a
|
||||||
|
// prepared query with an attached template. If a template is given,
|
||||||
|
// interpolations are possible in other struct fields.
|
||||||
|
Template QueryTemplate
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreparedQueryExecuteResponse has the results of executing a query.
|
||||||
|
type PreparedQueryExecuteResponse struct {
|
||||||
|
// Service is the service that was queried.
|
||||||
|
Service string
|
||||||
|
|
||||||
|
// Nodes has the nodes that were output by the query.
|
||||||
|
Nodes []ServiceEntry
|
||||||
|
|
||||||
|
// DNS has the options for serving these results over DNS.
|
||||||
|
DNS QueryDNSOptions
|
||||||
|
|
||||||
|
// Datacenter is the datacenter that these results came from.
|
||||||
|
Datacenter string
|
||||||
|
|
||||||
|
// Failovers is a count of how many times we had to query a remote
|
||||||
|
// datacenter.
|
||||||
|
Failovers int
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreparedQuery can be used to query the prepared query endpoints.
|
||||||
|
type PreparedQuery struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreparedQuery returns a handle to the prepared query endpoints.
|
||||||
|
func (c *Client) PreparedQuery() *PreparedQuery {
|
||||||
|
return &PreparedQuery{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create makes a new prepared query. The ID of the new query is returned.
|
||||||
|
func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
r := c.c.newRequest("POST", "/v1/query")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = query
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{}
|
||||||
|
wm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out struct{ ID string }
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
return out.ID, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update makes updates to an existing prepared query.
|
||||||
|
func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
return c.c.write("/v1/query/"+query.ID, query, nil, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// List is used to fetch all the prepared queries (always requires a management
|
||||||
|
// token).
|
||||||
|
func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
|
||||||
|
var out []*PreparedQueryDefinition
|
||||||
|
qm, err := c.c.query("/v1/query", &out, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get is used to fetch a specific prepared query.
|
||||||
|
func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
|
||||||
|
var out []*PreparedQueryDefinition
|
||||||
|
qm, err := c.c.query("/v1/query/"+queryID, &out, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete is used to delete a specific prepared query.
|
||||||
|
func (c *PreparedQuery) Delete(queryID string, q *QueryOptions) (*QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("DELETE", "/v1/query/"+queryID)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
return qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute is used to execute a specific prepared query. You can execute using
|
||||||
|
// a query ID or name.
|
||||||
|
func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
|
||||||
|
var out *PreparedQueryExecuteResponse
|
||||||
|
qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
41
vendor/github.com/hashicorp/consul/api/semaphore.go
generated
vendored
41
vendor/github.com/hashicorp/consul/api/semaphore.go
generated
vendored
|
@ -66,9 +66,13 @@ type SemaphoreOptions struct {
|
||||||
Prefix string // Must be set and have write permissions
|
Prefix string // Must be set and have write permissions
|
||||||
Limit int // Must be set, and be positive
|
Limit int // Must be set, and be positive
|
||||||
Value []byte // Optional, value to associate with the contender entry
|
Value []byte // Optional, value to associate with the contender entry
|
||||||
Session string // OPtional, created if not specified
|
Session string // Optional, created if not specified
|
||||||
SessionName string // Optional, defaults to DefaultLockSessionName
|
SessionName string // Optional, defaults to DefaultLockSessionName
|
||||||
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
|
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
|
||||||
|
MonitorRetries int // Optional, defaults to 0 which means no retries
|
||||||
|
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
|
||||||
|
SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
|
||||||
|
SemaphoreTryOnce bool // Optional, defaults to false which means try forever
|
||||||
}
|
}
|
||||||
|
|
||||||
// semaphoreLock is written under the DefaultSemaphoreKey and
|
// semaphoreLock is written under the DefaultSemaphoreKey and
|
||||||
|
@ -115,6 +119,12 @@ func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
|
||||||
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
|
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if opts.MonitorRetryTime == 0 {
|
||||||
|
opts.MonitorRetryTime = DefaultMonitorRetryTime
|
||||||
|
}
|
||||||
|
if opts.SemaphoreWaitTime == 0 {
|
||||||
|
opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
|
||||||
|
}
|
||||||
s := &Semaphore{
|
s := &Semaphore{
|
||||||
c: c,
|
c: c,
|
||||||
opts: opts,
|
opts: opts,
|
||||||
|
@ -123,7 +133,7 @@ func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Acquire attempts to reserve a slot in the semaphore, blocking until
|
// Acquire attempts to reserve a slot in the semaphore, blocking until
|
||||||
// success, interrupted via the stopCh or an error is encounted.
|
// success, interrupted via the stopCh or an error is encountered.
|
||||||
// Providing a non-nil stopCh can be used to abort the attempt.
|
// Providing a non-nil stopCh can be used to abort the attempt.
|
||||||
// On success, a channel is returned that represents our slot.
|
// On success, a channel is returned that represents our slot.
|
||||||
// This channel could be closed at any time due to session invalidation,
|
// This channel could be closed at any time due to session invalidation,
|
||||||
|
@ -172,9 +182,11 @@ func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
||||||
|
|
||||||
// Setup the query options
|
// Setup the query options
|
||||||
qOpts := &QueryOptions{
|
qOpts := &QueryOptions{
|
||||||
WaitTime: DefaultSemaphoreWaitTime,
|
WaitTime: s.opts.SemaphoreWaitTime,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
attempts := 0
|
||||||
WAIT:
|
WAIT:
|
||||||
// Check if we should quit
|
// Check if we should quit
|
||||||
select {
|
select {
|
||||||
|
@ -183,6 +195,17 @@ WAIT:
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Handle the one-shot mode.
|
||||||
|
if s.opts.SemaphoreTryOnce && attempts > 0 {
|
||||||
|
elapsed := time.Now().Sub(start)
|
||||||
|
if elapsed > qOpts.WaitTime {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
qOpts.WaitTime -= elapsed
|
||||||
|
}
|
||||||
|
attempts++
|
||||||
|
|
||||||
// Read the prefix
|
// Read the prefix
|
||||||
pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
|
pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -460,8 +483,20 @@ func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
|
||||||
kv := s.c.KV()
|
kv := s.c.KV()
|
||||||
opts := &QueryOptions{RequireConsistent: true}
|
opts := &QueryOptions{RequireConsistent: true}
|
||||||
WAIT:
|
WAIT:
|
||||||
|
retries := s.opts.MonitorRetries
|
||||||
|
RETRY:
|
||||||
pairs, meta, err := kv.List(s.opts.Prefix, opts)
|
pairs, meta, err := kv.List(s.opts.Prefix, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// If configured we can try to ride out a brief Consul unavailability
|
||||||
|
// by doing retries. Note that we have to attempt the retry in a non-
|
||||||
|
// blocking fashion so that we have a clean place to reset the retry
|
||||||
|
// counter if service is restored.
|
||||||
|
if retries > 0 && IsServerError(err) {
|
||||||
|
time.Sleep(s.opts.MonitorRetryTime)
|
||||||
|
retries--
|
||||||
|
opts.WaitIndex = 0
|
||||||
|
goto RETRY
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
lockPair := s.findLock(pairs)
|
lockPair := s.findLock(pairs)
|
||||||
|
|
28
vendor/github.com/hashicorp/consul/api/session.go
generated
vendored
28
vendor/github.com/hashicorp/consul/api/session.go
generated
vendored
|
@ -1,6 +1,7 @@
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
@ -16,6 +17,8 @@ const (
|
||||||
SessionBehaviorDelete = "delete"
|
SessionBehaviorDelete = "delete"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var ErrSessionExpired = errors.New("session expired")
|
||||||
|
|
||||||
// SessionEntry represents a session in consul
|
// SessionEntry represents a session in consul
|
||||||
type SessionEntry struct {
|
type SessionEntry struct {
|
||||||
CreateIndex uint64
|
CreateIndex uint64
|
||||||
|
@ -102,7 +105,7 @@ func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta,
|
||||||
return out.ID, wm, nil
|
return out.ID, wm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Destroy invalides a given session
|
// Destroy invalidates a given session
|
||||||
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
|
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
|
||||||
wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
|
wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -113,11 +116,26 @@ func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
|
||||||
// Renew renews the TTL on a given session
|
// Renew renews the TTL on a given session
|
||||||
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
|
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
|
||||||
var entries []*SessionEntry
|
r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
|
||||||
wm, err := s.c.write("/v1/session/renew/"+id, nil, &entries, q)
|
r.setWriteOptions(q)
|
||||||
|
rtt, resp, err := s.c.doRequest(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
|
||||||
|
if resp.StatusCode == 404 {
|
||||||
|
return nil, wm, nil
|
||||||
|
} else if resp.StatusCode != 200 {
|
||||||
|
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
var entries []*SessionEntry
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("Failed to read response: %v", err)
|
||||||
|
}
|
||||||
if len(entries) > 0 {
|
if len(entries) > 0 {
|
||||||
return entries[0], wm, nil
|
return entries[0], wm, nil
|
||||||
}
|
}
|
||||||
|
@ -149,9 +167,7 @@ func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, d
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if entry == nil {
|
if entry == nil {
|
||||||
waitDur = time.Second
|
return ErrSessionExpired
|
||||||
lastErr = fmt.Errorf("No SessionEntry returned")
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle the server updating the TTL
|
// Handle the server updating the TTL
|
||||||
|
|
47
vendor/github.com/hashicorp/consul/api/snapshot.go
generated
vendored
Normal file
47
vendor/github.com/hashicorp/consul/api/snapshot.go
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
|
||||||
|
// Consul's internal state and restore snapshots for disaster recovery.
|
||||||
|
type Snapshot struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a handle that exposes the snapshot endpoints.
|
||||||
|
func (c *Client) Snapshot() *Snapshot {
|
||||||
|
return &Snapshot{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
|
||||||
|
// data to save. If this doesn't return an error, then it's the responsibility
|
||||||
|
// of the caller to close it. Only a subset of the QueryOptions are supported:
|
||||||
|
// Datacenter, AllowStale, and Token.
|
||||||
|
func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
|
||||||
|
r := s.c.newRequest("GET", "/v1/snapshot")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
|
||||||
|
rtt, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
return resp.Body, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Restore streams in an existing snapshot and attempts to restore it.
|
||||||
|
func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
|
||||||
|
r := s.c.newRequest("PUT", "/v1/snapshot")
|
||||||
|
r.body = in
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
_, _, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
363
vendor/github.com/hashicorp/go-cleanhttp/LICENSE
generated
vendored
Normal file
363
vendor/github.com/hashicorp/go-cleanhttp/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,363 @@
|
||||||
|
Mozilla Public License, version 2.0
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
|
||||||
|
1.1. "Contributor"
|
||||||
|
|
||||||
|
means each individual or legal entity that creates, contributes to the
|
||||||
|
creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. "Contributor Version"
|
||||||
|
|
||||||
|
means the combination of the Contributions of others (if any) used by a
|
||||||
|
Contributor and that particular Contributor's Contribution.
|
||||||
|
|
||||||
|
1.3. "Contribution"
|
||||||
|
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. "Covered Software"
|
||||||
|
|
||||||
|
means Source Code Form to which the initial Contributor has attached the
|
||||||
|
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||||
|
Modifications of such Source Code Form, in each case including portions
|
||||||
|
thereof.
|
||||||
|
|
||||||
|
1.5. "Incompatible With Secondary Licenses"
|
||||||
|
means
|
||||||
|
|
||||||
|
a. that the initial Contributor has attached the notice described in
|
||||||
|
Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
b. that the Covered Software was made available under the terms of
|
||||||
|
version 1.1 or earlier of the License, but not also under the terms of
|
||||||
|
a Secondary License.
|
||||||
|
|
||||||
|
1.6. "Executable Form"
|
||||||
|
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. "Larger Work"
|
||||||
|
|
||||||
|
means a work that combines Covered Software with other material, in a
|
||||||
|
separate file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. "License"
|
||||||
|
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. "Licensable"
|
||||||
|
|
||||||
|
means having the right to grant, to the maximum extent possible, whether
|
||||||
|
at the time of the initial grant or subsequently, any and all of the
|
||||||
|
rights conveyed by this License.
|
||||||
|
|
||||||
|
1.10. "Modifications"
|
||||||
|
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
a. any file in Source Code Form that results from an addition to,
|
||||||
|
deletion from, or modification of the contents of Covered Software; or
|
||||||
|
|
||||||
|
b. any new file in Source Code Form that contains any Covered Software.
|
||||||
|
|
||||||
|
1.11. "Patent Claims" of a Contributor
|
||||||
|
|
||||||
|
means any patent claim(s), including without limitation, method,
|
||||||
|
process, and apparatus claims, in any patent Licensable by such
|
||||||
|
Contributor that would be infringed, but for the grant of the License,
|
||||||
|
by the making, using, selling, offering for sale, having made, import,
|
||||||
|
or transfer of either its Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
1.12. "Secondary License"
|
||||||
|
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||||
|
General Public License, Version 2.1, the GNU Affero General Public
|
||||||
|
License, Version 3.0, or any later versions of those licenses.
|
||||||
|
|
||||||
|
1.13. "Source Code Form"
|
||||||
|
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. "You" (or "Your")
|
||||||
|
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, "You" includes any entity that controls, is
|
||||||
|
controlled by, or is under common control with You. For purposes of this
|
||||||
|
definition, "control" means (a) the power, direct or indirect, to cause
|
||||||
|
the direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||||
|
outstanding shares or beneficial ownership of such entity.
|
||||||
|
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
a. under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or
|
||||||
|
as part of a Larger Work; and
|
||||||
|
|
||||||
|
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||||
|
sale, have made, import, and otherwise transfer either its
|
||||||
|
Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution
|
||||||
|
become effective for each Contribution on the date the Contributor first
|
||||||
|
distributes such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under
|
||||||
|
this License. No additional rights or licenses will be implied from the
|
||||||
|
distribution or licensing of Covered Software under this License.
|
||||||
|
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||||
|
Contributor:
|
||||||
|
|
||||||
|
a. for any code that a Contributor has removed from Covered Software; or
|
||||||
|
|
||||||
|
b. for infringements caused by: (i) Your and any other third party's
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
c. under Patent Claims infringed by Covered Software in the absence of
|
||||||
|
its Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks,
|
||||||
|
or logos of any Contributor (except as may be necessary to comply with
|
||||||
|
the notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this
|
||||||
|
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||||
|
permitted under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its
|
||||||
|
Contributions are its original creation(s) or it has sufficient rights to
|
||||||
|
grant the rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under
|
||||||
|
applicable copyright doctrines of fair use, fair dealing, or other
|
||||||
|
equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||||
|
Section 2.1.
|
||||||
|
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under
|
||||||
|
the terms of this License. You must inform recipients that the Source
|
||||||
|
Code Form of the Covered Software is governed by the terms of this
|
||||||
|
License, and how they can obtain a copy of this License. You may not
|
||||||
|
attempt to alter or restrict the recipients' rights in the Source Code
|
||||||
|
Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
a. such Covered Software must also be made available in Source Code Form,
|
||||||
|
as described in Section 3.1, and You must inform recipients of the
|
||||||
|
Executable Form how they can obtain a copy of such Source Code Form by
|
||||||
|
reasonable means in a timely manner, at a charge no more than the cost
|
||||||
|
of distribution to the recipient; and
|
||||||
|
|
||||||
|
b. You may distribute such Executable Form under the terms of this
|
||||||
|
License, or sublicense it under different terms, provided that the
|
||||||
|
license for the Executable Form does not attempt to limit or alter the
|
||||||
|
recipients' rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for
|
||||||
|
the Covered Software. If the Larger Work is a combination of Covered
|
||||||
|
Software with a work governed by one or more Secondary Licenses, and the
|
||||||
|
Covered Software is not Incompatible With Secondary Licenses, this
|
||||||
|
License permits You to additionally distribute such Covered Software
|
||||||
|
under the terms of such Secondary License(s), so that the recipient of
|
||||||
|
the Larger Work may, at their option, further distribute the Covered
|
||||||
|
Software under the terms of either this License or such Secondary
|
||||||
|
License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices
|
||||||
|
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||||
|
limitations of liability) contained within the Source Code Form of the
|
||||||
|
Covered Software, except that You may alter any license notices to the
|
||||||
|
extent required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on
|
||||||
|
behalf of any Contributor. You must make it absolutely clear that any
|
||||||
|
such warranty, support, indemnity, or liability obligation is offered by
|
||||||
|
You alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this License
|
||||||
|
with respect to some or all of the Covered Software due to statute,
|
||||||
|
judicial order, or regulation then You must: (a) comply with the terms of
|
||||||
|
this License to the maximum extent possible; and (b) describe the
|
||||||
|
limitations and the code they affect. Such description must be placed in a
|
||||||
|
text file included with all distributions of the Covered Software under
|
||||||
|
this License. Except to the extent prohibited by statute or regulation,
|
||||||
|
such description must be sufficiently detailed for a recipient of ordinary
|
||||||
|
skill to be able to understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically if You
|
||||||
|
fail to comply with any of its terms. However, if You become compliant,
|
||||||
|
then the rights granted under this License from a particular Contributor
|
||||||
|
are reinstated (a) provisionally, unless and until such Contributor
|
||||||
|
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||||
|
basis, if such Contributor fails to notify You of the non-compliance by
|
||||||
|
some reasonable means prior to 60 days after You have come back into
|
||||||
|
compliance. Moreover, Your grants from a particular Contributor are
|
||||||
|
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||||
|
non-compliance by some reasonable means, this is the first time You have
|
||||||
|
received notice of non-compliance with this License from such
|
||||||
|
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||||
|
of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions,
|
||||||
|
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||||
|
directly or indirectly infringes any patent, then the rights granted to
|
||||||
|
You by any and all Contributors for the Covered Software under Section
|
||||||
|
2.1 of this License shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||||
|
license agreements (excluding distributors and resellers) which have been
|
||||||
|
validly granted by You or Your distributors under this License prior to
|
||||||
|
termination shall survive termination.
|
||||||
|
|
||||||
|
6. Disclaimer of Warranty
|
||||||
|
|
||||||
|
Covered Software is provided under this License on an "as is" basis,
|
||||||
|
without warranty of any kind, either expressed, implied, or statutory,
|
||||||
|
including, without limitation, warranties that the Covered Software is free
|
||||||
|
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||||
|
The entire risk as to the quality and performance of the Covered Software
|
||||||
|
is with You. Should any Covered Software prove defective in any respect,
|
||||||
|
You (not any Contributor) assume the cost of any necessary servicing,
|
||||||
|
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||||
|
part of this License. No use of any Covered Software is authorized under
|
||||||
|
this License except under this disclaimer.
|
||||||
|
|
||||||
|
7. Limitation of Liability
|
||||||
|
|
||||||
|
Under no circumstances and under no legal theory, whether tort (including
|
||||||
|
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||||
|
distributes Covered Software as permitted above, be liable to You for any
|
||||||
|
direct, indirect, special, incidental, or consequential damages of any
|
||||||
|
character including, without limitation, damages for lost profits, loss of
|
||||||
|
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses, even if such party shall have been
|
||||||
|
informed of the possibility of such damages. This limitation of liability
|
||||||
|
shall not apply to liability for death or personal injury resulting from
|
||||||
|
such party's negligence to the extent applicable law prohibits such
|
||||||
|
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||||
|
incidental or consequential damages, so this exclusion and limitation may
|
||||||
|
not apply to You.
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the courts
|
||||||
|
of a jurisdiction where the defendant maintains its principal place of
|
||||||
|
business and such litigation shall be governed by laws of that
|
||||||
|
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||||
|
in this Section shall prevent a party's ability to bring cross-claims or
|
||||||
|
counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject
|
||||||
|
matter hereof. If any provision of this License is held to be
|
||||||
|
unenforceable, such provision shall be reformed only to the extent
|
||||||
|
necessary to make it enforceable. Any law or regulation which provides that
|
||||||
|
the language of a contract shall be construed against the drafter shall not
|
||||||
|
be used to construe this License against a Contributor.
|
||||||
|
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version
|
||||||
|
of the License under which You originally received the Covered Software,
|
||||||
|
or under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a
|
||||||
|
modified version of this License if you rename the license and remove
|
||||||
|
any references to the name of the license steward (except to note that
|
||||||
|
such modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||||
|
Licenses If You choose to distribute Source Code Form that is
|
||||||
|
Incompatible With Secondary Licenses under the terms of this version of
|
||||||
|
the License, the notice described in Exhibit B of this License must be
|
||||||
|
attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
|
||||||
|
This Source Code Form is subject to the
|
||||||
|
terms of the Mozilla Public License, v.
|
||||||
|
2.0. If a copy of the MPL was not
|
||||||
|
distributed with this file, You can
|
||||||
|
obtain one at
|
||||||
|
http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular file,
|
||||||
|
then You may include the notice in a location (such as a LICENSE file in a
|
||||||
|
relevant directory) where a recipient would be likely to look for such a
|
||||||
|
notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||||
|
|
||||||
|
This Source Code Form is "Incompatible
|
||||||
|
With Secondary Licenses", as defined by
|
||||||
|
the Mozilla Public License, v. 2.0.
|
||||||
|
|
30
vendor/github.com/hashicorp/go-cleanhttp/README.md
generated
vendored
Normal file
30
vendor/github.com/hashicorp/go-cleanhttp/README.md
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
# cleanhttp
|
||||||
|
|
||||||
|
Functions for accessing "clean" Go http.Client values
|
||||||
|
|
||||||
|
-------------
|
||||||
|
|
||||||
|
The Go standard library contains a default `http.Client` called
|
||||||
|
`http.DefaultClient`. It is a common idiom in Go code to start with
|
||||||
|
`http.DefaultClient` and tweak it as necessary, and in fact, this is
|
||||||
|
encouraged; from the `http` package documentation:
|
||||||
|
|
||||||
|
> The Client's Transport typically has internal state (cached TCP connections),
|
||||||
|
so Clients should be reused instead of created as needed. Clients are safe for
|
||||||
|
concurrent use by multiple goroutines.
|
||||||
|
|
||||||
|
Unfortunately, this is a shared value, and it is not uncommon for libraries to
|
||||||
|
assume that they are free to modify it at will. With enough dependencies, it
|
||||||
|
can be very easy to encounter strange problems and race conditions due to
|
||||||
|
manipulation of this shared value across libraries and goroutines (clients are
|
||||||
|
safe for concurrent use, but writing values to the client struct itself is not
|
||||||
|
protected).
|
||||||
|
|
||||||
|
Making things worse is the fact that a bare `http.Client` will use a default
|
||||||
|
`http.Transport` called `http.DefaultTransport`, which is another global value
|
||||||
|
that behaves the same way. So it is not simply enough to replace
|
||||||
|
`http.DefaultClient` with `&http.Client{}`.
|
||||||
|
|
||||||
|
This repository provides some simple functions to get a "clean" `http.Client`
|
||||||
|
-- one that uses the same default values as the Go standard library, but
|
||||||
|
returns a client that does not share any state with other clients.
|
53
vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
generated
vendored
Normal file
53
vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
package cleanhttp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultTransport returns a new http.Transport with the same default values
|
||||||
|
// as http.DefaultTransport, but with idle connections and keepalives disabled.
|
||||||
|
func DefaultTransport() *http.Transport {
|
||||||
|
transport := DefaultPooledTransport()
|
||||||
|
transport.DisableKeepAlives = true
|
||||||
|
transport.MaxIdleConnsPerHost = -1
|
||||||
|
return transport
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultPooledTransport returns a new http.Transport with similar default
|
||||||
|
// values to http.DefaultTransport. Do not use this for transient transports as
|
||||||
|
// it can leak file descriptors over time. Only use this for transports that
|
||||||
|
// will be re-used for the same host(s).
|
||||||
|
func DefaultPooledTransport() *http.Transport {
|
||||||
|
transport := &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
TLSHandshakeTimeout: 10 * time.Second,
|
||||||
|
DisableKeepAlives: false,
|
||||||
|
MaxIdleConnsPerHost: 1,
|
||||||
|
}
|
||||||
|
return transport
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultClient returns a new http.Client with similar default values to
|
||||||
|
// http.Client, but with a non-shared Transport, idle connections disabled, and
|
||||||
|
// keepalives disabled.
|
||||||
|
func DefaultClient() *http.Client {
|
||||||
|
return &http.Client{
|
||||||
|
Transport: DefaultTransport(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultPooledClient returns a new http.Client with the same default values
|
||||||
|
// as http.Client, but with a shared Transport. Do not use this function
|
||||||
|
// for transient clients as it can leak file descriptors over time. Only use
|
||||||
|
// this for clients that will be re-used for the same host(s).
|
||||||
|
func DefaultPooledClient() *http.Client {
|
||||||
|
return &http.Client{
|
||||||
|
Transport: DefaultPooledTransport(),
|
||||||
|
}
|
||||||
|
}
|
20
vendor/github.com/hashicorp/go-cleanhttp/doc.go
generated
vendored
Normal file
20
vendor/github.com/hashicorp/go-cleanhttp/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
// Package cleanhttp offers convenience utilities for acquiring "clean"
|
||||||
|
// http.Transport and http.Client structs.
|
||||||
|
//
|
||||||
|
// Values set on http.DefaultClient and http.DefaultTransport affect all
|
||||||
|
// callers. This can have detrimental effects, especially in TLS contexts,
|
||||||
|
// where client or root certificates set to talk to multiple endpoints can end
|
||||||
|
// up displacing each other, leading to hard-to-debug issues. This package
|
||||||
|
// provides non-shared http.Client and http.Transport structs to ensure that
|
||||||
|
// the configuration will not be overwritten by other parts of the application
|
||||||
|
// or dependencies.
|
||||||
|
//
|
||||||
|
// The DefaultClient and DefaultTransport functions disable idle connections
|
||||||
|
// and keepalives. Without ensuring that idle connections are closed before
|
||||||
|
// garbage collection, short-term clients/transports can leak file descriptors,
|
||||||
|
// eventually leading to "too many open files" errors. If you will be
|
||||||
|
// connecting to the same hosts repeatedly from the same client, you can use
|
||||||
|
// DefaultPooledClient to receive a client that has connection pooling
|
||||||
|
// semantics similar to http.DefaultClient.
|
||||||
|
//
|
||||||
|
package cleanhttp
|
354
vendor/github.com/hashicorp/serf/LICENSE
generated
vendored
Normal file
354
vendor/github.com/hashicorp/serf/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,354 @@
|
||||||
|
Mozilla Public License, version 2.0
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
|
||||||
|
1.1. “Contributor”
|
||||||
|
|
||||||
|
means each individual or legal entity that creates, contributes to the
|
||||||
|
creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. “Contributor Version”
|
||||||
|
|
||||||
|
means the combination of the Contributions of others (if any) used by a
|
||||||
|
Contributor and that particular Contributor’s Contribution.
|
||||||
|
|
||||||
|
1.3. “Contribution”
|
||||||
|
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. “Covered Software”
|
||||||
|
|
||||||
|
means Source Code Form to which the initial Contributor has attached the
|
||||||
|
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||||
|
Modifications of such Source Code Form, in each case including portions
|
||||||
|
thereof.
|
||||||
|
|
||||||
|
1.5. “Incompatible With Secondary Licenses”
|
||||||
|
means
|
||||||
|
|
||||||
|
a. that the initial Contributor has attached the notice described in
|
||||||
|
Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
b. that the Covered Software was made available under the terms of version
|
||||||
|
1.1 or earlier of the License, but not also under the terms of a
|
||||||
|
Secondary License.
|
||||||
|
|
||||||
|
1.6. “Executable Form”
|
||||||
|
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. “Larger Work”
|
||||||
|
|
||||||
|
means a work that combines Covered Software with other material, in a separate
|
||||||
|
file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. “License”
|
||||||
|
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. “Licensable”
|
||||||
|
|
||||||
|
means having the right to grant, to the maximum extent possible, whether at the
|
||||||
|
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||||
|
this License.
|
||||||
|
|
||||||
|
1.10. “Modifications”
|
||||||
|
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
a. any file in Source Code Form that results from an addition to, deletion
|
||||||
|
from, or modification of the contents of Covered Software; or
|
||||||
|
|
||||||
|
b. any new file in Source Code Form that contains any Covered Software.
|
||||||
|
|
||||||
|
1.11. “Patent Claims” of a Contributor
|
||||||
|
|
||||||
|
means any patent claim(s), including without limitation, method, process,
|
||||||
|
and apparatus claims, in any patent Licensable by such Contributor that
|
||||||
|
would be infringed, but for the grant of the License, by the making,
|
||||||
|
using, selling, offering for sale, having made, import, or transfer of
|
||||||
|
either its Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
1.12. “Secondary License”
|
||||||
|
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||||
|
General Public License, Version 2.1, the GNU Affero General Public
|
||||||
|
License, Version 3.0, or any later versions of those licenses.
|
||||||
|
|
||||||
|
1.13. “Source Code Form”
|
||||||
|
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. “You” (or “Your”)
|
||||||
|
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, “You” includes any entity that controls, is
|
||||||
|
controlled by, or is under common control with You. For purposes of this
|
||||||
|
definition, “control” means (a) the power, direct or indirect, to cause
|
||||||
|
the direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||||
|
outstanding shares or beneficial ownership of such entity.
|
||||||
|
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
a. under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or as
|
||||||
|
part of a Larger Work; and
|
||||||
|
|
||||||
|
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||||
|
sale, have made, import, and otherwise transfer either its Contributions
|
||||||
|
or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||||
|
effective for each Contribution on the date the Contributor first distributes
|
||||||
|
such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under this
|
||||||
|
License. No additional rights or licenses will be implied from the distribution
|
||||||
|
or licensing of Covered Software under this License. Notwithstanding Section
|
||||||
|
2.1(b) above, no patent license is granted by a Contributor:
|
||||||
|
|
||||||
|
a. for any code that a Contributor has removed from Covered Software; or
|
||||||
|
|
||||||
|
b. for infringements caused by: (i) Your and any other third party’s
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||||
|
Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks, or
|
||||||
|
logos of any Contributor (except as may be necessary to comply with the
|
||||||
|
notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this License
|
||||||
|
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||||
|
under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its Contributions
|
||||||
|
are its original creation(s) or it has sufficient rights to grant the
|
||||||
|
rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under applicable
|
||||||
|
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||||
|
Section 2.1.
|
||||||
|
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under the
|
||||||
|
terms of this License. You must inform recipients that the Source Code Form
|
||||||
|
of the Covered Software is governed by the terms of this License, and how
|
||||||
|
they can obtain a copy of this License. You may not attempt to alter or
|
||||||
|
restrict the recipients’ rights in the Source Code Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
a. such Covered Software must also be made available in Source Code Form,
|
||||||
|
as described in Section 3.1, and You must inform recipients of the
|
||||||
|
Executable Form how they can obtain a copy of such Source Code Form by
|
||||||
|
reasonable means in a timely manner, at a charge no more than the cost
|
||||||
|
of distribution to the recipient; and
|
||||||
|
|
||||||
|
b. You may distribute such Executable Form under the terms of this License,
|
||||||
|
or sublicense it under different terms, provided that the license for
|
||||||
|
the Executable Form does not attempt to limit or alter the recipients’
|
||||||
|
rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for the
|
||||||
|
Covered Software. If the Larger Work is a combination of Covered Software
|
||||||
|
with a work governed by one or more Secondary Licenses, and the Covered
|
||||||
|
Software is not Incompatible With Secondary Licenses, this License permits
|
||||||
|
You to additionally distribute such Covered Software under the terms of
|
||||||
|
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||||
|
their option, further distribute the Covered Software under the terms of
|
||||||
|
either this License or such Secondary License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices (including
|
||||||
|
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||||
|
of liability) contained within the Source Code Form of the Covered
|
||||||
|
Software, except that You may alter any license notices to the extent
|
||||||
|
required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||||
|
of any Contributor. You must make it absolutely clear that any such
|
||||||
|
warranty, support, indemnity, or liability obligation is offered by You
|
||||||
|
alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this License
|
||||||
|
with respect to some or all of the Covered Software due to statute, judicial
|
||||||
|
order, or regulation then You must: (a) comply with the terms of this License
|
||||||
|
to the maximum extent possible; and (b) describe the limitations and the code
|
||||||
|
they affect. Such description must be placed in a text file included with all
|
||||||
|
distributions of the Covered Software under this License. Except to the
|
||||||
|
extent prohibited by statute or regulation, such description must be
|
||||||
|
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||||
|
understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically if You
|
||||||
|
fail to comply with any of its terms. However, if You become compliant,
|
||||||
|
then the rights granted under this License from a particular Contributor
|
||||||
|
are reinstated (a) provisionally, unless and until such Contributor
|
||||||
|
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||||
|
if such Contributor fails to notify You of the non-compliance by some
|
||||||
|
reasonable means prior to 60 days after You have come back into compliance.
|
||||||
|
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||||
|
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||||
|
some reasonable means, this is the first time You have received notice of
|
||||||
|
non-compliance with this License from such Contributor, and You become
|
||||||
|
compliant prior to 30 days after Your receipt of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||||
|
and cross-claims) alleging that a Contributor Version directly or
|
||||||
|
indirectly infringes any patent, then the rights granted to You by any and
|
||||||
|
all Contributors for the Covered Software under Section 2.1 of this License
|
||||||
|
shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||||
|
license agreements (excluding distributors and resellers) which have been
|
||||||
|
validly granted by You or Your distributors under this License prior to
|
||||||
|
termination shall survive termination.
|
||||||
|
|
||||||
|
6. Disclaimer of Warranty
|
||||||
|
|
||||||
|
Covered Software is provided under this License on an “as is” basis, without
|
||||||
|
warranty of any kind, either expressed, implied, or statutory, including,
|
||||||
|
without limitation, warranties that the Covered Software is free of defects,
|
||||||
|
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||||
|
risk as to the quality and performance of the Covered Software is with You.
|
||||||
|
Should any Covered Software prove defective in any respect, You (not any
|
||||||
|
Contributor) assume the cost of any necessary servicing, repair, or
|
||||||
|
correction. This disclaimer of warranty constitutes an essential part of this
|
||||||
|
License. No use of any Covered Software is authorized under this License
|
||||||
|
except under this disclaimer.
|
||||||
|
|
||||||
|
7. Limitation of Liability
|
||||||
|
|
||||||
|
Under no circumstances and under no legal theory, whether tort (including
|
||||||
|
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||||
|
distributes Covered Software as permitted above, be liable to You for any
|
||||||
|
direct, indirect, special, incidental, or consequential damages of any
|
||||||
|
character including, without limitation, damages for lost profits, loss of
|
||||||
|
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses, even if such party shall have been
|
||||||
|
informed of the possibility of such damages. This limitation of liability
|
||||||
|
shall not apply to liability for death or personal injury resulting from such
|
||||||
|
party’s negligence to the extent applicable law prohibits such limitation.
|
||||||
|
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||||
|
consequential damages, so this exclusion and limitation may not apply to You.
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the courts of
|
||||||
|
a jurisdiction where the defendant maintains its principal place of business
|
||||||
|
and such litigation shall be governed by laws of that jurisdiction, without
|
||||||
|
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||||
|
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject matter
|
||||||
|
hereof. If any provision of this License is held to be unenforceable, such
|
||||||
|
provision shall be reformed only to the extent necessary to make it
|
||||||
|
enforceable. Any law or regulation which provides that the language of a
|
||||||
|
contract shall be construed against the drafter shall not be used to construe
|
||||||
|
this License against a Contributor.
|
||||||
|
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version of
|
||||||
|
the License under which You originally received the Covered Software, or
|
||||||
|
under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a modified
|
||||||
|
version of this License if you rename the license and remove any
|
||||||
|
references to the name of the license steward (except to note that such
|
||||||
|
modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||||
|
If You choose to distribute Source Code Form that is Incompatible With
|
||||||
|
Secondary Licenses under the terms of this version of the License, the
|
||||||
|
notice described in Exhibit B of this License must be attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
|
||||||
|
This Source Code Form is subject to the
|
||||||
|
terms of the Mozilla Public License, v.
|
||||||
|
2.0. If a copy of the MPL was not
|
||||||
|
distributed with this file, You can
|
||||||
|
obtain one at
|
||||||
|
http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular file, then
|
||||||
|
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||||
|
directory) where a recipient would be likely to look for such a notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||||
|
|
||||||
|
This Source Code Form is “Incompatible
|
||||||
|
With Secondary Licenses”, as defined by
|
||||||
|
the Mozilla Public License, v. 2.0.
|
||||||
|
|
180
vendor/github.com/hashicorp/serf/coordinate/client.go
generated
vendored
Normal file
180
vendor/github.com/hashicorp/serf/coordinate/client.go
generated
vendored
Normal file
|
@ -0,0 +1,180 @@
|
||||||
|
package coordinate
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client manages the estimated network coordinate for a given node, and adjusts
// it as the node observes round trip times and estimated coordinates from other
// nodes. The core algorithm is based on Vivaldi, see the documentation for Config
// for more details.
type Client struct {
	// coord is the current estimate of the client's network coordinate.
	coord *Coordinate

	// origin is a coordinate sitting at the origin.
	origin *Coordinate

	// config contains the tuning parameters that govern the performance of
	// the algorithm.
	config *Config

	// adjustmentIndex is the current index into the adjustmentSamples slice.
	adjustmentIndex uint

	// adjustment is used to store samples for the adjustment calculation.
	adjustmentSamples []float64

	// latencyFilterSamples is used to store the last several RTT samples,
	// keyed by node name. We will use the config's LatencyFilterSize
	// value to determine how many samples we keep, per node.
	latencyFilterSamples map[string][]float64

	// mutex enables safe concurrent access to the client.
	mutex sync.RWMutex
}
|
||||||
|
|
||||||
|
// NewClient creates a new Client and verifies the configuration is valid.
|
||||||
|
func NewClient(config *Config) (*Client, error) {
|
||||||
|
if !(config.Dimensionality > 0) {
|
||||||
|
return nil, fmt.Errorf("dimensionality must be >0")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Client{
|
||||||
|
coord: NewCoordinate(config),
|
||||||
|
origin: NewCoordinate(config),
|
||||||
|
config: config,
|
||||||
|
adjustmentIndex: 0,
|
||||||
|
adjustmentSamples: make([]float64, config.AdjustmentWindowSize),
|
||||||
|
latencyFilterSamples: make(map[string][]float64),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCoordinate returns a copy of the coordinate for this client.
|
||||||
|
func (c *Client) GetCoordinate() *Coordinate {
|
||||||
|
c.mutex.RLock()
|
||||||
|
defer c.mutex.RUnlock()
|
||||||
|
|
||||||
|
return c.coord.Clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCoordinate forces the client's coordinate to a known state.
|
||||||
|
func (c *Client) SetCoordinate(coord *Coordinate) {
|
||||||
|
c.mutex.Lock()
|
||||||
|
defer c.mutex.Unlock()
|
||||||
|
|
||||||
|
c.coord = coord.Clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForgetNode removes any client state for the given node.
|
||||||
|
func (c *Client) ForgetNode(node string) {
|
||||||
|
c.mutex.Lock()
|
||||||
|
defer c.mutex.Unlock()
|
||||||
|
|
||||||
|
delete(c.latencyFilterSamples, node)
|
||||||
|
}
|
||||||
|
|
||||||
|
// latencyFilter applies a simple moving median filter with a new sample for
|
||||||
|
// a node. This assumes that the mutex has been locked already.
|
||||||
|
func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
|
||||||
|
samples, ok := c.latencyFilterSamples[node]
|
||||||
|
if !ok {
|
||||||
|
samples = make([]float64, 0, c.config.LatencyFilterSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add the new sample and trim the list, if needed.
|
||||||
|
samples = append(samples, rttSeconds)
|
||||||
|
if len(samples) > int(c.config.LatencyFilterSize) {
|
||||||
|
samples = samples[1:]
|
||||||
|
}
|
||||||
|
c.latencyFilterSamples[node] = samples
|
||||||
|
|
||||||
|
// Sort a copy of the samples and return the median.
|
||||||
|
sorted := make([]float64, len(samples))
|
||||||
|
copy(sorted, samples)
|
||||||
|
sort.Float64s(sorted)
|
||||||
|
return sorted[len(sorted)/2]
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
// assumes that the mutex has been locked already.
func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
	const zeroThreshold = 1.0e-6

	dist := c.coord.DistanceTo(other).Seconds()
	// Clamp the RTT away from zero so the relative-error division below is
	// well defined.
	if rttSeconds < zeroThreshold {
		rttSeconds = zeroThreshold
	}
	wrongness := math.Abs(dist-rttSeconds) / rttSeconds

	totalError := c.coord.Error + other.Error
	if totalError < zeroThreshold {
		totalError = zeroThreshold
	}
	// Weight this observation by our share of the combined error estimate:
	// the less confident node moves more.
	weight := c.coord.Error / totalError

	// Blend the observed wrongness into our error estimate (scaled by
	// VivaldiCE), capping it at the configured maximum.
	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
	if c.coord.Error > c.config.VivaldiErrorMax {
		c.coord.Error = c.config.VivaldiErrorMax
	}

	// Move the coordinate along the force vector toward/away from the other
	// node, proportional to the estimate error (scaled by VivaldiCC).
	delta := c.config.VivaldiCC * weight
	force := delta * (rttSeconds - dist)
	c.coord = c.coord.ApplyForce(c.config, force, other)
}
|
||||||
|
|
||||||
|
// updateAdjustment updates the adjustment portion of the client's coordinate, if
// the feature is enabled. This assumes that the mutex has been locked already.
func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
	// AdjustmentWindowSize == 0 disables this feature entirely.
	if c.config.AdjustmentWindowSize == 0 {
		return
	}

	// Note that the existing adjustment factors don't figure in to this
	// calculation so we use the raw distance here.
	dist := c.coord.rawDistanceTo(other)

	// Record the residual (observed RTT minus estimated distance) in a
	// fixed-size ring buffer indexed by adjustmentIndex.
	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize

	// The adjustment is half the mean residual over the window.
	sum := 0.0
	for _, sample := range c.adjustmentSamples {
		sum += sample
	}
	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
}
|
||||||
|
|
||||||
|
// updateGravity applies a small amount of gravity to pull coordinates towards
// the center of the coordinate system to combat drift. This assumes that the
// mutex is locked already.
func (c *Client) updateGravity() {
	dist := c.origin.DistanceTo(c.coord).Seconds()
	// The force is negative (a pull toward the origin) and grows
	// quadratically with distance, scaled by GravityRho.
	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
}
|
||||||
|
|
||||||
|
// Update takes other, a coordinate for another node, and rtt, a round trip
// time observation for a ping to that node, and updates the estimated position of
// the client's coordinate. Returns the updated coordinate.
func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	// Smooth the raw RTT through the per-node median filter, then apply the
	// Vivaldi update, the adjustment term, and gravity, in that order.
	rttSeconds := c.latencyFilter(node, rtt.Seconds())
	c.updateVivaldi(other, rttSeconds)
	c.updateAdjustment(other, rttSeconds)
	c.updateGravity()
	return c.coord.Clone()
}
|
||||||
|
|
||||||
|
// DistanceTo returns the estimated RTT from the client's coordinate to other, the
|
||||||
|
// coordinate for another node.
|
||||||
|
func (c *Client) DistanceTo(other *Coordinate) time.Duration {
|
||||||
|
c.mutex.RLock()
|
||||||
|
defer c.mutex.RUnlock()
|
||||||
|
|
||||||
|
return c.coord.DistanceTo(other)
|
||||||
|
}
|
70
vendor/github.com/hashicorp/serf/coordinate/config.go
generated
vendored
Normal file
70
vendor/github.com/hashicorp/serf/coordinate/config.go
generated
vendored
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
package coordinate
|
||||||
|
|
||||||
|
// Config is used to set the parameters of the Vivaldi-based coordinate mapping
// algorithm.
//
// The following references are called out at various points in the documentation
// here:
//
// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
//     ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
//     in the Wild." NSDI. Vol. 7. 2007.
// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
//     host-based network coordinate systems." Networking, IEEE/ACM Transactions
//     on 18.1 (2010): 27-40.
type Config struct {
	// The dimensionality of the coordinate system. As discussed in [2], more
	// dimensions improves the accuracy of the estimates up to a point. Per [2]
	// we chose 8 dimensions plus a non-Euclidean height.
	Dimensionality uint

	// VivaldiErrorMax is the default error value when a node hasn't yet made
	// any observations. It also serves as an upper limit on the error value in
	// case observations cause the error value to increase without bound.
	VivaldiErrorMax float64

	// VivaldiCE is a tuning factor that controls the maximum impact an
	// observation can have on a node's confidence. See [1] for more details.
	VivaldiCE float64

	// VivaldiCC is a tuning factor that controls the maximum impact an
	// observation can have on a node's coordinate. See [1] for more details.
	VivaldiCC float64

	// AdjustmentWindowSize is a tuning factor that determines how many samples
	// we retain to calculate the adjustment factor as discussed in [3]. Setting
	// this to zero disables this feature.
	AdjustmentWindowSize uint

	// HeightMin is the minimum value of the height parameter. Since this
	// always must be positive, it will introduce a small amount error, so
	// the chosen value should be relatively small compared to "normal"
	// coordinates.
	HeightMin float64

	// LatencyFilterSize is the maximum number of samples that are retained
	// per node, in order to compute a median. The intent is to ride out blips
	// but still keep the delay low, since our time to probe any given node is
	// pretty infrequent. See [2] for more details.
	LatencyFilterSize uint

	// GravityRho is a tuning factor that sets how much gravity has an effect
	// to try to re-center coordinates. See [2] for more details.
	GravityRho float64
}
|
||||||
|
|
||||||
|
// DefaultConfig returns a Config that has some default values suitable for
|
||||||
|
// basic testing of the algorithm, but not tuned to any particular type of cluster.
|
||||||
|
func DefaultConfig() *Config {
|
||||||
|
return &Config{
|
||||||
|
Dimensionality: 8,
|
||||||
|
VivaldiErrorMax: 1.5,
|
||||||
|
VivaldiCE: 0.25,
|
||||||
|
VivaldiCC: 0.25,
|
||||||
|
AdjustmentWindowSize: 20,
|
||||||
|
HeightMin: 10.0e-6,
|
||||||
|
LatencyFilterSize: 3,
|
||||||
|
GravityRho: 150.0,
|
||||||
|
}
|
||||||
|
}
|
183
vendor/github.com/hashicorp/serf/coordinate/coordinate.go
generated
vendored
Normal file
183
vendor/github.com/hashicorp/serf/coordinate/coordinate.go
generated
vendored
Normal file
|
@ -0,0 +1,183 @@
|
||||||
|
package coordinate
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Coordinate is a specialized structure for holding network coordinates for the
|
||||||
|
// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
|
||||||
|
// to enable this to be serialized. All values in here are in units of seconds.
|
||||||
|
type Coordinate struct {
|
||||||
|
// Vec is the Euclidean portion of the coordinate. This is used along
|
||||||
|
// with the other fields to provide an overall distance estimate. The
|
||||||
|
// units here are seconds.
|
||||||
|
Vec []float64
|
||||||
|
|
||||||
|
// Err reflects the confidence in the given coordinate and is updated
|
||||||
|
// dynamically by the Vivaldi Client. This is dimensionless.
|
||||||
|
Error float64
|
||||||
|
|
||||||
|
// Adjustment is a distance offset computed based on a calculation over
|
||||||
|
// observations from all other nodes over a fixed window and is updated
|
||||||
|
// dynamically by the Vivaldi Client. The units here are seconds.
|
||||||
|
Adjustment float64
|
||||||
|
|
||||||
|
// Height is a distance offset that accounts for non-Euclidean effects
|
||||||
|
// which model the access links from nodes to the core Internet. The access
|
||||||
|
// links are usually set by bandwidth and congestion, and the core links
|
||||||
|
// usually follow distance based on geography.
|
||||||
|
Height float64
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// secondsToNanoseconds is used to convert float seconds to nanoseconds.
|
||||||
|
secondsToNanoseconds = 1.0e9
|
||||||
|
|
||||||
|
// zeroThreshold is used to decide if two coordinates are on top of each
|
||||||
|
// other.
|
||||||
|
zeroThreshold = 1.0e-6
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrDimensionalityConflict will be panic-d if you try to perform operations
|
||||||
|
// with incompatible dimensions.
|
||||||
|
type DimensionalityConflictError struct{}
|
||||||
|
|
||||||
|
// Adds the error interface.
|
||||||
|
func (e DimensionalityConflictError) Error() string {
|
||||||
|
return "coordinate dimensionality does not match"
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCoordinate creates a new coordinate at the origin, using the given config
|
||||||
|
// to supply key initial values.
|
||||||
|
func NewCoordinate(config *Config) *Coordinate {
|
||||||
|
return &Coordinate{
|
||||||
|
Vec: make([]float64, config.Dimensionality),
|
||||||
|
Error: config.VivaldiErrorMax,
|
||||||
|
Adjustment: 0.0,
|
||||||
|
Height: config.HeightMin,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone creates an independent copy of this coordinate.
|
||||||
|
func (c *Coordinate) Clone() *Coordinate {
|
||||||
|
vec := make([]float64, len(c.Vec))
|
||||||
|
copy(vec, c.Vec)
|
||||||
|
return &Coordinate{
|
||||||
|
Vec: vec,
|
||||||
|
Error: c.Error,
|
||||||
|
Adjustment: c.Adjustment,
|
||||||
|
Height: c.Height,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCompatibleWith checks to see if the two coordinates are compatible
|
||||||
|
// dimensionally. If this returns true then you are guaranteed to not get
|
||||||
|
// any runtime errors operating on them.
|
||||||
|
func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
|
||||||
|
return len(c.Vec) == len(other.Vec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyForce returns the result of applying the force from the direction of the
|
||||||
|
// other coordinate.
|
||||||
|
func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate {
|
||||||
|
if !c.IsCompatibleWith(other) {
|
||||||
|
panic(DimensionalityConflictError{})
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := c.Clone()
|
||||||
|
unit, mag := unitVectorAt(c.Vec, other.Vec)
|
||||||
|
ret.Vec = add(ret.Vec, mul(unit, force))
|
||||||
|
if mag > zeroThreshold {
|
||||||
|
ret.Height = (ret.Height+other.Height)*force/mag + ret.Height
|
||||||
|
ret.Height = math.Max(ret.Height, config.HeightMin)
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// DistanceTo returns the distance between this coordinate and the other
|
||||||
|
// coordinate, including adjustments.
|
||||||
|
func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration {
|
||||||
|
if !c.IsCompatibleWith(other) {
|
||||||
|
panic(DimensionalityConflictError{})
|
||||||
|
}
|
||||||
|
|
||||||
|
dist := c.rawDistanceTo(other)
|
||||||
|
adjustedDist := dist + c.Adjustment + other.Adjustment
|
||||||
|
if adjustedDist > 0.0 {
|
||||||
|
dist = adjustedDist
|
||||||
|
}
|
||||||
|
return time.Duration(dist * secondsToNanoseconds)
|
||||||
|
}
|
||||||
|
|
||||||
|
// rawDistanceTo returns the Vivaldi distance between this coordinate and the
|
||||||
|
// other coordinate in seconds, not including adjustments. This assumes the
|
||||||
|
// dimensions have already been checked to be compatible.
|
||||||
|
func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
|
||||||
|
return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height
|
||||||
|
}
|
||||||
|
|
||||||
|
// add returns the sum of vec1 and vec2. This assumes the dimensions have
|
||||||
|
// already been checked to be compatible.
|
||||||
|
func add(vec1 []float64, vec2 []float64) []float64 {
|
||||||
|
ret := make([]float64, len(vec1))
|
||||||
|
for i, _ := range ret {
|
||||||
|
ret[i] = vec1[i] + vec2[i]
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// diff returns the difference between the vec1 and vec2. This assumes the
|
||||||
|
// dimensions have already been checked to be compatible.
|
||||||
|
func diff(vec1 []float64, vec2 []float64) []float64 {
|
||||||
|
ret := make([]float64, len(vec1))
|
||||||
|
for i, _ := range ret {
|
||||||
|
ret[i] = vec1[i] - vec2[i]
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// mul returns vec multiplied by a scalar factor.
|
||||||
|
func mul(vec []float64, factor float64) []float64 {
|
||||||
|
ret := make([]float64, len(vec))
|
||||||
|
for i, _ := range vec {
|
||||||
|
ret[i] = vec[i] * factor
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// magnitude computes the magnitude of the vec.
|
||||||
|
func magnitude(vec []float64) float64 {
|
||||||
|
sum := 0.0
|
||||||
|
for i, _ := range vec {
|
||||||
|
sum += vec[i] * vec[i]
|
||||||
|
}
|
||||||
|
return math.Sqrt(sum)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
|
||||||
|
// positions are the same then a random unit vector is returned. We also return
|
||||||
|
// the distance between the points for use in the later height calculation.
|
||||||
|
func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
|
||||||
|
ret := diff(vec1, vec2)
|
||||||
|
|
||||||
|
// If the coordinates aren't on top of each other we can normalize.
|
||||||
|
if mag := magnitude(ret); mag > zeroThreshold {
|
||||||
|
return mul(ret, 1.0/mag), mag
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise, just return a random unit vector.
|
||||||
|
for i, _ := range ret {
|
||||||
|
ret[i] = rand.Float64() - 0.5
|
||||||
|
}
|
||||||
|
if mag := magnitude(ret); mag > zeroThreshold {
|
||||||
|
return mul(ret, 1.0/mag), 0.0
|
||||||
|
}
|
||||||
|
|
||||||
|
// And finally just give up and make a unit vector along the first
|
||||||
|
// dimension. This should be exceedingly rare.
|
||||||
|
ret = make([]float64, len(ret))
|
||||||
|
ret[0] = 1.0
|
||||||
|
return ret, 0.0
|
||||||
|
}
|
187
vendor/github.com/hashicorp/serf/coordinate/phantom.go
generated
vendored
Normal file
187
vendor/github.com/hashicorp/serf/coordinate/phantom.go
generated
vendored
Normal file
|
@ -0,0 +1,187 @@
|
||||||
|
package coordinate
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GenerateClients returns a slice with nodes number of clients, all with the
|
||||||
|
// given config.
|
||||||
|
func GenerateClients(nodes int, config *Config) ([]*Client, error) {
|
||||||
|
clients := make([]*Client, nodes)
|
||||||
|
for i, _ := range clients {
|
||||||
|
client, err := NewClient(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
clients[i] = client
|
||||||
|
}
|
||||||
|
return clients, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateLine returns a truth matrix as if all the nodes are in a straight linke
|
||||||
|
// with the given spacing between them.
|
||||||
|
func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
|
||||||
|
truth := make([][]time.Duration, nodes)
|
||||||
|
for i := range truth {
|
||||||
|
truth[i] = make([]time.Duration, nodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < nodes; i++ {
|
||||||
|
for j := i + 1; j < nodes; j++ {
|
||||||
|
rtt := time.Duration(j-i) * spacing
|
||||||
|
truth[i][j], truth[j][i] = rtt, rtt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return truth
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
|
||||||
|
// grid with the given spacing between them.
|
||||||
|
func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
|
||||||
|
truth := make([][]time.Duration, nodes)
|
||||||
|
for i := range truth {
|
||||||
|
truth[i] = make([]time.Duration, nodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
n := int(math.Sqrt(float64(nodes)))
|
||||||
|
for i := 0; i < nodes; i++ {
|
||||||
|
for j := i + 1; j < nodes; j++ {
|
||||||
|
x1, y1 := float64(i%n), float64(i/n)
|
||||||
|
x2, y2 := float64(j%n), float64(j/n)
|
||||||
|
dx, dy := x2-x1, y2-y1
|
||||||
|
dist := math.Sqrt(dx*dx + dy*dy)
|
||||||
|
rtt := time.Duration(dist * float64(spacing))
|
||||||
|
truth[i][j], truth[j][i] = rtt, rtt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return truth
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateSplit returns a truth matrix as if half the nodes are close together in
|
||||||
|
// one location and half the nodes are close together in another. The lan factor
|
||||||
|
// is used to separate the nodes locally and the wan factor represents the split
|
||||||
|
// between the two sides.
|
||||||
|
func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
|
||||||
|
truth := make([][]time.Duration, nodes)
|
||||||
|
for i := range truth {
|
||||||
|
truth[i] = make([]time.Duration, nodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
split := nodes / 2
|
||||||
|
for i := 0; i < nodes; i++ {
|
||||||
|
for j := i + 1; j < nodes; j++ {
|
||||||
|
rtt := lan
|
||||||
|
if (i <= split && j > split) || (i > split && j <= split) {
|
||||||
|
rtt += wan
|
||||||
|
}
|
||||||
|
truth[i][j], truth[j][i] = rtt, rtt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return truth
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
|
||||||
|
// around a circle with the given radius. The first node is at the "center" of the
|
||||||
|
// circle because it's equidistant from all the other nodes, but we place it at
|
||||||
|
// double the radius, so it should show up above all the other nodes in height.
|
||||||
|
func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration {
|
||||||
|
truth := make([][]time.Duration, nodes)
|
||||||
|
for i := range truth {
|
||||||
|
truth[i] = make([]time.Duration, nodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < nodes; i++ {
|
||||||
|
for j := i + 1; j < nodes; j++ {
|
||||||
|
var rtt time.Duration
|
||||||
|
if i == 0 {
|
||||||
|
rtt = 2 * radius
|
||||||
|
} else {
|
||||||
|
t1 := 2.0 * math.Pi * float64(i) / float64(nodes)
|
||||||
|
x1, y1 := math.Cos(t1), math.Sin(t1)
|
||||||
|
t2 := 2.0 * math.Pi * float64(j) / float64(nodes)
|
||||||
|
x2, y2 := math.Cos(t2), math.Sin(t2)
|
||||||
|
dx, dy := x2-x1, y2-y1
|
||||||
|
dist := math.Sqrt(dx*dx + dy*dy)
|
||||||
|
rtt = time.Duration(dist * float64(radius))
|
||||||
|
}
|
||||||
|
truth[i][j], truth[j][i] = rtt, rtt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return truth
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateRandom returns a truth matrix for a set of nodes with normally
|
||||||
|
// distributed delays, with the given mean and deviation. The RNG is re-seeded
|
||||||
|
// so you always get the same matrix for a given size.
|
||||||
|
func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration {
|
||||||
|
rand.Seed(1)
|
||||||
|
|
||||||
|
truth := make([][]time.Duration, nodes)
|
||||||
|
for i := range truth {
|
||||||
|
truth[i] = make([]time.Duration, nodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < nodes; i++ {
|
||||||
|
for j := i + 1; j < nodes; j++ {
|
||||||
|
rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds()
|
||||||
|
rtt := time.Duration(rttSeconds * secondsToNanoseconds)
|
||||||
|
truth[i][j], truth[j][i] = rtt, rtt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return truth
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simulate runs the given number of cycles using the given list of clients and
|
||||||
|
// truth matrix. On each cycle, each client will pick a random node and observe
|
||||||
|
// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for
|
||||||
|
// each simulation run to get deterministic results (for this algorithm and the
|
||||||
|
// underlying algorithm which will use random numbers for position vectors when
|
||||||
|
// starting out with everything at the origin).
|
||||||
|
func Simulate(clients []*Client, truth [][]time.Duration, cycles int) {
|
||||||
|
rand.Seed(1)
|
||||||
|
|
||||||
|
nodes := len(clients)
|
||||||
|
for cycle := 0; cycle < cycles; cycle++ {
|
||||||
|
for i, _ := range clients {
|
||||||
|
if j := rand.Intn(nodes); j != i {
|
||||||
|
c := clients[j].GetCoordinate()
|
||||||
|
rtt := truth[i][j]
|
||||||
|
node := fmt.Sprintf("node_%d", j)
|
||||||
|
clients[i].Update(node, c, rtt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats is returned from the Evaluate function with a summary of the algorithm
|
||||||
|
// performance.
|
||||||
|
type Stats struct {
|
||||||
|
ErrorMax float64
|
||||||
|
ErrorAvg float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Evaluate uses the coordinates of the given clients to calculate estimated
|
||||||
|
// distances and compares them with the given truth matrix, returning summary
|
||||||
|
// stats.
|
||||||
|
func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) {
|
||||||
|
nodes := len(clients)
|
||||||
|
count := 0
|
||||||
|
for i := 0; i < nodes; i++ {
|
||||||
|
for j := i + 1; j < nodes; j++ {
|
||||||
|
est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds()
|
||||||
|
actual := truth[i][j].Seconds()
|
||||||
|
error := math.Abs(est-actual) / actual
|
||||||
|
stats.ErrorMax = math.Max(stats.ErrorMax, error)
|
||||||
|
stats.ErrorAvg += error
|
||||||
|
count += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.ErrorAvg /= float64(count)
|
||||||
|
fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
|
||||||
|
return
|
||||||
|
}
|
34
vendor/github.com/miekg/dns/README.md
generated
vendored
34
vendor/github.com/miekg/dns/README.md
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
[](https://travis-ci.org/miekg/dns)
|
[](https://travis-ci.org/miekg/dns) [](https://godoc.org/github.com/miekg/dns)
|
||||||
|
|
||||||
# Alternative (more granular) approach to a DNS library
|
# Alternative (more granular) approach to a DNS library
|
||||||
|
|
||||||
|
@ -10,9 +10,9 @@ If there is stuff you should know as a DNS programmer there isn't a convenience
|
||||||
function for it. Server side and client side programming is supported, i.e. you
|
function for it. Server side and client side programming is supported, i.e. you
|
||||||
can build servers and resolvers with it.
|
can build servers and resolvers with it.
|
||||||
|
|
||||||
If you like this, you may also be interested in:
|
We try to keep the "master" branch as sane as possible and at the bleeding edge
|
||||||
|
of standards, avoiding breaking changes wherever reasonable. We support the last
|
||||||
* https://github.com/miekg/unbound -- Go wrapper for the Unbound resolver.
|
two versions of Go, currently: 1.5 and 1.6.
|
||||||
|
|
||||||
# Goals
|
# Goals
|
||||||
|
|
||||||
|
@ -33,6 +33,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
|
||||||
* https://github.com/fcambus/rrda
|
* https://github.com/fcambus/rrda
|
||||||
* https://github.com/kenshinx/godns
|
* https://github.com/kenshinx/godns
|
||||||
* https://github.com/skynetservices/skydns
|
* https://github.com/skynetservices/skydns
|
||||||
|
* https://github.com/hashicorp/consul
|
||||||
* https://github.com/DevelopersPL/godnsagent
|
* https://github.com/DevelopersPL/godnsagent
|
||||||
* https://github.com/duedil-ltd/discodns
|
* https://github.com/duedil-ltd/discodns
|
||||||
* https://github.com/StalkR/dns-reverse-proxy
|
* https://github.com/StalkR/dns-reverse-proxy
|
||||||
|
@ -42,6 +43,16 @@ A not-so-up-to-date-list-that-may-be-actually-current:
|
||||||
* https://play.google.com/store/apps/details?id=com.turbobytes.dig
|
* https://play.google.com/store/apps/details?id=com.turbobytes.dig
|
||||||
* https://github.com/fcambus/statzone
|
* https://github.com/fcambus/statzone
|
||||||
* https://github.com/benschw/dns-clb-go
|
* https://github.com/benschw/dns-clb-go
|
||||||
|
* https://github.com/corny/dnscheck for http://public-dns.info/
|
||||||
|
* https://namesmith.io
|
||||||
|
* https://github.com/miekg/unbound
|
||||||
|
* https://github.com/miekg/exdns
|
||||||
|
* https://dnslookup.org
|
||||||
|
* https://github.com/looterz/grimd
|
||||||
|
* https://github.com/phamhongviet/serf-dns
|
||||||
|
* https://github.com/mehrdadrad/mylg
|
||||||
|
* https://github.com/bamarni/dockness
|
||||||
|
* https://github.com/fffaraz/microdns
|
||||||
|
|
||||||
Send pull request if you want to be listed here.
|
Send pull request if you want to be listed here.
|
||||||
|
|
||||||
|
@ -55,9 +66,10 @@ Send pull request if you want to be listed here.
|
||||||
* Server side programming (mimicking the net/http package);
|
* Server side programming (mimicking the net/http package);
|
||||||
* Client side programming;
|
* Client side programming;
|
||||||
* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA;
|
* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA;
|
||||||
* EDNS0, NSID;
|
* EDNS0, NSID, Cookies;
|
||||||
* AXFR/IXFR;
|
* AXFR/IXFR;
|
||||||
* TSIG, SIG(0);
|
* TSIG, SIG(0);
|
||||||
|
* DNS over TLS: optional encrypted connection between client and server;
|
||||||
* DNS name compression;
|
* DNS name compression;
|
||||||
* Depends only on the standard library.
|
* Depends only on the standard library.
|
||||||
|
|
||||||
|
@ -104,7 +116,6 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
|
||||||
* 340{1,2,3} - NAPTR record
|
* 340{1,2,3} - NAPTR record
|
||||||
* 3445 - Limiting the scope of (DNS)KEY
|
* 3445 - Limiting the scope of (DNS)KEY
|
||||||
* 3597 - Unknown RRs
|
* 3597 - Unknown RRs
|
||||||
* 4025 - IPSECKEY
|
|
||||||
* 403{3,4,5} - DNSSEC + validation functions
|
* 403{3,4,5} - DNSSEC + validation functions
|
||||||
* 4255 - SSHFP record
|
* 4255 - SSHFP record
|
||||||
* 4343 - Case insensitivity
|
* 4343 - Case insensitivity
|
||||||
|
@ -123,6 +134,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
|
||||||
* 6605 - ECDSA
|
* 6605 - ECDSA
|
||||||
* 6725 - IANA Registry Update
|
* 6725 - IANA Registry Update
|
||||||
* 6742 - ILNP DNS
|
* 6742 - ILNP DNS
|
||||||
|
* 6840 - Clarifications and Implementation Notes for DNS Security
|
||||||
* 6844 - CAA record
|
* 6844 - CAA record
|
||||||
* 6891 - EDNS0 update
|
* 6891 - EDNS0 update
|
||||||
* 6895 - DNS IANA considerations
|
* 6895 - DNS IANA considerations
|
||||||
|
@ -130,6 +142,8 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
|
||||||
* 7043 - EUI48/EUI64 records
|
* 7043 - EUI48/EUI64 records
|
||||||
* 7314 - DNS (EDNS) EXPIRE Option
|
* 7314 - DNS (EDNS) EXPIRE Option
|
||||||
* 7553 - URI record
|
* 7553 - URI record
|
||||||
|
* 7858 - DNS over TLS: Initiation and Performance Considerations (draft)
|
||||||
|
* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies)
|
||||||
* xxxx - EDNS0 DNS Update Lease (draft)
|
* xxxx - EDNS0 DNS Update Lease (draft)
|
||||||
|
|
||||||
## Loosely based upon
|
## Loosely based upon
|
||||||
|
@ -138,11 +152,3 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
|
||||||
* `NSD`
|
* `NSD`
|
||||||
* `Net::DNS`
|
* `Net::DNS`
|
||||||
* `GRONG`
|
* `GRONG`
|
||||||
|
|
||||||
## TODO
|
|
||||||
|
|
||||||
* privatekey.Precompute() when signing?
|
|
||||||
* Last remaining RRs: APL, ATMA, A6, NSAP and NXT.
|
|
||||||
* Missing in parsing: ISDN, UNSPEC, NSAP and ATMA.
|
|
||||||
* NSEC(3) cover/match/closest enclose.
|
|
||||||
* Replies with TC bit are not parsed to the end.
|
|
||||||
|
|
161
vendor/github.com/miekg/dns/client.go
generated
vendored
161
vendor/github.com/miekg/dns/client.go
generated
vendored
|
@ -4,6 +4,8 @@ package dns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/binary"
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"time"
|
"time"
|
||||||
|
@ -24,27 +26,22 @@ type Conn struct {
|
||||||
|
|
||||||
// A Client defines parameters for a DNS client.
|
// A Client defines parameters for a DNS client.
|
||||||
type Client struct {
|
type Client struct {
|
||||||
Net string // if "tcp" a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
|
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
|
||||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
|
TLSConfig *tls.Config // TLS connection configuration
|
||||||
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
|
Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero
|
||||||
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
|
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||||
|
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||||
|
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
|
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
|
||||||
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
|
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
|
||||||
group singleflight
|
group singleflight
|
||||||
}
|
}
|
||||||
|
|
||||||
// Exchange performs a synchronous UDP query. It sends the message m to the address
|
// Exchange performs a synchronous UDP query. It sends the message m to the address
|
||||||
// contained in a and waits for an reply. Exchange does not retry a failed query, nor
|
// contained in a and waits for a reply. Exchange does not retry a failed query, nor
|
||||||
// will it fall back to TCP in case of truncation.
|
// will it fall back to TCP in case of truncation.
|
||||||
// If you need to send a DNS message on an already existing connection, you can use the
|
// See client.Exchange for more information on setting larger buffer sizes.
|
||||||
// following:
|
|
||||||
//
|
|
||||||
// co := &dns.Conn{Conn: c} // c is your net.Conn
|
|
||||||
// co.WriteMsg(m)
|
|
||||||
// in, err := co.ReadMsg()
|
|
||||||
// co.Close()
|
|
||||||
//
|
|
||||||
func Exchange(m *Msg, a string) (r *Msg, err error) {
|
func Exchange(m *Msg, a string) (r *Msg, err error) {
|
||||||
var co *Conn
|
var co *Conn
|
||||||
co, err = DialTimeout("udp", a, dnsTimeout)
|
co, err = DialTimeout("udp", a, dnsTimeout)
|
||||||
|
@ -53,8 +50,6 @@ func Exchange(m *Msg, a string) (r *Msg, err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
defer co.Close()
|
defer co.Close()
|
||||||
co.SetReadDeadline(time.Now().Add(dnsTimeout))
|
|
||||||
co.SetWriteDeadline(time.Now().Add(dnsTimeout))
|
|
||||||
|
|
||||||
opt := m.IsEdns0()
|
opt := m.IsEdns0()
|
||||||
// If EDNS0 is used use that for size.
|
// If EDNS0 is used use that for size.
|
||||||
|
@ -62,9 +57,12 @@ func Exchange(m *Msg, a string) (r *Msg, err error) {
|
||||||
co.UDPSize = opt.UDPSize()
|
co.UDPSize = opt.UDPSize()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
co.SetWriteDeadline(time.Now().Add(dnsTimeout))
|
||||||
if err = co.WriteMsg(m); err != nil {
|
if err = co.WriteMsg(m); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
co.SetReadDeadline(time.Now().Add(dnsTimeout))
|
||||||
r, err = co.ReadMsg()
|
r, err = co.ReadMsg()
|
||||||
if err == nil && r.Id != m.Id {
|
if err == nil && r.Id != m.Id {
|
||||||
err = ErrId
|
err = ErrId
|
||||||
|
@ -95,14 +93,18 @@ func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
|
||||||
return r, err
|
return r, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Exchange performs an synchronous query. It sends the message m to the address
|
// Exchange performs a synchronous query. It sends the message m to the address
|
||||||
// contained in a and waits for an reply. Basic use pattern with a *dns.Client:
|
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
|
||||||
//
|
//
|
||||||
// c := new(dns.Client)
|
// c := new(dns.Client)
|
||||||
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
|
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
|
||||||
//
|
//
|
||||||
// Exchange does not retry a failed query, nor will it fall back to TCP in
|
// Exchange does not retry a failed query, nor will it fall back to TCP in
|
||||||
// case of truncation.
|
// case of truncation.
|
||||||
|
// It is up to the caller to create a message that allows for larger responses to be
|
||||||
|
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
|
||||||
|
// buffer, see SetEdns0. Messsages without an OPT RR will fallback to the historic limit
|
||||||
|
// of 512 bytes.
|
||||||
func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||||
if !c.SingleInflight {
|
if !c.SingleInflight {
|
||||||
return c.exchange(m, a)
|
return c.exchange(m, a)
|
||||||
|
@ -129,6 +131,9 @@ func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) dialTimeout() time.Duration {
|
func (c *Client) dialTimeout() time.Duration {
|
||||||
|
if c.Timeout != 0 {
|
||||||
|
return c.Timeout
|
||||||
|
}
|
||||||
if c.DialTimeout != 0 {
|
if c.DialTimeout != 0 {
|
||||||
return c.DialTimeout
|
return c.DialTimeout
|
||||||
}
|
}
|
||||||
|
@ -151,11 +156,36 @@ func (c *Client) writeTimeout() time.Duration {
|
||||||
|
|
||||||
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||||
var co *Conn
|
var co *Conn
|
||||||
if c.Net == "" {
|
network := "udp"
|
||||||
co, err = DialTimeout("udp", a, c.dialTimeout())
|
tls := false
|
||||||
} else {
|
|
||||||
co, err = DialTimeout(c.Net, a, c.dialTimeout())
|
switch c.Net {
|
||||||
|
case "tcp-tls":
|
||||||
|
network = "tcp"
|
||||||
|
tls = true
|
||||||
|
case "tcp4-tls":
|
||||||
|
network = "tcp4"
|
||||||
|
tls = true
|
||||||
|
case "tcp6-tls":
|
||||||
|
network = "tcp6"
|
||||||
|
tls = true
|
||||||
|
default:
|
||||||
|
if c.Net != "" {
|
||||||
|
network = c.Net
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var deadline time.Time
|
||||||
|
if c.Timeout != 0 {
|
||||||
|
deadline = time.Now().Add(c.Timeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
if tls {
|
||||||
|
co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout())
|
||||||
|
} else {
|
||||||
|
co, err = DialTimeout(network, a, c.dialTimeout())
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
@ -171,13 +201,13 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
|
||||||
co.UDPSize = c.UDPSize
|
co.UDPSize = c.UDPSize
|
||||||
}
|
}
|
||||||
|
|
||||||
co.SetReadDeadline(time.Now().Add(c.readTimeout()))
|
|
||||||
co.SetWriteDeadline(time.Now().Add(c.writeTimeout()))
|
|
||||||
|
|
||||||
co.TsigSecret = c.TsigSecret
|
co.TsigSecret = c.TsigSecret
|
||||||
|
co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout()))
|
||||||
if err = co.WriteMsg(m); err != nil {
|
if err = co.WriteMsg(m); err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout()))
|
||||||
r, err = co.ReadMsg()
|
r, err = co.ReadMsg()
|
||||||
if err == nil && r.Id != m.Id {
|
if err == nil && r.Id != m.Id {
|
||||||
err = ErrId
|
err = ErrId
|
||||||
|
@ -196,6 +226,12 @@ func (co *Conn) ReadMsg() (*Msg, error) {
|
||||||
|
|
||||||
m := new(Msg)
|
m := new(Msg)
|
||||||
if err := m.Unpack(p); err != nil {
|
if err := m.Unpack(p); err != nil {
|
||||||
|
// If ErrTruncated was returned, we still want to allow the user to use
|
||||||
|
// the message, but naively they can just check err if they don't want
|
||||||
|
// to use a truncated message
|
||||||
|
if err == ErrTruncated {
|
||||||
|
return m, err
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if t := m.IsTsig(); t != nil {
|
if t := m.IsTsig(); t != nil {
|
||||||
|
@ -218,21 +254,26 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
|
|
||||||
if t, ok := co.Conn.(*net.TCPConn); ok {
|
switch t := co.Conn.(type) {
|
||||||
|
case *net.TCPConn, *tls.Conn:
|
||||||
|
r := t.(io.Reader)
|
||||||
|
|
||||||
// First two bytes specify the length of the entire message.
|
// First two bytes specify the length of the entire message.
|
||||||
l, err := tcpMsgLen(t)
|
l, err := tcpMsgLen(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
p = make([]byte, l)
|
p = make([]byte, l)
|
||||||
n, err = tcpRead(t, p)
|
n, err = tcpRead(r, p)
|
||||||
} else {
|
co.rtt = time.Since(co.t)
|
||||||
|
default:
|
||||||
if co.UDPSize > MinMsgSize {
|
if co.UDPSize > MinMsgSize {
|
||||||
p = make([]byte, co.UDPSize)
|
p = make([]byte, co.UDPSize)
|
||||||
} else {
|
} else {
|
||||||
p = make([]byte, MinMsgSize)
|
p = make([]byte, MinMsgSize)
|
||||||
}
|
}
|
||||||
n, err = co.Read(p)
|
n, err = co.Read(p)
|
||||||
|
co.rtt = time.Since(co.t)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -243,15 +284,17 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
|
||||||
|
|
||||||
p = p[:n]
|
p = p[:n]
|
||||||
if hdr != nil {
|
if hdr != nil {
|
||||||
if _, err = UnpackStruct(hdr, p, 0); err != nil {
|
dh, _, err := unpackMsgHdr(p, 0)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
*hdr = dh
|
||||||
}
|
}
|
||||||
return p, err
|
return p, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length.
|
// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length.
|
||||||
func tcpMsgLen(t *net.TCPConn) (int, error) {
|
func tcpMsgLen(t io.Reader) (int, error) {
|
||||||
p := []byte{0, 0}
|
p := []byte{0, 0}
|
||||||
n, err := t.Read(p)
|
n, err := t.Read(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -260,7 +303,7 @@ func tcpMsgLen(t *net.TCPConn) (int, error) {
|
||||||
if n != 2 {
|
if n != 2 {
|
||||||
return 0, ErrShortRead
|
return 0, ErrShortRead
|
||||||
}
|
}
|
||||||
l, _ := unpackUint16(p, 0)
|
l := binary.BigEndian.Uint16(p)
|
||||||
if l == 0 {
|
if l == 0 {
|
||||||
return 0, ErrShortRead
|
return 0, ErrShortRead
|
||||||
}
|
}
|
||||||
|
@ -268,7 +311,7 @@ func tcpMsgLen(t *net.TCPConn) (int, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
|
// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
|
||||||
func tcpRead(t *net.TCPConn, p []byte) (int, error) {
|
func tcpRead(t io.Reader, p []byte) (int, error) {
|
||||||
n, err := t.Read(p)
|
n, err := t.Read(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return n, err
|
return n, err
|
||||||
|
@ -291,27 +334,28 @@ func (co *Conn) Read(p []byte) (n int, err error) {
|
||||||
if len(p) < 2 {
|
if len(p) < 2 {
|
||||||
return 0, io.ErrShortBuffer
|
return 0, io.ErrShortBuffer
|
||||||
}
|
}
|
||||||
if t, ok := co.Conn.(*net.TCPConn); ok {
|
switch t := co.Conn.(type) {
|
||||||
l, err := tcpMsgLen(t)
|
case *net.TCPConn, *tls.Conn:
|
||||||
|
r := t.(io.Reader)
|
||||||
|
|
||||||
|
l, err := tcpMsgLen(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
if l > len(p) {
|
if l > len(p) {
|
||||||
return int(l), io.ErrShortBuffer
|
return int(l), io.ErrShortBuffer
|
||||||
}
|
}
|
||||||
return tcpRead(t, p[:l])
|
return tcpRead(r, p[:l])
|
||||||
}
|
}
|
||||||
// UDP connection
|
// UDP connection
|
||||||
n, err = co.Conn.Read(p)
|
n, err = co.Conn.Read(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
co.rtt = time.Since(co.t)
|
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteMsg sends a message throught the connection co.
|
// WriteMsg sends a message through the connection co.
|
||||||
// If the message m contains a TSIG record the transaction
|
// If the message m contains a TSIG record the transaction
|
||||||
// signature is calculated.
|
// signature is calculated.
|
||||||
func (co *Conn) WriteMsg(m *Msg) (err error) {
|
func (co *Conn) WriteMsg(m *Msg) (err error) {
|
||||||
|
@ -322,7 +366,7 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
|
||||||
return ErrSecret
|
return ErrSecret
|
||||||
}
|
}
|
||||||
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
|
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
|
||||||
// Set for the next read, allthough only used in zone transfers
|
// Set for the next read, although only used in zone transfers
|
||||||
co.tsigRequestMAC = mac
|
co.tsigRequestMAC = mac
|
||||||
} else {
|
} else {
|
||||||
out, err = m.Pack()
|
out, err = m.Pack()
|
||||||
|
@ -339,7 +383,10 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
|
||||||
|
|
||||||
// Write implements the net.Conn Write method.
|
// Write implements the net.Conn Write method.
|
||||||
func (co *Conn) Write(p []byte) (n int, err error) {
|
func (co *Conn) Write(p []byte) (n int, err error) {
|
||||||
if t, ok := co.Conn.(*net.TCPConn); ok {
|
switch t := co.Conn.(type) {
|
||||||
|
case *net.TCPConn, *tls.Conn:
|
||||||
|
w := t.(io.Writer)
|
||||||
|
|
||||||
lp := len(p)
|
lp := len(p)
|
||||||
if lp < 2 {
|
if lp < 2 {
|
||||||
return 0, io.ErrShortBuffer
|
return 0, io.ErrShortBuffer
|
||||||
|
@ -348,9 +395,9 @@ func (co *Conn) Write(p []byte) (n int, err error) {
|
||||||
return 0, &Error{err: "message too large"}
|
return 0, &Error{err: "message too large"}
|
||||||
}
|
}
|
||||||
l := make([]byte, 2, lp+2)
|
l := make([]byte, 2, lp+2)
|
||||||
l[0], l[1] = packUint16(uint16(lp))
|
binary.BigEndian.PutUint16(l, uint16(lp))
|
||||||
p = append(l, p...)
|
p = append(l, p...)
|
||||||
n, err := io.Copy(t, bytes.NewReader(p))
|
n, err := io.Copy(w, bytes.NewReader(p))
|
||||||
return int(n), err
|
return int(n), err
|
||||||
}
|
}
|
||||||
n, err = co.Conn.(*net.UDPConn).Write(p)
|
n, err = co.Conn.(*net.UDPConn).Write(p)
|
||||||
|
@ -376,3 +423,33 @@ func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, er
|
||||||
}
|
}
|
||||||
return conn, nil
|
return conn, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DialWithTLS connects to the address on the named network with TLS.
|
||||||
|
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
|
||||||
|
conn = new(Conn)
|
||||||
|
conn.Conn, err = tls.Dial(network, address, tlsConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
|
||||||
|
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
|
||||||
|
var dialer net.Dialer
|
||||||
|
dialer.Timeout = timeout
|
||||||
|
|
||||||
|
conn = new(Conn)
|
||||||
|
conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time {
|
||||||
|
if deadline.IsZero() {
|
||||||
|
return time.Now().Add(timeout)
|
||||||
|
}
|
||||||
|
return deadline
|
||||||
|
}
|
||||||
|
|
44
vendor/github.com/miekg/dns/dane.go
generated
vendored
Normal file
44
vendor/github.com/miekg/dns/dane.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"crypto/sha512"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
|
||||||
|
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
|
||||||
|
switch matchingType {
|
||||||
|
case 0:
|
||||||
|
switch selector {
|
||||||
|
case 0:
|
||||||
|
return hex.EncodeToString(cert.Raw), nil
|
||||||
|
case 1:
|
||||||
|
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
h := sha256.New()
|
||||||
|
switch selector {
|
||||||
|
case 0:
|
||||||
|
io.WriteString(h, string(cert.Raw))
|
||||||
|
return hex.EncodeToString(h.Sum(nil)), nil
|
||||||
|
case 1:
|
||||||
|
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
|
||||||
|
return hex.EncodeToString(h.Sum(nil)), nil
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
h := sha512.New()
|
||||||
|
switch selector {
|
||||||
|
case 0:
|
||||||
|
io.WriteString(h, string(cert.Raw))
|
||||||
|
return hex.EncodeToString(h.Sum(nil)), nil
|
||||||
|
case 1:
|
||||||
|
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
|
||||||
|
return hex.EncodeToString(h.Sum(nil)), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", errors.New("dns: bad MatchingType or Selector")
|
||||||
|
}
|
27
vendor/github.com/miekg/dns/defaults.go
generated
vendored
27
vendor/github.com/miekg/dns/defaults.go
generated
vendored
|
@ -142,26 +142,33 @@ func (dns *Msg) IsTsig() *TSIG {
|
||||||
// record in the additional section will do. It returns the OPT record
|
// record in the additional section will do. It returns the OPT record
|
||||||
// found or nil.
|
// found or nil.
|
||||||
func (dns *Msg) IsEdns0() *OPT {
|
func (dns *Msg) IsEdns0() *OPT {
|
||||||
for _, r := range dns.Extra {
|
// EDNS0 is at the end of the additional section, start there.
|
||||||
if r.Header().Rrtype == TypeOPT {
|
// We might want to change this to *only* look at the last two
|
||||||
return r.(*OPT)
|
// records. So we see TSIG and/or OPT - this a slightly bigger
|
||||||
|
// change though.
|
||||||
|
for i := len(dns.Extra) - 1; i >= 0; i-- {
|
||||||
|
if dns.Extra[i].Header().Rrtype == TypeOPT {
|
||||||
|
return dns.Extra[i].(*OPT)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsDomainName checks if s is a valid domainname, it returns
|
// IsDomainName checks if s is a valid domain name, it returns the number of
|
||||||
// the number of labels and true, when a domain name is valid.
|
// labels and true, when a domain name is valid. Note that non fully qualified
|
||||||
// Note that non fully qualified domain name is considered valid, in this case the
|
// domain name is considered valid, in this case the last label is counted in
|
||||||
// last label is counted in the number of labels.
|
// the number of labels. When false is returned the number of labels is not
|
||||||
// When false is returned the number of labels is not defined.
|
// defined. Also note that this function is extremely liberal; almost any
|
||||||
|
// string is a valid domain name as the DNS is 8 bit protocol. It checks if each
|
||||||
|
// label fits in 63 characters, but there is no length check for the entire
|
||||||
|
// string s. I.e. a domain name longer than 255 characters is considered valid.
|
||||||
func IsDomainName(s string) (labels int, ok bool) {
|
func IsDomainName(s string) (labels int, ok bool) {
|
||||||
_, labels, err := packDomainName(s, nil, 0, nil, false)
|
_, labels, err := packDomainName(s, nil, 0, nil, false)
|
||||||
return labels, err == nil
|
return labels, err == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsSubDomain checks if child is indeed a child of the parent. Both child and
|
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
|
||||||
// parent are *not* downcased before doing the comparison.
|
// are the same domain true is returned as well.
|
||||||
func IsSubDomain(parent, child string) bool {
|
func IsSubDomain(parent, child string) bool {
|
||||||
// Entire child is contained in parent
|
// Entire child is contained in parent
|
||||||
return CompareDomainName(parent, child) == CountLabel(parent)
|
return CompareDomainName(parent, child) == CountLabel(parent)
|
||||||
|
|
34
vendor/github.com/miekg/dns/dns.go
generated
vendored
34
vendor/github.com/miekg/dns/dns.go
generated
vendored
|
@ -4,16 +4,14 @@ import "strconv"
|
||||||
|
|
||||||
const (
|
const (
|
||||||
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
|
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
|
||||||
// DefaultMsgSize is the standard default for messages larger than 512 bytes.
|
|
||||||
DefaultMsgSize = 4096
|
|
||||||
// MinMsgSize is the minimal size of a DNS packet.
|
|
||||||
MinMsgSize = 512
|
|
||||||
// MaxMsgSize is the largest possible DNS packet.
|
|
||||||
MaxMsgSize = 65535
|
|
||||||
defaultTtl = 3600 // Default internal TTL.
|
defaultTtl = 3600 // Default internal TTL.
|
||||||
|
|
||||||
|
DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes.
|
||||||
|
MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet.
|
||||||
|
MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet.
|
||||||
)
|
)
|
||||||
|
|
||||||
// Error represents a DNS error
|
// Error represents a DNS error.
|
||||||
type Error struct{ err string }
|
type Error struct{ err string }
|
||||||
|
|
||||||
func (e *Error) Error() string {
|
func (e *Error) Error() string {
|
||||||
|
@ -30,10 +28,13 @@ type RR interface {
|
||||||
Header() *RR_Header
|
Header() *RR_Header
|
||||||
// String returns the text representation of the resource record.
|
// String returns the text representation of the resource record.
|
||||||
String() string
|
String() string
|
||||||
|
|
||||||
// copy returns a copy of the RR
|
// copy returns a copy of the RR
|
||||||
copy() RR
|
copy() RR
|
||||||
// len returns the length (in octets) of the uncompressed RR in wire format.
|
// len returns the length (in octets) of the uncompressed RR in wire format.
|
||||||
len() int
|
len() int
|
||||||
|
// pack packs an RR into wire format.
|
||||||
|
pack([]byte, int, map[string]int, bool) (int, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RR_Header is the header all DNS resource records share.
|
// RR_Header is the header all DNS resource records share.
|
||||||
|
@ -42,13 +43,13 @@ type RR_Header struct {
|
||||||
Rrtype uint16
|
Rrtype uint16
|
||||||
Class uint16
|
Class uint16
|
||||||
Ttl uint32
|
Ttl uint32
|
||||||
Rdlength uint16 // length of data after header
|
Rdlength uint16 // Length of data after header.
|
||||||
}
|
}
|
||||||
|
|
||||||
// Header returns itself. This is here to make RR_Header implement the RR interface.
|
// Header returns itself. This is here to make RR_Header implements the RR interface.
|
||||||
func (h *RR_Header) Header() *RR_Header { return h }
|
func (h *RR_Header) Header() *RR_Header { return h }
|
||||||
|
|
||||||
// Just to imlement the RR interface.
|
// Just to implement the RR interface.
|
||||||
func (h *RR_Header) copy() RR { return nil }
|
func (h *RR_Header) copy() RR { return nil }
|
||||||
|
|
||||||
func (h *RR_Header) copyHeader() *RR_Header {
|
func (h *RR_Header) copyHeader() *RR_Header {
|
||||||
|
@ -82,19 +83,22 @@ func (h *RR_Header) len() int {
|
||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
// ToRFC3597 converts a known RR to the unknown RR representation
|
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
|
||||||
// from RFC 3597.
|
|
||||||
func (rr *RFC3597) ToRFC3597(r RR) error {
|
func (rr *RFC3597) ToRFC3597(r RR) error {
|
||||||
buf := make([]byte, r.len()*2)
|
buf := make([]byte, r.len()*2)
|
||||||
off, err := PackStruct(r, buf, 0)
|
off, err := PackRR(r, buf, 0, nil, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
buf = buf[:off]
|
buf = buf[:off]
|
||||||
rawSetRdlength(buf, 0, off)
|
if int(r.Header().Rdlength) > off {
|
||||||
_, err = UnpackStruct(rr, buf, 0)
|
return ErrBuf
|
||||||
|
}
|
||||||
|
|
||||||
|
rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
*rr = *rfc3597.(*RFC3597)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
96
vendor/github.com/miekg/dns/dnssec.go
generated
vendored
96
vendor/github.com/miekg/dns/dnssec.go
generated
vendored
|
@ -13,6 +13,7 @@ import (
|
||||||
_ "crypto/sha256"
|
_ "crypto/sha256"
|
||||||
_ "crypto/sha512"
|
_ "crypto/sha512"
|
||||||
"encoding/asn1"
|
"encoding/asn1"
|
||||||
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"math/big"
|
"math/big"
|
||||||
"sort"
|
"sort"
|
||||||
|
@ -103,9 +104,7 @@ const (
|
||||||
ZONE = 1 << 8
|
ZONE = 1 << 8
|
||||||
)
|
)
|
||||||
|
|
||||||
// The RRSIG needs to be converted to wireformat with some of
|
// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
|
||||||
// the rdata (the signature) missing. Use this struct to easy
|
|
||||||
// the conversion (and re-use the pack/unpack functions).
|
|
||||||
type rrsigWireFmt struct {
|
type rrsigWireFmt struct {
|
||||||
TypeCovered uint16
|
TypeCovered uint16
|
||||||
Algorithm uint8
|
Algorithm uint8
|
||||||
|
@ -144,7 +143,7 @@ func (k *DNSKEY) KeyTag() uint16 {
|
||||||
// at the base64 values. But I'm lazy.
|
// at the base64 values. But I'm lazy.
|
||||||
modulus, _ := fromBase64([]byte(k.PublicKey))
|
modulus, _ := fromBase64([]byte(k.PublicKey))
|
||||||
if len(modulus) > 1 {
|
if len(modulus) > 1 {
|
||||||
x, _ := unpackUint16(modulus, len(modulus)-2)
|
x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
|
||||||
keytag = int(x)
|
keytag = int(x)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
|
@ -154,7 +153,7 @@ func (k *DNSKEY) KeyTag() uint16 {
|
||||||
keywire.Algorithm = k.Algorithm
|
keywire.Algorithm = k.Algorithm
|
||||||
keywire.PublicKey = k.PublicKey
|
keywire.PublicKey = k.PublicKey
|
||||||
wire := make([]byte, DefaultMsgSize)
|
wire := make([]byte, DefaultMsgSize)
|
||||||
n, err := PackStruct(keywire, wire, 0)
|
n, err := packKeyWire(keywire, wire)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
@ -192,7 +191,7 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
|
||||||
keywire.Algorithm = k.Algorithm
|
keywire.Algorithm = k.Algorithm
|
||||||
keywire.PublicKey = k.PublicKey
|
keywire.PublicKey = k.PublicKey
|
||||||
wire := make([]byte, DefaultMsgSize)
|
wire := make([]byte, DefaultMsgSize)
|
||||||
n, err := PackStruct(keywire, wire, 0)
|
n, err := packKeyWire(keywire, wire)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -248,13 +247,12 @@ func (d *DS) ToCDS() *CDS {
|
||||||
return c
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sign signs an RRSet. The signature needs to be filled in with
|
// Sign signs an RRSet. The signature needs to be filled in with the values:
|
||||||
// the values: Inception, Expiration, KeyTag, SignerName and Algorithm.
|
// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
|
||||||
// The rest is copied from the RRset. Sign returns true when the signing went OK,
|
// from the RRset. Sign returns a non-nill error when the signing went OK.
|
||||||
// otherwise false.
|
// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non
|
||||||
// There is no check if RRSet is a proper (RFC 2181) RRSet.
|
// zero, it is used as-is, otherwise the TTL of the RRset is used as the
|
||||||
// If OrigTTL is non zero, it is used as-is, otherwise the TTL of the RRset
|
// OrigTTL.
|
||||||
// is used as the OrigTTL.
|
|
||||||
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
|
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
|
||||||
if k == nil {
|
if k == nil {
|
||||||
return ErrPrivKey
|
return ErrPrivKey
|
||||||
|
@ -290,7 +288,7 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
|
||||||
|
|
||||||
// Create the desired binary blob
|
// Create the desired binary blob
|
||||||
signdata := make([]byte, DefaultMsgSize)
|
signdata := make([]byte, DefaultMsgSize)
|
||||||
n, err := PackStruct(sigwire, signdata, 0)
|
n, err := packSigWire(sigwire, signdata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -408,7 +406,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
||||||
sigwire.SignerName = strings.ToLower(rr.SignerName)
|
sigwire.SignerName = strings.ToLower(rr.SignerName)
|
||||||
// Create the desired binary blob
|
// Create the desired binary blob
|
||||||
signeddata := make([]byte, DefaultMsgSize)
|
signeddata := make([]byte, DefaultMsgSize)
|
||||||
n, err := PackStruct(sigwire, signeddata, 0)
|
n, err := packSigWire(sigwire, signeddata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -421,8 +419,8 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
||||||
|
|
||||||
sigbuf := rr.sigBuf() // Get the binary signature data
|
sigbuf := rr.sigBuf() // Get the binary signature data
|
||||||
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
|
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
|
||||||
// TODO(mg)
|
// TODO(miek)
|
||||||
// remove the domain name and assume its our
|
// remove the domain name and assume its ours?
|
||||||
}
|
}
|
||||||
|
|
||||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||||
|
@ -609,6 +607,12 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
|
||||||
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
|
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
|
||||||
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
|
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
|
||||||
// SRV, DNAME, A6
|
// SRV, DNAME, A6
|
||||||
|
//
|
||||||
|
// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
|
||||||
|
// Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
|
||||||
|
// that needs conversion to lowercase, and twice at that. Since HINFO
|
||||||
|
// records contain no domain names, they are not subject to case
|
||||||
|
// conversion.
|
||||||
switch x := r1.(type) {
|
switch x := r1.(type) {
|
||||||
case *NS:
|
case *NS:
|
||||||
x.Ns = strings.ToLower(x.Ns)
|
x.Ns = strings.ToLower(x.Ns)
|
||||||
|
@ -657,3 +661,61 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
|
||||||
}
|
}
|
||||||
return buf, nil
|
return buf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
|
||||||
|
// copied from zmsg.go RRSIG packing
|
||||||
|
off, err := packUint16(sw.TypeCovered, msg, 0)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint8(sw.Algorithm, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint8(sw.Labels, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint32(sw.OrigTtl, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint32(sw.Expiration, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint32(sw.Inception, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint16(sw.KeyTag, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
|
||||||
|
// copied from zmsg.go DNSKEY packing
|
||||||
|
off, err := packUint16(dw.Flags, msg, 0)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint8(dw.Protocol, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint8(dw.Algorithm, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packStringBase64(dw.PublicKey, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
30
vendor/github.com/miekg/dns/dnssec_keyscan.go
generated
vendored
30
vendor/github.com/miekg/dns/dnssec_keyscan.go
generated
vendored
|
@ -14,7 +14,7 @@ import (
|
||||||
// NewPrivateKey returns a PrivateKey by parsing the string s.
|
// NewPrivateKey returns a PrivateKey by parsing the string s.
|
||||||
// s should be in the same form of the BIND private key files.
|
// s should be in the same form of the BIND private key files.
|
||||||
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
|
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
|
||||||
if s[len(s)-1] != '\n' { // We need a closing newline
|
if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
|
||||||
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
|
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
|
||||||
}
|
}
|
||||||
return k.ReadPrivateKey(strings.NewReader(s), "")
|
return k.ReadPrivateKey(strings.NewReader(s), "")
|
||||||
|
@ -25,9 +25,9 @@ func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
|
||||||
// The public key must be known, because some cryptographic algorithms embed
|
// The public key must be known, because some cryptographic algorithms embed
|
||||||
// the public inside the privatekey.
|
// the public inside the privatekey.
|
||||||
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
|
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
|
||||||
m, e := parseKey(q, file)
|
m, err := parseKey(q, file)
|
||||||
if m == nil {
|
if m == nil {
|
||||||
return nil, e
|
return nil, err
|
||||||
}
|
}
|
||||||
if _, ok := m["private-key-format"]; !ok {
|
if _, ok := m["private-key-format"]; !ok {
|
||||||
return nil, ErrPrivKey
|
return nil, ErrPrivKey
|
||||||
|
@ -42,16 +42,16 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
|
||||||
}
|
}
|
||||||
switch uint8(algo) {
|
switch uint8(algo) {
|
||||||
case DSA:
|
case DSA:
|
||||||
priv, e := readPrivateKeyDSA(m)
|
priv, err := readPrivateKeyDSA(m)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
return nil, e
|
return nil, err
|
||||||
}
|
}
|
||||||
pub := k.publicKeyDSA()
|
pub := k.publicKeyDSA()
|
||||||
if pub == nil {
|
if pub == nil {
|
||||||
return nil, ErrKey
|
return nil, ErrKey
|
||||||
}
|
}
|
||||||
priv.PublicKey = *pub
|
priv.PublicKey = *pub
|
||||||
return priv, e
|
return priv, nil
|
||||||
case RSAMD5:
|
case RSAMD5:
|
||||||
fallthrough
|
fallthrough
|
||||||
case RSASHA1:
|
case RSASHA1:
|
||||||
|
@ -61,31 +61,31 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
|
||||||
case RSASHA256:
|
case RSASHA256:
|
||||||
fallthrough
|
fallthrough
|
||||||
case RSASHA512:
|
case RSASHA512:
|
||||||
priv, e := readPrivateKeyRSA(m)
|
priv, err := readPrivateKeyRSA(m)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
return nil, e
|
return nil, err
|
||||||
}
|
}
|
||||||
pub := k.publicKeyRSA()
|
pub := k.publicKeyRSA()
|
||||||
if pub == nil {
|
if pub == nil {
|
||||||
return nil, ErrKey
|
return nil, ErrKey
|
||||||
}
|
}
|
||||||
priv.PublicKey = *pub
|
priv.PublicKey = *pub
|
||||||
return priv, e
|
return priv, nil
|
||||||
case ECCGOST:
|
case ECCGOST:
|
||||||
return nil, ErrPrivKey
|
return nil, ErrPrivKey
|
||||||
case ECDSAP256SHA256:
|
case ECDSAP256SHA256:
|
||||||
fallthrough
|
fallthrough
|
||||||
case ECDSAP384SHA384:
|
case ECDSAP384SHA384:
|
||||||
priv, e := readPrivateKeyECDSA(m)
|
priv, err := readPrivateKeyECDSA(m)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
return nil, e
|
return nil, err
|
||||||
}
|
}
|
||||||
pub := k.publicKeyECDSA()
|
pub := k.publicKeyECDSA()
|
||||||
if pub == nil {
|
if pub == nil {
|
||||||
return nil, ErrKey
|
return nil, ErrKey
|
||||||
}
|
}
|
||||||
priv.PublicKey = *pub
|
priv.PublicKey = *pub
|
||||||
return priv, e
|
return priv, nil
|
||||||
default:
|
default:
|
||||||
return nil, ErrPrivKey
|
return nil, ErrPrivKey
|
||||||
}
|
}
|
||||||
|
|
8
vendor/github.com/miekg/dns/doc.go
generated
vendored
8
vendor/github.com/miekg/dns/doc.go
generated
vendored
|
@ -101,7 +101,7 @@ uses public key cryptography to sign resource records. The
|
||||||
public keys are stored in DNSKEY records and the signatures in RRSIG records.
|
public keys are stored in DNSKEY records and the signatures in RRSIG records.
|
||||||
|
|
||||||
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
|
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
|
||||||
to an request.
|
to a request.
|
||||||
|
|
||||||
m := new(dns.Msg)
|
m := new(dns.Msg)
|
||||||
m.SetEdns0(4096, true)
|
m.SetEdns0(4096, true)
|
||||||
|
@ -184,9 +184,9 @@ Basic use pattern validating and replying to a message that has TSIG set.
|
||||||
dns.HandleFunc(".", handleRequest)
|
dns.HandleFunc(".", handleRequest)
|
||||||
|
|
||||||
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
|
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
|
||||||
m := new(Msg)
|
m := new(dns.Msg)
|
||||||
m.SetReply(r)
|
m.SetReply(r)
|
||||||
if r.IsTsig() {
|
if r.IsTsig() != nil {
|
||||||
if w.TsigStatus() == nil {
|
if w.TsigStatus() == nil {
|
||||||
// *Msg r has an TSIG record and it was validated
|
// *Msg r has an TSIG record and it was validated
|
||||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||||
|
@ -203,7 +203,7 @@ RFC 6895 sets aside a range of type codes for private use. This range
|
||||||
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
|
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
|
||||||
can be used, before requesting an official type code from IANA.
|
can be used, before requesting an official type code from IANA.
|
||||||
|
|
||||||
see http://miek.nl/posts/2014/Sep/21/Private%20RRs%20and%20IDN%20in%20Go%20DNS/ for more
|
see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
|
||||||
information.
|
information.
|
||||||
|
|
||||||
EDNS0
|
EDNS0
|
||||||
|
|
116
vendor/github.com/miekg/dns/edns.go
generated
vendored
116
vendor/github.com/miekg/dns/edns.go
generated
vendored
|
@ -1,6 +1,7 @@
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"errors"
|
"errors"
|
||||||
"net"
|
"net"
|
||||||
|
@ -17,6 +18,7 @@ const (
|
||||||
EDNS0N3U = 0x7 // NSEC3 Hash Understood
|
EDNS0N3U = 0x7 // NSEC3 Hash Understood
|
||||||
EDNS0SUBNET = 0x8 // client-subnet (RFC6891)
|
EDNS0SUBNET = 0x8 // client-subnet (RFC6891)
|
||||||
EDNS0EXPIRE = 0x9 // EDNS0 expire
|
EDNS0EXPIRE = 0x9 // EDNS0 expire
|
||||||
|
EDNS0COOKIE = 0xa // EDNS0 Cookie
|
||||||
EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
|
EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
|
||||||
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
|
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
|
||||||
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
|
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
|
||||||
|
@ -30,11 +32,6 @@ type OPT struct {
|
||||||
Option []EDNS0 `dns:"opt"`
|
Option []EDNS0 `dns:"opt"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Header implements the RR interface.
|
|
||||||
func (rr *OPT) Header() *RR_Header {
|
|
||||||
return &rr.Hdr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rr *OPT) String() string {
|
func (rr *OPT) String() string {
|
||||||
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
|
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
|
||||||
if rr.Do() {
|
if rr.Do() {
|
||||||
|
@ -61,6 +58,8 @@ func (rr *OPT) String() string {
|
||||||
if o.(*EDNS0_SUBNET).DraftOption {
|
if o.(*EDNS0_SUBNET).DraftOption {
|
||||||
s += " (draft)"
|
s += " (draft)"
|
||||||
}
|
}
|
||||||
|
case *EDNS0_COOKIE:
|
||||||
|
s += "\n; COOKIE: " + o.String()
|
||||||
case *EDNS0_UL:
|
case *EDNS0_UL:
|
||||||
s += "\n; UPDATE LEASE: " + o.String()
|
s += "\n; UPDATE LEASE: " + o.String()
|
||||||
case *EDNS0_LLQ:
|
case *EDNS0_LLQ:
|
||||||
|
@ -88,10 +87,6 @@ func (rr *OPT) len() int {
|
||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rr *OPT) copy() RR {
|
|
||||||
return &OPT{*rr.Hdr.copyHeader(), rr.Option}
|
|
||||||
}
|
|
||||||
|
|
||||||
// return the old value -> delete SetVersion?
|
// return the old value -> delete SetVersion?
|
||||||
|
|
||||||
// Version returns the EDNS version used. Only zero is defined.
|
// Version returns the EDNS version used. Only zero is defined.
|
||||||
|
@ -105,13 +100,16 @@ func (rr *OPT) SetVersion(v uint8) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
|
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
|
||||||
func (rr *OPT) ExtendedRcode() uint8 {
|
func (rr *OPT) ExtendedRcode() int {
|
||||||
return uint8((rr.Hdr.Ttl & 0xFF000000) >> 24)
|
return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetExtendedRcode sets the EDNS extended RCODE field.
|
// SetExtendedRcode sets the EDNS extended RCODE field.
|
||||||
func (rr *OPT) SetExtendedRcode(v uint8) {
|
func (rr *OPT) SetExtendedRcode(v uint8) {
|
||||||
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v) << 24)
|
if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have!
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UDPSize returns the UDP buffer size.
|
// UDPSize returns the UDP buffer size.
|
||||||
|
@ -130,12 +128,21 @@ func (rr *OPT) Do() bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetDo sets the DO (DNSSEC OK) bit.
|
// SetDo sets the DO (DNSSEC OK) bit.
|
||||||
func (rr *OPT) SetDo() {
|
// If we pass an argument, set the DO bit to that value.
|
||||||
|
// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored.
|
||||||
|
func (rr *OPT) SetDo(do ...bool) {
|
||||||
|
if len(do) == 1 {
|
||||||
|
if do[0] {
|
||||||
|
rr.Hdr.Ttl |= _DO
|
||||||
|
} else {
|
||||||
|
rr.Hdr.Ttl &^= _DO
|
||||||
|
}
|
||||||
|
} else {
|
||||||
rr.Hdr.Ttl |= _DO
|
rr.Hdr.Ttl |= _DO
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to
|
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
|
||||||
// it.
|
|
||||||
type EDNS0 interface {
|
type EDNS0 interface {
|
||||||
// Option returns the option code for the option.
|
// Option returns the option code for the option.
|
||||||
Option() uint16
|
Option() uint16
|
||||||
|
@ -216,7 +223,7 @@ func (e *EDNS0_SUBNET) Option() uint16 {
|
||||||
|
|
||||||
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
|
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
|
||||||
b := make([]byte, 4)
|
b := make([]byte, 4)
|
||||||
b[0], b[1] = packUint16(e.Family)
|
binary.BigEndian.PutUint16(b[0:], e.Family)
|
||||||
b[2] = e.SourceNetmask
|
b[2] = e.SourceNetmask
|
||||||
b[3] = e.SourceScope
|
b[3] = e.SourceScope
|
||||||
switch e.Family {
|
switch e.Family {
|
||||||
|
@ -250,7 +257,7 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error {
|
||||||
if len(b) < 4 {
|
if len(b) < 4 {
|
||||||
return ErrBuf
|
return ErrBuf
|
||||||
}
|
}
|
||||||
e.Family, _ = unpackUint16(b, 0)
|
e.Family = binary.BigEndian.Uint16(b)
|
||||||
e.SourceNetmask = b[2]
|
e.SourceNetmask = b[2]
|
||||||
e.SourceScope = b[3]
|
e.SourceScope = b[3]
|
||||||
switch e.Family {
|
switch e.Family {
|
||||||
|
@ -292,6 +299,41 @@ func (e *EDNS0_SUBNET) String() (s string) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The Cookie EDNS0 option
|
||||||
|
//
|
||||||
|
// o := new(dns.OPT)
|
||||||
|
// o.Hdr.Name = "."
|
||||||
|
// o.Hdr.Rrtype = dns.TypeOPT
|
||||||
|
// e := new(dns.EDNS0_COOKIE)
|
||||||
|
// e.Code = dns.EDNS0COOKIE
|
||||||
|
// e.Cookie = "24a5ac.."
|
||||||
|
// o.Option = append(o.Option, e)
|
||||||
|
//
|
||||||
|
// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is
|
||||||
|
// always 8 bytes. It may then optionally be followed by the server cookie. The server
|
||||||
|
// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
|
||||||
|
//
|
||||||
|
// cCookie := o.Cookie[:16]
|
||||||
|
// sCookie := o.Cookie[16:]
|
||||||
|
//
|
||||||
|
// There is no guarantee that the Cookie string has a specific length.
|
||||||
|
type EDNS0_COOKIE struct {
|
||||||
|
Code uint16 // Always EDNS0COOKIE
|
||||||
|
Cookie string // Hex-encoded cookie data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *EDNS0_COOKIE) pack() ([]byte, error) {
|
||||||
|
h, err := hex.DecodeString(e.Cookie)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return h, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
|
||||||
|
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
|
||||||
|
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
|
||||||
|
|
||||||
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
|
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
|
||||||
// an expiration on an update RR. This is helpful for clients that cannot clean
|
// an expiration on an update RR. This is helpful for clients that cannot clean
|
||||||
// up after themselves. This is a draft RFC and more information can be found at
|
// up after themselves. This is a draft RFC and more information can be found at
|
||||||
|
@ -315,10 +357,7 @@ func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease),
|
||||||
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
|
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
|
||||||
func (e *EDNS0_UL) pack() ([]byte, error) {
|
func (e *EDNS0_UL) pack() ([]byte, error) {
|
||||||
b := make([]byte, 4)
|
b := make([]byte, 4)
|
||||||
b[0] = byte(e.Lease >> 24)
|
binary.BigEndian.PutUint32(b, e.Lease)
|
||||||
b[1] = byte(e.Lease >> 16)
|
|
||||||
b[2] = byte(e.Lease >> 8)
|
|
||||||
b[3] = byte(e.Lease)
|
|
||||||
return b, nil
|
return b, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -326,7 +365,7 @@ func (e *EDNS0_UL) unpack(b []byte) error {
|
||||||
if len(b) < 4 {
|
if len(b) < 4 {
|
||||||
return ErrBuf
|
return ErrBuf
|
||||||
}
|
}
|
||||||
e.Lease = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
|
e.Lease = binary.BigEndian.Uint32(b)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -345,21 +384,11 @@ func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
|
||||||
|
|
||||||
func (e *EDNS0_LLQ) pack() ([]byte, error) {
|
func (e *EDNS0_LLQ) pack() ([]byte, error) {
|
||||||
b := make([]byte, 18)
|
b := make([]byte, 18)
|
||||||
b[0], b[1] = packUint16(e.Version)
|
binary.BigEndian.PutUint16(b[0:], e.Version)
|
||||||
b[2], b[3] = packUint16(e.Opcode)
|
binary.BigEndian.PutUint16(b[2:], e.Opcode)
|
||||||
b[4], b[5] = packUint16(e.Error)
|
binary.BigEndian.PutUint16(b[4:], e.Error)
|
||||||
b[6] = byte(e.Id >> 56)
|
binary.BigEndian.PutUint64(b[6:], e.Id)
|
||||||
b[7] = byte(e.Id >> 48)
|
binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
|
||||||
b[8] = byte(e.Id >> 40)
|
|
||||||
b[9] = byte(e.Id >> 32)
|
|
||||||
b[10] = byte(e.Id >> 24)
|
|
||||||
b[11] = byte(e.Id >> 16)
|
|
||||||
b[12] = byte(e.Id >> 8)
|
|
||||||
b[13] = byte(e.Id)
|
|
||||||
b[14] = byte(e.LeaseLife >> 24)
|
|
||||||
b[15] = byte(e.LeaseLife >> 16)
|
|
||||||
b[16] = byte(e.LeaseLife >> 8)
|
|
||||||
b[17] = byte(e.LeaseLife)
|
|
||||||
return b, nil
|
return b, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -367,12 +396,11 @@ func (e *EDNS0_LLQ) unpack(b []byte) error {
|
||||||
if len(b) < 18 {
|
if len(b) < 18 {
|
||||||
return ErrBuf
|
return ErrBuf
|
||||||
}
|
}
|
||||||
e.Version, _ = unpackUint16(b, 0)
|
e.Version = binary.BigEndian.Uint16(b[0:])
|
||||||
e.Opcode, _ = unpackUint16(b, 2)
|
e.Opcode = binary.BigEndian.Uint16(b[2:])
|
||||||
e.Error, _ = unpackUint16(b, 4)
|
e.Error = binary.BigEndian.Uint16(b[4:])
|
||||||
e.Id = uint64(b[6])<<56 | uint64(b[6+1])<<48 | uint64(b[6+2])<<40 |
|
e.Id = binary.BigEndian.Uint64(b[6:])
|
||||||
uint64(b[6+3])<<32 | uint64(b[6+4])<<24 | uint64(b[6+5])<<16 | uint64(b[6+6])<<8 | uint64(b[6+7])
|
e.LeaseLife = binary.BigEndian.Uint32(b[14:])
|
||||||
e.LeaseLife = uint32(b[14])<<24 | uint32(b[14+1])<<16 | uint32(b[14+2])<<8 | uint32(b[14+3])
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -468,7 +496,7 @@ func (e *EDNS0_EXPIRE) unpack(b []byte) error {
|
||||||
if len(b) < 4 {
|
if len(b) < 4 {
|
||||||
return ErrBuf
|
return ErrBuf
|
||||||
}
|
}
|
||||||
e.Expire = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
|
e.Expire = binary.BigEndian.Uint32(b)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
9
vendor/github.com/miekg/dns/format.go
generated
vendored
9
vendor/github.com/miekg/dns/format.go
generated
vendored
|
@ -69,15 +69,6 @@ func Field(r RR, i int) string {
|
||||||
s += " " + Type(d.Index(i).Uint()).String()
|
s += " " + Type(d.Index(i).Uint()).String()
|
||||||
}
|
}
|
||||||
return s
|
return s
|
||||||
case `dns:"wks"`:
|
|
||||||
if d.Len() == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
s := strconv.Itoa(int(d.Index(0).Uint()))
|
|
||||||
for i := 0; i < d.Len(); i++ {
|
|
||||||
s += " " + strconv.Itoa(int(d.Index(i).Uint()))
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
default:
|
default:
|
||||||
// if it does not have a tag its a string slice
|
// if it does not have a tag its a string slice
|
||||||
fallthrough
|
fallthrough
|
||||||
|
|
33
vendor/github.com/miekg/dns/zgenerate.go → vendor/github.com/miekg/dns/generate.go
generated
vendored
33
vendor/github.com/miekg/dns/zgenerate.go → vendor/github.com/miekg/dns/generate.go
generated
vendored
|
@ -2,6 +2,7 @@ package dns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -15,7 +16,7 @@ import (
|
||||||
// * [[ttl][class]]
|
// * [[ttl][class]]
|
||||||
// * type
|
// * type
|
||||||
// * rhs (rdata)
|
// * rhs (rdata)
|
||||||
// But we are lazy here, only the range is parsed *all* occurences
|
// But we are lazy here, only the range is parsed *all* occurrences
|
||||||
// of $ after that are interpreted.
|
// of $ after that are interpreted.
|
||||||
// Any error are returned as a string value, the empty string signals
|
// Any error are returned as a string value, the empty string signals
|
||||||
// "no error".
|
// "no error".
|
||||||
|
@ -25,7 +26,7 @@ func generate(l lex, c chan lex, t chan *Token, o string) string {
|
||||||
if i+1 == len(l.token) {
|
if i+1 == len(l.token) {
|
||||||
return "bad step in $GENERATE range"
|
return "bad step in $GENERATE range"
|
||||||
}
|
}
|
||||||
if s, e := strconv.Atoi(l.token[i+1:]); e == nil {
|
if s, err := strconv.Atoi(l.token[i+1:]); err == nil {
|
||||||
if s < 0 {
|
if s < 0 {
|
||||||
return "bad step in $GENERATE range"
|
return "bad step in $GENERATE range"
|
||||||
}
|
}
|
||||||
|
@ -65,7 +66,7 @@ BuildRR:
|
||||||
escape bool
|
escape bool
|
||||||
dom bytes.Buffer
|
dom bytes.Buffer
|
||||||
mod string
|
mod string
|
||||||
err string
|
err error
|
||||||
offset int
|
offset int
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -104,8 +105,8 @@ BuildRR:
|
||||||
return "bad modifier in $GENERATE"
|
return "bad modifier in $GENERATE"
|
||||||
}
|
}
|
||||||
mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
|
mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
|
||||||
if err != "" {
|
if err != nil {
|
||||||
return err
|
return err.Error()
|
||||||
}
|
}
|
||||||
j += 2 + sep // Jump to it
|
j += 2 + sep // Jump to it
|
||||||
}
|
}
|
||||||
|
@ -119,9 +120,9 @@ BuildRR:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Re-parse the RR and send it on the current channel t
|
// Re-parse the RR and send it on the current channel t
|
||||||
rx, e := NewRR("$ORIGIN " + o + "\n" + dom.String())
|
rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String())
|
||||||
if e != nil {
|
if err != nil {
|
||||||
return e.(*ParseError).err
|
return err.Error()
|
||||||
}
|
}
|
||||||
t <- &Token{RR: rx}
|
t <- &Token{RR: rx}
|
||||||
// Its more efficient to first built the rrlist and then parse it in
|
// Its more efficient to first built the rrlist and then parse it in
|
||||||
|
@ -131,28 +132,28 @@ BuildRR:
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
|
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
|
||||||
func modToPrintf(s string) (string, int, string) {
|
func modToPrintf(s string) (string, int, error) {
|
||||||
xs := strings.SplitN(s, ",", 3)
|
xs := strings.SplitN(s, ",", 3)
|
||||||
if len(xs) != 3 {
|
if len(xs) != 3 {
|
||||||
return "", 0, "bad modifier in $GENERATE"
|
return "", 0, errors.New("bad modifier in $GENERATE")
|
||||||
}
|
}
|
||||||
// xs[0] is offset, xs[1] is width, xs[2] is base
|
// xs[0] is offset, xs[1] is width, xs[2] is base
|
||||||
if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
|
if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
|
||||||
return "", 0, "bad base in $GENERATE"
|
return "", 0, errors.New("bad base in $GENERATE")
|
||||||
}
|
}
|
||||||
offset, err := strconv.Atoi(xs[0])
|
offset, err := strconv.Atoi(xs[0])
|
||||||
if err != nil || offset > 255 {
|
if err != nil || offset > 255 {
|
||||||
return "", 0, "bad offset in $GENERATE"
|
return "", 0, errors.New("bad offset in $GENERATE")
|
||||||
}
|
}
|
||||||
width, err := strconv.Atoi(xs[1])
|
width, err := strconv.Atoi(xs[1])
|
||||||
if err != nil || width > 255 {
|
if err != nil || width > 255 {
|
||||||
return "", offset, "bad width in $GENERATE"
|
return "", offset, errors.New("bad width in $GENERATE")
|
||||||
}
|
}
|
||||||
switch {
|
switch {
|
||||||
case width < 0:
|
case width < 0:
|
||||||
return "", offset, "bad width in $GENERATE"
|
return "", offset, errors.New("bad width in $GENERATE")
|
||||||
case width == 0:
|
case width == 0:
|
||||||
return "%" + xs[1] + xs[2], offset, ""
|
return "%" + xs[1] + xs[2], offset, nil
|
||||||
}
|
}
|
||||||
return "%0" + xs[1] + xs[2], offset, ""
|
return "%0" + xs[1] + xs[2], offset, nil
|
||||||
}
|
}
|
6
vendor/github.com/miekg/dns/labels.go
generated
vendored
6
vendor/github.com/miekg/dns/labels.go
generated
vendored
|
@ -4,9 +4,11 @@ package dns
|
||||||
|
|
||||||
// SplitDomainName splits a name string into it's labels.
|
// SplitDomainName splits a name string into it's labels.
|
||||||
// www.miek.nl. returns []string{"www", "miek", "nl"}
|
// www.miek.nl. returns []string{"www", "miek", "nl"}
|
||||||
|
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
|
||||||
// The root label (.) returns nil. Note that using
|
// The root label (.) returns nil. Note that using
|
||||||
// strings.Split(s) will work in most cases, but does not handle
|
// strings.Split(s) will work in most cases, but does not handle
|
||||||
// escaped dots (\.) for instance.
|
// escaped dots (\.) for instance.
|
||||||
|
// s must be a syntactically valid domain name, see IsDomainName.
|
||||||
func SplitDomainName(s string) (labels []string) {
|
func SplitDomainName(s string) (labels []string) {
|
||||||
if len(s) == 0 {
|
if len(s) == 0 {
|
||||||
return nil
|
return nil
|
||||||
|
@ -45,6 +47,8 @@ func SplitDomainName(s string) (labels []string) {
|
||||||
//
|
//
|
||||||
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
|
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
|
||||||
// www.miek.nl. and www.bla.nl. have one label in common: nl
|
// www.miek.nl. and www.bla.nl. have one label in common: nl
|
||||||
|
//
|
||||||
|
// s1 and s2 must be syntactically valid domain names.
|
||||||
func CompareDomainName(s1, s2 string) (n int) {
|
func CompareDomainName(s1, s2 string) (n int) {
|
||||||
s1 = Fqdn(s1)
|
s1 = Fqdn(s1)
|
||||||
s2 = Fqdn(s2)
|
s2 = Fqdn(s2)
|
||||||
|
@ -85,6 +89,7 @@ func CompareDomainName(s1, s2 string) (n int) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// CountLabel counts the the number of labels in the string s.
|
// CountLabel counts the the number of labels in the string s.
|
||||||
|
// s must be a syntactically valid domain name.
|
||||||
func CountLabel(s string) (labels int) {
|
func CountLabel(s string) (labels int) {
|
||||||
if s == "." {
|
if s == "." {
|
||||||
return
|
return
|
||||||
|
@ -103,6 +108,7 @@ func CountLabel(s string) (labels int) {
|
||||||
// Split splits a name s into its label indexes.
|
// Split splits a name s into its label indexes.
|
||||||
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
|
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
|
||||||
// The root name (.) returns nil. Also see SplitDomainName.
|
// The root name (.) returns nil. Also see SplitDomainName.
|
||||||
|
// s must be a syntactically valid domain name.
|
||||||
func Split(s string) []int {
|
func Split(s string) []int {
|
||||||
if s == "." {
|
if s == "." {
|
||||||
return nil
|
return nil
|
||||||
|
|
1281
vendor/github.com/miekg/dns/msg.go
generated
vendored
1281
vendor/github.com/miekg/dns/msg.go
generated
vendored
File diff suppressed because it is too large
Load diff
340
vendor/github.com/miekg/dns/msg_generate.go
generated
vendored
Normal file
340
vendor/github.com/miekg/dns/msg_generate.go
generated
vendored
Normal file
|
@ -0,0 +1,340 @@
|
||||||
|
//+build ignore
|
||||||
|
|
||||||
|
// msg_generate.go is meant to run with go generate. It will use
|
||||||
|
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||||
|
// it will generate pack/unpack methods based on the struct tags. The generated source is
|
||||||
|
// written to zmsg.go, and is meant to be checked into git.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"go/importer"
|
||||||
|
"go/types"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var packageHdr = `
|
||||||
|
// *** DO NOT MODIFY ***
|
||||||
|
// AUTOGENERATED BY go generate from msg_generate.go
|
||||||
|
|
||||||
|
package dns
|
||||||
|
|
||||||
|
`
|
||||||
|
|
||||||
|
// getTypeStruct will take a type and the package scope, and return the
|
||||||
|
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||||
|
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||||
|
// the RR interface). The bool return value indicates if embedded structs were
|
||||||
|
// resolved.
|
||||||
|
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||||
|
st, ok := t.Underlying().(*types.Struct)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||||
|
return st, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Anonymous() {
|
||||||
|
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||||
|
return st, true
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Import and type-check the package
|
||||||
|
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||||
|
fatalIfErr(err)
|
||||||
|
scope := pkg.Scope()
|
||||||
|
|
||||||
|
// Collect actual types (*X)
|
||||||
|
var namedTypes []string
|
||||||
|
for _, name := range scope.Names() {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
if o == nil || !o.Exported() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if name == "PrivateRR" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if corresponding TypeX exists
|
||||||
|
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||||
|
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
namedTypes = append(namedTypes, o.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
b := &bytes.Buffer{}
|
||||||
|
b.WriteString(packageHdr)
|
||||||
|
|
||||||
|
fmt.Fprint(b, "// pack*() functions\n\n")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
st, _ := getTypeStruct(o.Type(), scope)
|
||||||
|
|
||||||
|
fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name)
|
||||||
|
fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
headerEnd := off
|
||||||
|
`)
|
||||||
|
for i := 1; i < st.NumFields(); i++ {
|
||||||
|
o := func(s string) {
|
||||||
|
fmt.Fprintf(b, s, st.Field(i).Name())
|
||||||
|
fmt.Fprint(b, `if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||||
|
switch st.Tag(i) {
|
||||||
|
case `dns:"-"`: // ignored
|
||||||
|
case `dns:"txt"`:
|
||||||
|
o("off, err = packStringTxt(rr.%s, msg, off)\n")
|
||||||
|
case `dns:"opt"`:
|
||||||
|
o("off, err = packDataOpt(rr.%s, msg, off)\n")
|
||||||
|
case `dns:"nsec"`:
|
||||||
|
o("off, err = packDataNsec(rr.%s, msg, off)\n")
|
||||||
|
case `dns:"domain-name"`:
|
||||||
|
o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case st.Tag(i) == `dns:"-"`: // ignored
|
||||||
|
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"domain-name"`:
|
||||||
|
o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n")
|
||||||
|
case st.Tag(i) == `dns:"a"`:
|
||||||
|
o("off, err = packDataA(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == `dns:"aaaa"`:
|
||||||
|
o("off, err = packDataAAAA(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == `dns:"uint48"`:
|
||||||
|
o("off, err = packUint48(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == `dns:"txt"`:
|
||||||
|
o("off, err = packString(rr.%s, msg, off)\n")
|
||||||
|
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"base32"`:
|
||||||
|
o("off, err = packStringBase32(rr.%s, msg, off)\n")
|
||||||
|
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"base64"`:
|
||||||
|
o("off, err = packStringBase64(rr.%s, msg, off)\n")
|
||||||
|
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3
|
||||||
|
o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n")
|
||||||
|
continue
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"hex"`:
|
||||||
|
o("off, err = packStringHex(rr.%s, msg, off)\n")
|
||||||
|
|
||||||
|
case st.Tag(i) == `dns:"octet"`:
|
||||||
|
o("off, err = packStringOctet(rr.%s, msg, off)\n")
|
||||||
|
case st.Tag(i) == "":
|
||||||
|
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||||
|
case types.Uint8:
|
||||||
|
o("off, err = packUint8(rr.%s, msg, off)\n")
|
||||||
|
case types.Uint16:
|
||||||
|
o("off, err = packUint16(rr.%s, msg, off)\n")
|
||||||
|
case types.Uint32:
|
||||||
|
o("off, err = packUint32(rr.%s, msg, off)\n")
|
||||||
|
case types.Uint64:
|
||||||
|
o("off, err = packUint64(rr.%s, msg, off)\n")
|
||||||
|
case types.String:
|
||||||
|
o("off, err = packString(rr.%s, msg, off)\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name())
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// We have packed everything, only now we know the rdlength of this RR
|
||||||
|
fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)")
|
||||||
|
fmt.Fprintln(b, "return off, nil }\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprint(b, "// unpack*() functions\n\n")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
st, _ := getTypeStruct(o.Type(), scope)
|
||||||
|
|
||||||
|
fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name)
|
||||||
|
fmt.Fprintf(b, "rr := new(%s)\n", name)
|
||||||
|
fmt.Fprint(b, "rr.Hdr = h\n")
|
||||||
|
fmt.Fprint(b, `if noRdata(h) {
|
||||||
|
return rr, off, nil
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
rdStart := off
|
||||||
|
_ = rdStart
|
||||||
|
|
||||||
|
`)
|
||||||
|
for i := 1; i < st.NumFields(); i++ {
|
||||||
|
o := func(s string) {
|
||||||
|
fmt.Fprintf(b, s, st.Field(i).Name())
|
||||||
|
fmt.Fprint(b, `if err != nil {
|
||||||
|
return rr, off, err
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// size-* are special, because they reference a struct member we should use for the length.
|
||||||
|
if strings.HasPrefix(st.Tag(i), `dns:"size-`) {
|
||||||
|
structMember := structMember(st.Tag(i))
|
||||||
|
structTag := structTag(st.Tag(i))
|
||||||
|
switch structTag {
|
||||||
|
case "hex":
|
||||||
|
fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||||
|
case "base32":
|
||||||
|
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||||
|
case "base64":
|
||||||
|
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
fmt.Fprint(b, `if err != nil {
|
||||||
|
return rr, off, err
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||||
|
switch st.Tag(i) {
|
||||||
|
case `dns:"-"`: // ignored
|
||||||
|
case `dns:"txt"`:
|
||||||
|
o("rr.%s, off, err = unpackStringTxt(msg, off)\n")
|
||||||
|
case `dns:"opt"`:
|
||||||
|
o("rr.%s, off, err = unpackDataOpt(msg, off)\n")
|
||||||
|
case `dns:"nsec"`:
|
||||||
|
o("rr.%s, off, err = unpackDataNsec(msg, off)\n")
|
||||||
|
case `dns:"domain-name"`:
|
||||||
|
o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch st.Tag(i) {
|
||||||
|
case `dns:"-"`: // ignored
|
||||||
|
case `dns:"cdomain-name"`:
|
||||||
|
fallthrough
|
||||||
|
case `dns:"domain-name"`:
|
||||||
|
o("rr.%s, off, err = UnpackDomainName(msg, off)\n")
|
||||||
|
case `dns:"a"`:
|
||||||
|
o("rr.%s, off, err = unpackDataA(msg, off)\n")
|
||||||
|
case `dns:"aaaa"`:
|
||||||
|
o("rr.%s, off, err = unpackDataAAAA(msg, off)\n")
|
||||||
|
case `dns:"uint48"`:
|
||||||
|
o("rr.%s, off, err = unpackUint48(msg, off)\n")
|
||||||
|
case `dns:"txt"`:
|
||||||
|
o("rr.%s, off, err = unpackString(msg, off)\n")
|
||||||
|
case `dns:"base32"`:
|
||||||
|
o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||||
|
case `dns:"base64"`:
|
||||||
|
o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||||
|
case `dns:"hex"`:
|
||||||
|
o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||||
|
case `dns:"octet"`:
|
||||||
|
o("rr.%s, off, err = unpackStringOctet(msg, off)\n")
|
||||||
|
case "":
|
||||||
|
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||||
|
case types.Uint8:
|
||||||
|
o("rr.%s, off, err = unpackUint8(msg, off)\n")
|
||||||
|
case types.Uint16:
|
||||||
|
o("rr.%s, off, err = unpackUint16(msg, off)\n")
|
||||||
|
case types.Uint32:
|
||||||
|
o("rr.%s, off, err = unpackUint32(msg, off)\n")
|
||||||
|
case types.Uint64:
|
||||||
|
o("rr.%s, off, err = unpackUint64(msg, off)\n")
|
||||||
|
case types.String:
|
||||||
|
o("rr.%s, off, err = unpackString(msg, off)\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name())
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
// If we've hit len(msg) we return without error.
|
||||||
|
if i < st.NumFields()-1 {
|
||||||
|
fmt.Fprintf(b, `if off == len(msg) {
|
||||||
|
return rr, off, nil
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "return rr, off, err }\n\n")
|
||||||
|
}
|
||||||
|
// Generate typeToUnpack map
|
||||||
|
fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
if name == "RFC3597" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name)
|
||||||
|
}
|
||||||
|
fmt.Fprintln(b, "}\n")
|
||||||
|
|
||||||
|
// gofmt
|
||||||
|
res, err := format.Source(b.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
b.WriteTo(os.Stderr)
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// write result
|
||||||
|
f, err := os.Create("zmsg.go")
|
||||||
|
fatalIfErr(err)
|
||||||
|
defer f.Close()
|
||||||
|
f.Write(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string.
|
||||||
|
func structMember(s string) string {
|
||||||
|
fields := strings.Split(s, ":")
|
||||||
|
if len(fields) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
f := fields[len(fields)-1]
|
||||||
|
// f should have a closing "
|
||||||
|
if len(f) > 1 {
|
||||||
|
return f[:len(f)-1]
|
||||||
|
}
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// structTag will take a tag like dns:"size-base32:SaltLength" and return base32.
|
||||||
|
func structTag(s string) string {
|
||||||
|
fields := strings.Split(s, ":")
|
||||||
|
if len(fields) < 2 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return fields[1][len("\"size-"):]
|
||||||
|
}
|
||||||
|
|
||||||
|
func fatalIfErr(err error) {
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
630
vendor/github.com/miekg/dns/msg_helpers.go
generated
vendored
Normal file
630
vendor/github.com/miekg/dns/msg_helpers.go
generated
vendored
Normal file
|
@ -0,0 +1,630 @@
|
||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base32"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/hex"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// helper functions called from the generated zmsg.go
|
||||||
|
|
||||||
|
// These function are named after the tag to help pack/unpack, if there is no tag it is the name
|
||||||
|
// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or
|
||||||
|
// packDataDomainName.
|
||||||
|
|
||||||
|
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
|
||||||
|
if off+net.IPv4len > len(msg) {
|
||||||
|
return nil, len(msg), &Error{err: "overflow unpacking a"}
|
||||||
|
}
|
||||||
|
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
|
||||||
|
off += net.IPv4len
|
||||||
|
return a, off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packDataA(a net.IP, msg []byte, off int) (int, error) {
|
||||||
|
// It must be a slice of 4, even if it is 16, we encode only the first 4
|
||||||
|
if off+net.IPv4len > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing a"}
|
||||||
|
}
|
||||||
|
switch len(a) {
|
||||||
|
case net.IPv4len, net.IPv6len:
|
||||||
|
copy(msg[off:], a.To4())
|
||||||
|
off += net.IPv4len
|
||||||
|
case 0:
|
||||||
|
// Allowed, for dynamic updates.
|
||||||
|
default:
|
||||||
|
return len(msg), &Error{err: "overflow packing a"}
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
|
||||||
|
if off+net.IPv6len > len(msg) {
|
||||||
|
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
|
||||||
|
}
|
||||||
|
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
|
||||||
|
off += net.IPv6len
|
||||||
|
return aaaa, off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
|
||||||
|
if off+net.IPv6len > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing aaaa"}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch len(aaaa) {
|
||||||
|
case net.IPv6len:
|
||||||
|
copy(msg[off:], aaaa)
|
||||||
|
off += net.IPv6len
|
||||||
|
case 0:
|
||||||
|
// Allowed, dynamic updates.
|
||||||
|
default:
|
||||||
|
return len(msg), &Error{err: "overflow packing aaaa"}
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
|
||||||
|
// re-sliced msg according to the expected length of the RR.
|
||||||
|
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
|
||||||
|
hdr := RR_Header{}
|
||||||
|
if off == len(msg) {
|
||||||
|
return hdr, off, msg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
hdr.Name, off, err = UnpackDomainName(msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return hdr, len(msg), msg, err
|
||||||
|
}
|
||||||
|
hdr.Rrtype, off, err = unpackUint16(msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return hdr, len(msg), msg, err
|
||||||
|
}
|
||||||
|
hdr.Class, off, err = unpackUint16(msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return hdr, len(msg), msg, err
|
||||||
|
}
|
||||||
|
hdr.Ttl, off, err = unpackUint32(msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return hdr, len(msg), msg, err
|
||||||
|
}
|
||||||
|
hdr.Rdlength, off, err = unpackUint16(msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return hdr, len(msg), msg, err
|
||||||
|
}
|
||||||
|
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
|
||||||
|
return hdr, off, msg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// pack packs an RR header, returning the offset to the end of the header.
|
||||||
|
// See PackDomainName for documentation about the compression.
|
||||||
|
func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
|
||||||
|
if off == len(msg) {
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
off, err = PackDomainName(hdr.Name, msg, off, compression, compress)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
off, err = packUint16(hdr.Rrtype, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
off, err = packUint16(hdr.Class, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
off, err = packUint32(hdr.Ttl, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
off, err = packUint16(hdr.Rdlength, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// helper helper functions.
|
||||||
|
|
||||||
|
// truncateMsgFromRdLength truncates msg to match the expected length of the RR.
|
||||||
|
// Returns an error if msg is smaller than the expected size.
|
||||||
|
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
|
||||||
|
lenrd := off + int(rdlength)
|
||||||
|
if lenrd > len(msg) {
|
||||||
|
return msg, &Error{err: "overflowing header size"}
|
||||||
|
}
|
||||||
|
return msg[:lenrd], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// fromBase32 decodes s using the base32 "extended hex" alphabet (RFC 4648).
func fromBase32(s []byte) ([]byte, error) {
	out := make([]byte, base32.HexEncoding.DecodedLen(len(s)))
	n, err := base32.HexEncoding.Decode(out, s)
	return out[:n], err
}
|
||||||
|
|
||||||
|
// toBase32 encodes b using the base32 "extended hex" alphabet (RFC 4648).
func toBase32(b []byte) string {
	return base32.HexEncoding.EncodeToString(b)
}
|
||||||
|
|
||||||
|
// fromBase64 decodes s using standard base64 (RFC 4648).
func fromBase64(s []byte) ([]byte, error) {
	out := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
	n, err := base64.StdEncoding.Decode(out, s)
	return out[:n], err
}
|
||||||
|
|
||||||
|
// toBase64 encodes b using standard base64 (RFC 4648).
func toBase64(b []byte) string {
	return base64.StdEncoding.EncodeToString(b)
}
|
||||||
|
|
||||||
|
// dynamicUpdate returns true if the Rdlength is zero.
|
||||||
|
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
|
||||||
|
|
||||||
|
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
|
||||||
|
if off+1 > len(msg) {
|
||||||
|
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
|
||||||
|
}
|
||||||
|
return uint8(msg[off]), off + 1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
|
||||||
|
if off+1 > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing uint8"}
|
||||||
|
}
|
||||||
|
msg[off] = byte(i)
|
||||||
|
return off + 1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
|
||||||
|
if off+2 > len(msg) {
|
||||||
|
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
|
||||||
|
}
|
||||||
|
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
|
||||||
|
if off+2 > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing uint16"}
|
||||||
|
}
|
||||||
|
binary.BigEndian.PutUint16(msg[off:], i)
|
||||||
|
return off + 2, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
|
||||||
|
if off+4 > len(msg) {
|
||||||
|
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
|
||||||
|
}
|
||||||
|
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
|
||||||
|
if off+4 > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing uint32"}
|
||||||
|
}
|
||||||
|
binary.BigEndian.PutUint32(msg[off:], i)
|
||||||
|
return off + 4, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
|
||||||
|
if off+6 > len(msg) {
|
||||||
|
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
|
||||||
|
}
|
||||||
|
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
|
||||||
|
i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
|
||||||
|
uint64(msg[off+4])<<8 | uint64(msg[off+5])))
|
||||||
|
off += 6
|
||||||
|
return i, off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
|
||||||
|
if off+6 > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
|
||||||
|
}
|
||||||
|
msg[off] = byte(i >> 40)
|
||||||
|
msg[off+1] = byte(i >> 32)
|
||||||
|
msg[off+2] = byte(i >> 24)
|
||||||
|
msg[off+3] = byte(i >> 16)
|
||||||
|
msg[off+4] = byte(i >> 8)
|
||||||
|
msg[off+5] = byte(i)
|
||||||
|
off += 6
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
|
||||||
|
if off+8 > len(msg) {
|
||||||
|
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
|
||||||
|
}
|
||||||
|
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
|
||||||
|
if off+8 > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing uint64"}
|
||||||
|
}
|
||||||
|
binary.BigEndian.PutUint64(msg[off:], i)
|
||||||
|
off += 8
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackString(msg []byte, off int) (string, int, error) {
|
||||||
|
if off+1 > len(msg) {
|
||||||
|
return "", off, &Error{err: "overflow unpacking txt"}
|
||||||
|
}
|
||||||
|
l := int(msg[off])
|
||||||
|
if off+l+1 > len(msg) {
|
||||||
|
return "", off, &Error{err: "overflow unpacking txt"}
|
||||||
|
}
|
||||||
|
s := make([]byte, 0, l)
|
||||||
|
for _, b := range msg[off+1 : off+1+l] {
|
||||||
|
switch b {
|
||||||
|
case '"', '\\':
|
||||||
|
s = append(s, '\\', b)
|
||||||
|
case '\t', '\r', '\n':
|
||||||
|
s = append(s, b)
|
||||||
|
default:
|
||||||
|
if b < 32 || b > 127 { // unprintable
|
||||||
|
var buf [3]byte
|
||||||
|
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
|
||||||
|
s = append(s, '\\')
|
||||||
|
for i := 0; i < 3-len(bufs); i++ {
|
||||||
|
s = append(s, '0')
|
||||||
|
}
|
||||||
|
for _, r := range bufs {
|
||||||
|
s = append(s, r)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
s = append(s, b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
off += 1 + l
|
||||||
|
return string(s), off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packString(s string, msg []byte, off int) (int, error) {
|
||||||
|
txtTmp := make([]byte, 256*4+1)
|
||||||
|
off, err := packTxtString(s, msg, off, txtTmp)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
|
||||||
|
if end > len(msg) {
|
||||||
|
return "", len(msg), &Error{err: "overflow unpacking base32"}
|
||||||
|
}
|
||||||
|
s := toBase32(msg[off:end])
|
||||||
|
return s, end, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packStringBase32(s string, msg []byte, off int) (int, error) {
|
||||||
|
b32, err := fromBase32([]byte(s))
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
if off+len(b32) > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing base32"}
|
||||||
|
}
|
||||||
|
copy(msg[off:off+len(b32)], b32)
|
||||||
|
off += len(b32)
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
|
||||||
|
// Rest of the RR is base64 encoded value, so we don't need an explicit length
|
||||||
|
// to be set. Thus far all RR's that have base64 encoded fields have those as their
|
||||||
|
// last one. What we do need is the end of the RR!
|
||||||
|
if end > len(msg) {
|
||||||
|
return "", len(msg), &Error{err: "overflow unpacking base64"}
|
||||||
|
}
|
||||||
|
s := toBase64(msg[off:end])
|
||||||
|
return s, end, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packStringBase64(s string, msg []byte, off int) (int, error) {
|
||||||
|
b64, err := fromBase64([]byte(s))
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
if off+len(b64) > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing base64"}
|
||||||
|
}
|
||||||
|
copy(msg[off:off+len(b64)], b64)
|
||||||
|
off += len(b64)
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
|
||||||
|
// Rest of the RR is hex encoded value, so we don't need an explicit length
|
||||||
|
// to be set. NSEC and TSIG have hex fields with a length field.
|
||||||
|
// What we do need is the end of the RR!
|
||||||
|
if end > len(msg) {
|
||||||
|
return "", len(msg), &Error{err: "overflow unpacking hex"}
|
||||||
|
}
|
||||||
|
|
||||||
|
s := hex.EncodeToString(msg[off:end])
|
||||||
|
return s, end, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packStringHex(s string, msg []byte, off int) (int, error) {
|
||||||
|
h, err := hex.DecodeString(s)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
if off+(len(h)) > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing hex"}
|
||||||
|
}
|
||||||
|
copy(msg[off:off+len(h)], h)
|
||||||
|
off += len(h)
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
|
||||||
|
txt, off, err := unpackTxt(msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
return txt, off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packStringTxt(s []string, msg []byte, off int) (int, error) {
|
||||||
|
txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
|
||||||
|
off, err := packTxt(s, msg, off, txtTmp)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
|
||||||
|
var edns []EDNS0
|
||||||
|
Option:
|
||||||
|
code := uint16(0)
|
||||||
|
if off+4 > len(msg) {
|
||||||
|
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||||
|
}
|
||||||
|
code = binary.BigEndian.Uint16(msg[off:])
|
||||||
|
off += 2
|
||||||
|
optlen := binary.BigEndian.Uint16(msg[off:])
|
||||||
|
off += 2
|
||||||
|
if off+int(optlen) > len(msg) {
|
||||||
|
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||||
|
}
|
||||||
|
switch code {
|
||||||
|
case EDNS0NSID:
|
||||||
|
e := new(EDNS0_NSID)
|
||||||
|
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
|
off += int(optlen)
|
||||||
|
case EDNS0SUBNET, EDNS0SUBNETDRAFT:
|
||||||
|
e := new(EDNS0_SUBNET)
|
||||||
|
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
|
off += int(optlen)
|
||||||
|
if code == EDNS0SUBNETDRAFT {
|
||||||
|
e.DraftOption = true
|
||||||
|
}
|
||||||
|
case EDNS0COOKIE:
|
||||||
|
e := new(EDNS0_COOKIE)
|
||||||
|
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
|
off += int(optlen)
|
||||||
|
case EDNS0UL:
|
||||||
|
e := new(EDNS0_UL)
|
||||||
|
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
|
off += int(optlen)
|
||||||
|
case EDNS0LLQ:
|
||||||
|
e := new(EDNS0_LLQ)
|
||||||
|
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
|
off += int(optlen)
|
||||||
|
case EDNS0DAU:
|
||||||
|
e := new(EDNS0_DAU)
|
||||||
|
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
|
off += int(optlen)
|
||||||
|
case EDNS0DHU:
|
||||||
|
e := new(EDNS0_DHU)
|
||||||
|
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
|
off += int(optlen)
|
||||||
|
case EDNS0N3U:
|
||||||
|
e := new(EDNS0_N3U)
|
||||||
|
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
|
off += int(optlen)
|
||||||
|
default:
|
||||||
|
e := new(EDNS0_LOCAL)
|
||||||
|
e.Code = code
|
||||||
|
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||||
|
return nil, len(msg), err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
|
off += int(optlen)
|
||||||
|
}
|
||||||
|
|
||||||
|
if off < len(msg) {
|
||||||
|
goto Option
|
||||||
|
}
|
||||||
|
|
||||||
|
return edns, off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
|
||||||
|
for _, el := range options {
|
||||||
|
b, err := el.pack()
|
||||||
|
if err != nil || off+3 > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing opt"}
|
||||||
|
}
|
||||||
|
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
|
||||||
|
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
|
||||||
|
off += 4
|
||||||
|
if off+len(b) > len(msg) {
|
||||||
|
copy(msg[off:], b)
|
||||||
|
off = len(msg)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Actual data
|
||||||
|
copy(msg[off:off+len(b)], b)
|
||||||
|
off += len(b)
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpackStringOctet returns the rest of msg from off as a raw octet string.
func unpackStringOctet(msg []byte, off int) (string, int, error) {
	return string(msg[off:]), len(msg), nil
}
|
||||||
|
|
||||||
|
func packStringOctet(s string, msg []byte, off int) (int, error) {
|
||||||
|
txtTmp := make([]byte, 256*4+1)
|
||||||
|
off, err := packOctetString(s, msg, off, txtTmp)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
|
||||||
|
var nsec []uint16
|
||||||
|
length, window, lastwindow := 0, 0, -1
|
||||||
|
for off < len(msg) {
|
||||||
|
if off+2 > len(msg) {
|
||||||
|
return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
|
||||||
|
}
|
||||||
|
window = int(msg[off])
|
||||||
|
length = int(msg[off+1])
|
||||||
|
off += 2
|
||||||
|
if window <= lastwindow {
|
||||||
|
// RFC 4034: Blocks are present in the NSEC RR RDATA in
|
||||||
|
// increasing numerical order.
|
||||||
|
return nsec, len(msg), &Error{err: "out of order NSEC block"}
|
||||||
|
}
|
||||||
|
if length == 0 {
|
||||||
|
// RFC 4034: Blocks with no types present MUST NOT be included.
|
||||||
|
return nsec, len(msg), &Error{err: "empty NSEC block"}
|
||||||
|
}
|
||||||
|
if length > 32 {
|
||||||
|
return nsec, len(msg), &Error{err: "NSEC block too long"}
|
||||||
|
}
|
||||||
|
if off+length > len(msg) {
|
||||||
|
return nsec, len(msg), &Error{err: "overflowing NSEC block"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk the bytes in the window and extract the type bits
|
||||||
|
for j := 0; j < length; j++ {
|
||||||
|
b := msg[off+j]
|
||||||
|
// Check the bits one by one, and set the type
|
||||||
|
if b&0x80 == 0x80 {
|
||||||
|
nsec = append(nsec, uint16(window*256+j*8+0))
|
||||||
|
}
|
||||||
|
if b&0x40 == 0x40 {
|
||||||
|
nsec = append(nsec, uint16(window*256+j*8+1))
|
||||||
|
}
|
||||||
|
if b&0x20 == 0x20 {
|
||||||
|
nsec = append(nsec, uint16(window*256+j*8+2))
|
||||||
|
}
|
||||||
|
if b&0x10 == 0x10 {
|
||||||
|
nsec = append(nsec, uint16(window*256+j*8+3))
|
||||||
|
}
|
||||||
|
if b&0x8 == 0x8 {
|
||||||
|
nsec = append(nsec, uint16(window*256+j*8+4))
|
||||||
|
}
|
||||||
|
if b&0x4 == 0x4 {
|
||||||
|
nsec = append(nsec, uint16(window*256+j*8+5))
|
||||||
|
}
|
||||||
|
if b&0x2 == 0x2 {
|
||||||
|
nsec = append(nsec, uint16(window*256+j*8+6))
|
||||||
|
}
|
||||||
|
if b&0x1 == 0x1 {
|
||||||
|
nsec = append(nsec, uint16(window*256+j*8+7))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
off += length
|
||||||
|
lastwindow = window
|
||||||
|
}
|
||||||
|
return nsec, off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
|
||||||
|
if len(bitmap) == 0 {
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
var lastwindow, lastlength uint16
|
||||||
|
for j := 0; j < len(bitmap); j++ {
|
||||||
|
t := bitmap[j]
|
||||||
|
window := t / 256
|
||||||
|
length := (t-window*256)/8 + 1
|
||||||
|
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
|
||||||
|
off += int(lastlength) + 2
|
||||||
|
lastlength = 0
|
||||||
|
}
|
||||||
|
if window < lastwindow || length < lastlength {
|
||||||
|
return len(msg), &Error{err: "nsec bits out of order"}
|
||||||
|
}
|
||||||
|
if off+2+int(length) > len(msg) {
|
||||||
|
return len(msg), &Error{err: "overflow packing nsec"}
|
||||||
|
}
|
||||||
|
// Setting the window #
|
||||||
|
msg[off] = byte(window)
|
||||||
|
// Setting the octets length
|
||||||
|
msg[off+1] = byte(length)
|
||||||
|
// Setting the bit value for the type in the right octet
|
||||||
|
msg[off+1+int(length)] |= byte(1 << (7 - (t % 8)))
|
||||||
|
lastwindow, lastlength = window, length
|
||||||
|
}
|
||||||
|
off += int(lastlength) + 2
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
|
||||||
|
var (
|
||||||
|
servers []string
|
||||||
|
s string
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
if end > len(msg) {
|
||||||
|
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
|
||||||
|
}
|
||||||
|
for off < end {
|
||||||
|
s, off, err = UnpackDomainName(msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return servers, len(msg), err
|
||||||
|
}
|
||||||
|
servers = append(servers, s)
|
||||||
|
}
|
||||||
|
return servers, off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) {
|
||||||
|
var err error
|
||||||
|
for j := 0; j < len(names); j++ {
|
||||||
|
off, err = PackDomainName(names[j], msg, off, compression, false && compress)
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
13
vendor/github.com/miekg/dns/nsecx.go
generated
vendored
13
vendor/github.com/miekg/dns/nsecx.go
generated
vendored
|
@ -11,13 +11,12 @@ type saltWireFmt struct {
|
||||||
Salt string `dns:"size-hex"`
|
Salt string `dns:"size-hex"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in
|
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
|
||||||
// uppercase.
|
|
||||||
func HashName(label string, ha uint8, iter uint16, salt string) string {
|
func HashName(label string, ha uint8, iter uint16, salt string) string {
|
||||||
saltwire := new(saltWireFmt)
|
saltwire := new(saltWireFmt)
|
||||||
saltwire.Salt = salt
|
saltwire.Salt = salt
|
||||||
wire := make([]byte, DefaultMsgSize)
|
wire := make([]byte, DefaultMsgSize)
|
||||||
n, err := PackStruct(saltwire, wire, 0)
|
n, err := packSaltWire(saltwire, wire)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
@ -110,3 +109,11 @@ func (rr *NSEC3) Match(name string) bool {
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) {
|
||||||
|
off, err := packStringHex(sw.Salt, msg, 0)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
44
vendor/github.com/miekg/dns/privaterr.go
generated
vendored
44
vendor/github.com/miekg/dns/privaterr.go
generated
vendored
|
@ -33,7 +33,7 @@ type PrivateRR struct {
|
||||||
|
|
||||||
func mkPrivateRR(rrtype uint16) *PrivateRR {
|
func mkPrivateRR(rrtype uint16) *PrivateRR {
|
||||||
// Panics if RR is not an instance of PrivateRR.
|
// Panics if RR is not an instance of PrivateRR.
|
||||||
rrfunc, ok := typeToRR[rrtype]
|
rrfunc, ok := TypeToRR[rrtype]
|
||||||
if !ok {
|
if !ok {
|
||||||
panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
|
panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
|
||||||
}
|
}
|
||||||
|
@ -43,7 +43,7 @@ func mkPrivateRR(rrtype uint16) *PrivateRR {
|
||||||
case *PrivateRR:
|
case *PrivateRR:
|
||||||
return rr
|
return rr
|
||||||
}
|
}
|
||||||
panic(fmt.Sprintf("dns: RR is not a PrivateRR, typeToRR[%d] generator returned %T", rrtype, anyrr))
|
panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Header return the RR header of r.
|
// Header return the RR header of r.
|
||||||
|
@ -65,29 +65,60 @@ func (r *PrivateRR) copy() RR {
|
||||||
}
|
}
|
||||||
return rr
|
return rr
|
||||||
}
|
}
|
||||||
|
func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
|
||||||
|
off, err := r.Hdr.pack(msg, off, compression, compress)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
headerEnd := off
|
||||||
|
n, err := r.Data.Pack(msg[off:])
|
||||||
|
if err != nil {
|
||||||
|
return len(msg), err
|
||||||
|
}
|
||||||
|
off += n
|
||||||
|
r.Header().Rdlength = uint16(off - headerEnd)
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
// PrivateHandle registers a private resource record type. It requires
|
// PrivateHandle registers a private resource record type. It requires
|
||||||
// string and numeric representation of private RR type and generator function as argument.
|
// string and numeric representation of private RR type and generator function as argument.
|
||||||
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
|
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
|
||||||
rtypestr = strings.ToUpper(rtypestr)
|
rtypestr = strings.ToUpper(rtypestr)
|
||||||
|
|
||||||
typeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
|
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
|
||||||
TypeToString[rtype] = rtypestr
|
TypeToString[rtype] = rtypestr
|
||||||
StringToType[rtypestr] = rtype
|
StringToType[rtypestr] = rtype
|
||||||
|
|
||||||
|
typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) {
|
||||||
|
if noRdata(h) {
|
||||||
|
return &h, off, nil
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
|
||||||
|
rr := mkPrivateRR(h.Rrtype)
|
||||||
|
rr.Hdr = h
|
||||||
|
|
||||||
|
off1, err := rr.Data.Unpack(msg[off:])
|
||||||
|
off += off1
|
||||||
|
if err != nil {
|
||||||
|
return rr, off, err
|
||||||
|
}
|
||||||
|
return rr, off, err
|
||||||
|
}
|
||||||
|
|
||||||
setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||||
rr := mkPrivateRR(h.Rrtype)
|
rr := mkPrivateRR(h.Rrtype)
|
||||||
rr.Hdr = h
|
rr.Hdr = h
|
||||||
|
|
||||||
var l lex
|
var l lex
|
||||||
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
|
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
|
||||||
FETCH:
|
Fetch:
|
||||||
for {
|
for {
|
||||||
// TODO(miek): we could also be returning _QUOTE, this might or might not
|
// TODO(miek): we could also be returning _QUOTE, this might or might not
|
||||||
// be an issue (basically parsing TXT becomes hard)
|
// be an issue (basically parsing TXT becomes hard)
|
||||||
switch l = <-c; l.value {
|
switch l = <-c; l.value {
|
||||||
case zNewline, zEOF:
|
case zNewline, zEOF:
|
||||||
break FETCH
|
break Fetch
|
||||||
case zString:
|
case zString:
|
||||||
text = append(text, l.token)
|
text = append(text, l.token)
|
||||||
}
|
}
|
||||||
|
@ -108,10 +139,11 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata)
|
||||||
func PrivateHandleRemove(rtype uint16) {
|
func PrivateHandleRemove(rtype uint16) {
|
||||||
rtypestr, ok := TypeToString[rtype]
|
rtypestr, ok := TypeToString[rtype]
|
||||||
if ok {
|
if ok {
|
||||||
delete(typeToRR, rtype)
|
delete(TypeToRR, rtype)
|
||||||
delete(TypeToString, rtype)
|
delete(TypeToString, rtype)
|
||||||
delete(typeToparserFunc, rtype)
|
delete(typeToparserFunc, rtype)
|
||||||
delete(StringToType, rtypestr)
|
delete(StringToType, rtypestr)
|
||||||
|
delete(typeToUnpack, rtype)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
50
vendor/github.com/miekg/dns/rawmsg.go
generated
vendored
50
vendor/github.com/miekg/dns/rawmsg.go
generated
vendored
|
@ -1,52 +1,6 @@
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
// These raw* functions do not use reflection, they directly set the values
|
import "encoding/binary"
|
||||||
// in the buffer. There are faster than their reflection counterparts.
|
|
||||||
|
|
||||||
// RawSetId sets the message id in buf.
|
|
||||||
func rawSetId(msg []byte, i uint16) bool {
|
|
||||||
if len(msg) < 2 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
msg[0], msg[1] = packUint16(i)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawSetQuestionLen sets the length of the question section.
|
|
||||||
func rawSetQuestionLen(msg []byte, i uint16) bool {
|
|
||||||
if len(msg) < 6 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
msg[4], msg[5] = packUint16(i)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawSetAnswerLen sets the lenght of the answer section.
|
|
||||||
func rawSetAnswerLen(msg []byte, i uint16) bool {
|
|
||||||
if len(msg) < 8 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
msg[6], msg[7] = packUint16(i)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawSetsNsLen sets the lenght of the authority section.
|
|
||||||
func rawSetNsLen(msg []byte, i uint16) bool {
|
|
||||||
if len(msg) < 10 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
msg[8], msg[9] = packUint16(i)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawSetExtraLen sets the lenght of the additional section.
|
|
||||||
func rawSetExtraLen(msg []byte, i uint16) bool {
|
|
||||||
if len(msg) < 12 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
msg[10], msg[11] = packUint16(i)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawSetRdlength sets the rdlength in the header of
|
// rawSetRdlength sets the rdlength in the header of
|
||||||
// the RR. The offset 'off' must be positioned at the
|
// the RR. The offset 'off' must be positioned at the
|
||||||
|
@ -90,6 +44,6 @@ Loop:
|
||||||
if rdatalen > 0xFFFF {
|
if rdatalen > 0xFFFF {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
msg[off], msg[off+1] = packUint16(uint16(rdatalen))
|
binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
38
vendor/github.com/miekg/dns/reverse.go
generated
vendored
Normal file
38
vendor/github.com/miekg/dns/reverse.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
package dns
|
||||||
|
|
||||||
|
// StringToType is the reverse of TypeToString, needed for string parsing.
|
||||||
|
var StringToType = reverseInt16(TypeToString)
|
||||||
|
|
||||||
|
// StringToClass is the reverse of ClassToString, needed for string parsing.
|
||||||
|
var StringToClass = reverseInt16(ClassToString)
|
||||||
|
|
||||||
|
// Map of opcodes strings.
|
||||||
|
var StringToOpcode = reverseInt(OpcodeToString)
|
||||||
|
|
||||||
|
// Map of rcodes strings.
|
||||||
|
var StringToRcode = reverseInt(RcodeToString)
|
||||||
|
|
||||||
|
// Reverse a map
|
||||||
|
// reverseInt8 inverts a uint8-to-string map into a string-to-uint8 map.
func reverseInt8(m map[uint8]string) map[string]uint8 {
	out := make(map[string]uint8, len(m))
	for k, v := range m {
		out[v] = k
	}
	return out
}
|
||||||
|
|
||||||
|
// reverseInt16 inverts a uint16-to-string map into a string-to-uint16 map.
func reverseInt16(m map[uint16]string) map[string]uint16 {
	out := make(map[string]uint16, len(m))
	for k, v := range m {
		out[v] = k
	}
	return out
}
|
||||||
|
|
||||||
|
func reverseInt(m map[int]string) map[string]int {
|
||||||
|
n := make(map[string]int, len(m))
|
||||||
|
for u, s := range m {
|
||||||
|
n[s] = u
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
23
vendor/github.com/miekg/dns/zscan.go → vendor/github.com/miekg/dns/scan.go
generated
vendored
23
vendor/github.com/miekg/dns/zscan.go → vendor/github.com/miekg/dns/scan.go
generated
vendored
|
@ -67,7 +67,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
|
// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
|
||||||
// where the error occured.
|
// where the error occurred.
|
||||||
type ParseError struct {
|
type ParseError struct {
|
||||||
file string
|
file string
|
||||||
err string
|
err string
|
||||||
|
@ -86,7 +86,7 @@ func (e *ParseError) Error() (s string) {
|
||||||
type lex struct {
|
type lex struct {
|
||||||
token string // text of the token
|
token string // text of the token
|
||||||
tokenUpper string // uppercase text of the token
|
tokenUpper string // uppercase text of the token
|
||||||
length int // lenght of the token
|
length int // length of the token
|
||||||
err bool // when true, token text has lexer error
|
err bool // when true, token text has lexer error
|
||||||
value uint8 // value: zString, _BLANK, etc.
|
value uint8 // value: zString, _BLANK, etc.
|
||||||
line int // line in the file
|
line int // line in the file
|
||||||
|
@ -99,7 +99,7 @@ type lex struct {
|
||||||
type Token struct {
|
type Token struct {
|
||||||
// The scanned resource record when error is not nil.
|
// The scanned resource record when error is not nil.
|
||||||
RR
|
RR
|
||||||
// When an error occured, this has the error specifics.
|
// When an error occurred, this has the error specifics.
|
||||||
Error *ParseError
|
Error *ParseError
|
||||||
// A potential comment positioned after the RR and on the same line.
|
// A potential comment positioned after the RR and on the same line.
|
||||||
Comment string
|
Comment string
|
||||||
|
@ -144,6 +144,8 @@ func ReadRR(q io.Reader, filename string) (RR, error) {
|
||||||
//
|
//
|
||||||
// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
|
// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
|
||||||
// if x.Error != nil {
|
// if x.Error != nil {
|
||||||
|
// // log.Println(x.Error)
|
||||||
|
// } else {
|
||||||
// // Do something with x.RR
|
// // Do something with x.RR
|
||||||
// }
|
// }
|
||||||
// }
|
// }
|
||||||
|
@ -375,8 +377,8 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||||
t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
|
t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if e := generate(l, c, t, origin); e != "" {
|
if errMsg := generate(l, c, t, origin); errMsg != "" {
|
||||||
t <- &Token{Error: &ParseError{f, e, l}}
|
t <- &Token{Error: &ParseError{f, errMsg, l}}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
st = zExpectOwnerDir
|
st = zExpectOwnerDir
|
||||||
|
@ -625,6 +627,7 @@ func zlexer(s *scan, c chan lex) {
|
||||||
if stri > 0 {
|
if stri > 0 {
|
||||||
l.value = zString
|
l.value = zString
|
||||||
l.token = string(str[:stri])
|
l.token = string(str[:stri])
|
||||||
|
l.tokenUpper = strings.ToUpper(l.token)
|
||||||
l.length = stri
|
l.length = stri
|
||||||
debug.Printf("[4 %+v]", l.token)
|
debug.Printf("[4 %+v]", l.token)
|
||||||
c <- l
|
c <- l
|
||||||
|
@ -661,6 +664,7 @@ func zlexer(s *scan, c chan lex) {
|
||||||
owner = true
|
owner = true
|
||||||
l.value = zNewline
|
l.value = zNewline
|
||||||
l.token = "\n"
|
l.token = "\n"
|
||||||
|
l.tokenUpper = l.token
|
||||||
l.length = 1
|
l.length = 1
|
||||||
l.comment = string(com[:comi])
|
l.comment = string(com[:comi])
|
||||||
debug.Printf("[3 %+v %+v]", l.token, l.comment)
|
debug.Printf("[3 %+v %+v]", l.token, l.comment)
|
||||||
|
@ -694,6 +698,7 @@ func zlexer(s *scan, c chan lex) {
|
||||||
}
|
}
|
||||||
l.value = zNewline
|
l.value = zNewline
|
||||||
l.token = "\n"
|
l.token = "\n"
|
||||||
|
l.tokenUpper = l.token
|
||||||
l.length = 1
|
l.length = 1
|
||||||
debug.Printf("[1 %+v]", l.token)
|
debug.Printf("[1 %+v]", l.token)
|
||||||
c <- l
|
c <- l
|
||||||
|
@ -738,6 +743,7 @@ func zlexer(s *scan, c chan lex) {
|
||||||
if stri != 0 {
|
if stri != 0 {
|
||||||
l.value = zString
|
l.value = zString
|
||||||
l.token = string(str[:stri])
|
l.token = string(str[:stri])
|
||||||
|
l.tokenUpper = strings.ToUpper(l.token)
|
||||||
l.length = stri
|
l.length = stri
|
||||||
|
|
||||||
debug.Printf("[%+v]", l.token)
|
debug.Printf("[%+v]", l.token)
|
||||||
|
@ -748,6 +754,7 @@ func zlexer(s *scan, c chan lex) {
|
||||||
// send quote itself as separate token
|
// send quote itself as separate token
|
||||||
l.value = zQuote
|
l.value = zQuote
|
||||||
l.token = "\""
|
l.token = "\""
|
||||||
|
l.tokenUpper = l.token
|
||||||
l.length = 1
|
l.length = 1
|
||||||
c <- l
|
c <- l
|
||||||
quote = !quote
|
quote = !quote
|
||||||
|
@ -773,6 +780,7 @@ func zlexer(s *scan, c chan lex) {
|
||||||
brace--
|
brace--
|
||||||
if brace < 0 {
|
if brace < 0 {
|
||||||
l.token = "extra closing brace"
|
l.token = "extra closing brace"
|
||||||
|
l.tokenUpper = l.token
|
||||||
l.err = true
|
l.err = true
|
||||||
debug.Printf("[%+v]", l.token)
|
debug.Printf("[%+v]", l.token)
|
||||||
c <- l
|
c <- l
|
||||||
|
@ -797,6 +805,7 @@ func zlexer(s *scan, c chan lex) {
|
||||||
if stri > 0 {
|
if stri > 0 {
|
||||||
// Send remainder
|
// Send remainder
|
||||||
l.token = string(str[:stri])
|
l.token = string(str[:stri])
|
||||||
|
l.tokenUpper = strings.ToUpper(l.token)
|
||||||
l.length = stri
|
l.length = stri
|
||||||
l.value = zString
|
l.value = zString
|
||||||
debug.Printf("[%+v]", l.token)
|
debug.Printf("[%+v]", l.token)
|
||||||
|
@ -964,8 +973,8 @@ func stringToNodeID(l lex) (uint64, *ParseError) {
|
||||||
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
|
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
|
||||||
}
|
}
|
||||||
s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
|
s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
|
||||||
u, e := strconv.ParseUint(s, 16, 64)
|
u, err := strconv.ParseUint(s, 16, 64)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
|
return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
|
||||||
}
|
}
|
||||||
return u, nil
|
return u, nil
|
287
vendor/github.com/miekg/dns/zscan_rr.go → vendor/github.com/miekg/dns/scan_rr.go
generated
vendored
287
vendor/github.com/miekg/dns/zscan_rr.go → vendor/github.com/miekg/dns/scan_rr.go
generated
vendored
|
@ -1443,64 +1443,6 @@ func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||||
return rr, nil, ""
|
return rr, nil, ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func setWKS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
|
||||||
rr := new(WKS)
|
|
||||||
rr.Hdr = h
|
|
||||||
|
|
||||||
l := <-c
|
|
||||||
if l.length == 0 {
|
|
||||||
return rr, nil, l.comment
|
|
||||||
}
|
|
||||||
rr.Address = net.ParseIP(l.token)
|
|
||||||
if rr.Address == nil || l.err {
|
|
||||||
return nil, &ParseError{f, "bad WKS Address", l}, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
<-c // zBlank
|
|
||||||
l = <-c
|
|
||||||
proto := "tcp"
|
|
||||||
i, e := strconv.Atoi(l.token)
|
|
||||||
if e != nil || l.err {
|
|
||||||
return nil, &ParseError{f, "bad WKS Protocol", l}, ""
|
|
||||||
}
|
|
||||||
rr.Protocol = uint8(i)
|
|
||||||
switch rr.Protocol {
|
|
||||||
case 17:
|
|
||||||
proto = "udp"
|
|
||||||
case 6:
|
|
||||||
proto = "tcp"
|
|
||||||
default:
|
|
||||||
return nil, &ParseError{f, "bad WKS Protocol", l}, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
<-c
|
|
||||||
l = <-c
|
|
||||||
rr.BitMap = make([]uint16, 0)
|
|
||||||
var (
|
|
||||||
k int
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
for l.value != zNewline && l.value != zEOF {
|
|
||||||
switch l.value {
|
|
||||||
case zBlank:
|
|
||||||
// Ok
|
|
||||||
case zString:
|
|
||||||
if k, err = net.LookupPort(proto, l.token); err != nil {
|
|
||||||
i, e := strconv.Atoi(l.token) // If a number use that
|
|
||||||
if e != nil {
|
|
||||||
return nil, &ParseError{f, "bad WKS BitMap", l}, ""
|
|
||||||
}
|
|
||||||
rr.BitMap = append(rr.BitMap, uint16(i))
|
|
||||||
}
|
|
||||||
rr.BitMap = append(rr.BitMap, uint16(k))
|
|
||||||
default:
|
|
||||||
return nil, &ParseError{f, "bad WKS BitMap", l}, ""
|
|
||||||
}
|
|
||||||
l = <-c
|
|
||||||
}
|
|
||||||
return rr, nil, l.comment
|
|
||||||
}
|
|
||||||
|
|
||||||
func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||||
rr := new(SSHFP)
|
rr := new(SSHFP)
|
||||||
rr.Hdr = h
|
rr.Hdr = h
|
||||||
|
@ -1804,6 +1746,41 @@ func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||||
return rr, nil, c1
|
return rr, nil, c1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||||
|
rr := new(SMIMEA)
|
||||||
|
rr.Hdr = h
|
||||||
|
l := <-c
|
||||||
|
if l.length == 0 {
|
||||||
|
return rr, nil, l.comment
|
||||||
|
}
|
||||||
|
i, e := strconv.Atoi(l.token)
|
||||||
|
if e != nil || l.err {
|
||||||
|
return nil, &ParseError{f, "bad SMIMEA Usage", l}, ""
|
||||||
|
}
|
||||||
|
rr.Usage = uint8(i)
|
||||||
|
<-c // zBlank
|
||||||
|
l = <-c
|
||||||
|
i, e = strconv.Atoi(l.token)
|
||||||
|
if e != nil || l.err {
|
||||||
|
return nil, &ParseError{f, "bad SMIMEA Selector", l}, ""
|
||||||
|
}
|
||||||
|
rr.Selector = uint8(i)
|
||||||
|
<-c // zBlank
|
||||||
|
l = <-c
|
||||||
|
i, e = strconv.Atoi(l.token)
|
||||||
|
if e != nil || l.err {
|
||||||
|
return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, ""
|
||||||
|
}
|
||||||
|
rr.MatchingType = uint8(i)
|
||||||
|
// So this needs be e2 (i.e. different than e), because...??t
|
||||||
|
s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f)
|
||||||
|
if e2 != nil {
|
||||||
|
return nil, e2, c1
|
||||||
|
}
|
||||||
|
rr.Certificate = s
|
||||||
|
return rr, nil, c1
|
||||||
|
}
|
||||||
|
|
||||||
func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||||
rr := new(RFC3597)
|
rr := new(RFC3597)
|
||||||
rr.Hdr = h
|
rr.Hdr = h
|
||||||
|
@ -2103,73 +2080,6 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||||
return rr, nil, ""
|
return rr, nil, ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func setIPSECKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
|
||||||
rr := new(IPSECKEY)
|
|
||||||
rr.Hdr = h
|
|
||||||
l := <-c
|
|
||||||
if l.length == 0 {
|
|
||||||
return rr, nil, l.comment
|
|
||||||
}
|
|
||||||
i, err := strconv.Atoi(l.token)
|
|
||||||
if err != nil || l.err {
|
|
||||||
return nil, &ParseError{f, "bad IPSECKEY Precedence", l}, ""
|
|
||||||
}
|
|
||||||
rr.Precedence = uint8(i)
|
|
||||||
<-c // zBlank
|
|
||||||
l = <-c
|
|
||||||
i, err = strconv.Atoi(l.token)
|
|
||||||
if err != nil || l.err {
|
|
||||||
return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, ""
|
|
||||||
}
|
|
||||||
rr.GatewayType = uint8(i)
|
|
||||||
<-c // zBlank
|
|
||||||
l = <-c
|
|
||||||
i, err = strconv.Atoi(l.token)
|
|
||||||
if err != nil || l.err {
|
|
||||||
return nil, &ParseError{f, "bad IPSECKEY Algorithm", l}, ""
|
|
||||||
}
|
|
||||||
rr.Algorithm = uint8(i)
|
|
||||||
|
|
||||||
// Now according to GatewayType we can have different elements here
|
|
||||||
<-c // zBlank
|
|
||||||
l = <-c
|
|
||||||
switch rr.GatewayType {
|
|
||||||
case 0:
|
|
||||||
fallthrough
|
|
||||||
case 3:
|
|
||||||
rr.GatewayName = l.token
|
|
||||||
if l.token == "@" {
|
|
||||||
rr.GatewayName = o
|
|
||||||
}
|
|
||||||
_, ok := IsDomainName(l.token)
|
|
||||||
if !ok || l.length == 0 || l.err {
|
|
||||||
return nil, &ParseError{f, "bad IPSECKEY GatewayName", l}, ""
|
|
||||||
}
|
|
||||||
if rr.GatewayName[l.length-1] != '.' {
|
|
||||||
rr.GatewayName = appendOrigin(rr.GatewayName, o)
|
|
||||||
}
|
|
||||||
case 1:
|
|
||||||
rr.GatewayA = net.ParseIP(l.token)
|
|
||||||
if rr.GatewayA == nil {
|
|
||||||
return nil, &ParseError{f, "bad IPSECKEY GatewayA", l}, ""
|
|
||||||
}
|
|
||||||
case 2:
|
|
||||||
rr.GatewayAAAA = net.ParseIP(l.token)
|
|
||||||
if rr.GatewayAAAA == nil {
|
|
||||||
return nil, &ParseError{f, "bad IPSECKEY GatewayAAAA", l}, ""
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
s, e, c1 := endingToString(c, "bad IPSECKEY PublicKey", f)
|
|
||||||
if e != nil {
|
|
||||||
return nil, e, c1
|
|
||||||
}
|
|
||||||
rr.PublicKey = s
|
|
||||||
return rr, nil, c1
|
|
||||||
}
|
|
||||||
|
|
||||||
func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||||
rr := new(CAA)
|
rr := new(CAA)
|
||||||
rr.Hdr = h
|
rr.Hdr = h
|
||||||
|
@ -2203,68 +2113,67 @@ func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
var typeToparserFunc = map[uint16]parserFunc{
|
var typeToparserFunc = map[uint16]parserFunc{
|
||||||
TypeAAAA: parserFunc{setAAAA, false},
|
TypeAAAA: {setAAAA, false},
|
||||||
TypeAFSDB: parserFunc{setAFSDB, false},
|
TypeAFSDB: {setAFSDB, false},
|
||||||
TypeA: parserFunc{setA, false},
|
TypeA: {setA, false},
|
||||||
TypeCAA: parserFunc{setCAA, true},
|
TypeCAA: {setCAA, true},
|
||||||
TypeCDS: parserFunc{setCDS, true},
|
TypeCDS: {setCDS, true},
|
||||||
TypeCDNSKEY: parserFunc{setCDNSKEY, true},
|
TypeCDNSKEY: {setCDNSKEY, true},
|
||||||
TypeCERT: parserFunc{setCERT, true},
|
TypeCERT: {setCERT, true},
|
||||||
TypeCNAME: parserFunc{setCNAME, false},
|
TypeCNAME: {setCNAME, false},
|
||||||
TypeDHCID: parserFunc{setDHCID, true},
|
TypeDHCID: {setDHCID, true},
|
||||||
TypeDLV: parserFunc{setDLV, true},
|
TypeDLV: {setDLV, true},
|
||||||
TypeDNAME: parserFunc{setDNAME, false},
|
TypeDNAME: {setDNAME, false},
|
||||||
TypeKEY: parserFunc{setKEY, true},
|
TypeKEY: {setKEY, true},
|
||||||
TypeDNSKEY: parserFunc{setDNSKEY, true},
|
TypeDNSKEY: {setDNSKEY, true},
|
||||||
TypeDS: parserFunc{setDS, true},
|
TypeDS: {setDS, true},
|
||||||
TypeEID: parserFunc{setEID, true},
|
TypeEID: {setEID, true},
|
||||||
TypeEUI48: parserFunc{setEUI48, false},
|
TypeEUI48: {setEUI48, false},
|
||||||
TypeEUI64: parserFunc{setEUI64, false},
|
TypeEUI64: {setEUI64, false},
|
||||||
TypeGID: parserFunc{setGID, false},
|
TypeGID: {setGID, false},
|
||||||
TypeGPOS: parserFunc{setGPOS, false},
|
TypeGPOS: {setGPOS, false},
|
||||||
TypeHINFO: parserFunc{setHINFO, true},
|
TypeHINFO: {setHINFO, true},
|
||||||
TypeHIP: parserFunc{setHIP, true},
|
TypeHIP: {setHIP, true},
|
||||||
TypeIPSECKEY: parserFunc{setIPSECKEY, true},
|
TypeKX: {setKX, false},
|
||||||
TypeKX: parserFunc{setKX, false},
|
TypeL32: {setL32, false},
|
||||||
TypeL32: parserFunc{setL32, false},
|
TypeL64: {setL64, false},
|
||||||
TypeL64: parserFunc{setL64, false},
|
TypeLOC: {setLOC, true},
|
||||||
TypeLOC: parserFunc{setLOC, true},
|
TypeLP: {setLP, false},
|
||||||
TypeLP: parserFunc{setLP, false},
|
TypeMB: {setMB, false},
|
||||||
TypeMB: parserFunc{setMB, false},
|
TypeMD: {setMD, false},
|
||||||
TypeMD: parserFunc{setMD, false},
|
TypeMF: {setMF, false},
|
||||||
TypeMF: parserFunc{setMF, false},
|
TypeMG: {setMG, false},
|
||||||
TypeMG: parserFunc{setMG, false},
|
TypeMINFO: {setMINFO, false},
|
||||||
TypeMINFO: parserFunc{setMINFO, false},
|
TypeMR: {setMR, false},
|
||||||
TypeMR: parserFunc{setMR, false},
|
TypeMX: {setMX, false},
|
||||||
TypeMX: parserFunc{setMX, false},
|
TypeNAPTR: {setNAPTR, false},
|
||||||
TypeNAPTR: parserFunc{setNAPTR, false},
|
TypeNID: {setNID, false},
|
||||||
TypeNID: parserFunc{setNID, false},
|
TypeNIMLOC: {setNIMLOC, true},
|
||||||
TypeNIMLOC: parserFunc{setNIMLOC, true},
|
TypeNINFO: {setNINFO, true},
|
||||||
TypeNINFO: parserFunc{setNINFO, true},
|
TypeNSAPPTR: {setNSAPPTR, false},
|
||||||
TypeNSAPPTR: parserFunc{setNSAPPTR, false},
|
TypeNSEC3PARAM: {setNSEC3PARAM, false},
|
||||||
TypeNSEC3PARAM: parserFunc{setNSEC3PARAM, false},
|
TypeNSEC3: {setNSEC3, true},
|
||||||
TypeNSEC3: parserFunc{setNSEC3, true},
|
TypeNSEC: {setNSEC, true},
|
||||||
TypeNSEC: parserFunc{setNSEC, true},
|
TypeNS: {setNS, false},
|
||||||
TypeNS: parserFunc{setNS, false},
|
TypeOPENPGPKEY: {setOPENPGPKEY, true},
|
||||||
TypeOPENPGPKEY: parserFunc{setOPENPGPKEY, true},
|
TypePTR: {setPTR, false},
|
||||||
TypePTR: parserFunc{setPTR, false},
|
TypePX: {setPX, false},
|
||||||
TypePX: parserFunc{setPX, false},
|
TypeSIG: {setSIG, true},
|
||||||
TypeSIG: parserFunc{setSIG, true},
|
TypeRKEY: {setRKEY, true},
|
||||||
TypeRKEY: parserFunc{setRKEY, true},
|
TypeRP: {setRP, false},
|
||||||
TypeRP: parserFunc{setRP, false},
|
TypeRRSIG: {setRRSIG, true},
|
||||||
TypeRRSIG: parserFunc{setRRSIG, true},
|
TypeRT: {setRT, false},
|
||||||
TypeRT: parserFunc{setRT, false},
|
TypeSMIMEA: {setSMIMEA, true},
|
||||||
TypeSOA: parserFunc{setSOA, false},
|
TypeSOA: {setSOA, false},
|
||||||
TypeSPF: parserFunc{setSPF, true},
|
TypeSPF: {setSPF, true},
|
||||||
TypeSRV: parserFunc{setSRV, false},
|
TypeSRV: {setSRV, false},
|
||||||
TypeSSHFP: parserFunc{setSSHFP, true},
|
TypeSSHFP: {setSSHFP, true},
|
||||||
TypeTALINK: parserFunc{setTALINK, false},
|
TypeTALINK: {setTALINK, false},
|
||||||
TypeTA: parserFunc{setTA, true},
|
TypeTA: {setTA, true},
|
||||||
TypeTLSA: parserFunc{setTLSA, true},
|
TypeTLSA: {setTLSA, true},
|
||||||
TypeTXT: parserFunc{setTXT, true},
|
TypeTXT: {setTXT, true},
|
||||||
TypeUID: parserFunc{setUID, false},
|
TypeUID: {setUID, false},
|
||||||
TypeUINFO: parserFunc{setUINFO, true},
|
TypeUINFO: {setUINFO, true},
|
||||||
TypeURI: parserFunc{setURI, true},
|
TypeURI: {setURI, true},
|
||||||
TypeWKS: parserFunc{setWKS, true},
|
TypeX25: {setX25, false},
|
||||||
TypeX25: parserFunc{setX25, false},
|
|
||||||
}
|
}
|
234
vendor/github.com/miekg/dns/server.go
generated
vendored
234
vendor/github.com/miekg/dns/server.go
generated
vendored
|
@ -4,6 +4,8 @@ package dns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/binary"
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"sync"
|
"sync"
|
||||||
|
@ -47,7 +49,7 @@ type response struct {
|
||||||
tsigRequestMAC string
|
tsigRequestMAC string
|
||||||
tsigSecret map[string]string // the tsig secrets
|
tsigSecret map[string]string // the tsig secrets
|
||||||
udp *net.UDPConn // i/o connection if UDP was used
|
udp *net.UDPConn // i/o connection if UDP was used
|
||||||
tcp *net.TCPConn // i/o connection if TCP was used
|
tcp net.Conn // i/o connection if TCP was used
|
||||||
udpSession *SessionUDP // oob data to get egress interface right
|
udpSession *SessionUDP // oob data to get egress interface right
|
||||||
remoteAddr net.Addr // address of the client
|
remoteAddr net.Addr // address of the client
|
||||||
writer Writer // writer to output the raw DNS bits
|
writer Writer // writer to output the raw DNS bits
|
||||||
|
@ -92,13 +94,35 @@ func HandleFailed(w ResponseWriter, r *Msg) {
|
||||||
|
|
||||||
func failedHandler() Handler { return HandlerFunc(HandleFailed) }
|
func failedHandler() Handler { return HandlerFunc(HandleFailed) }
|
||||||
|
|
||||||
// ListenAndServe Starts a server on addresss and network speficied. Invoke handler
|
// ListenAndServe Starts a server on address and network specified Invoke handler
|
||||||
// for incoming queries.
|
// for incoming queries.
|
||||||
func ListenAndServe(addr string, network string, handler Handler) error {
|
func ListenAndServe(addr string, network string, handler Handler) error {
|
||||||
server := &Server{Addr: addr, Net: network, Handler: handler}
|
server := &Server{Addr: addr, Net: network, Handler: handler}
|
||||||
return server.ListenAndServe()
|
return server.ListenAndServe()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
|
||||||
|
// http://golang.org/pkg/net/http/#ListenAndServeTLS
|
||||||
|
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
|
||||||
|
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
config := tls.Config{
|
||||||
|
Certificates: []tls.Certificate{cert},
|
||||||
|
}
|
||||||
|
|
||||||
|
server := &Server{
|
||||||
|
Addr: addr,
|
||||||
|
Net: "tcp-tls",
|
||||||
|
TLSConfig: &config,
|
||||||
|
Handler: handler,
|
||||||
|
}
|
||||||
|
|
||||||
|
return server.ListenAndServe()
|
||||||
|
}
|
||||||
|
|
||||||
// ActivateAndServe activates a server with a listener from systemd,
|
// ActivateAndServe activates a server with a listener from systemd,
|
||||||
// l and p should not both be non-nil.
|
// l and p should not both be non-nil.
|
||||||
// If both l and p are not nil only p will be used.
|
// If both l and p are not nil only p will be used.
|
||||||
|
@ -123,7 +147,7 @@ func (mux *ServeMux) match(q string, t uint16) Handler {
|
||||||
b[i] |= ('a' - 'A')
|
b[i] |= ('a' - 'A')
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
|
if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key
|
||||||
if t != TypeDS {
|
if t != TypeDS {
|
||||||
return h
|
return h
|
||||||
}
|
}
|
||||||
|
@ -210,7 +234,7 @@ type Writer interface {
|
||||||
type Reader interface {
|
type Reader interface {
|
||||||
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
|
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
|
||||||
// connection properties, for example the read-deadline.
|
// connection properties, for example the read-deadline.
|
||||||
ReadTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error)
|
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
|
||||||
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
|
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
|
||||||
// connection properties, for example the read-deadline.
|
// connection properties, for example the read-deadline.
|
||||||
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
|
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
|
||||||
|
@ -222,7 +246,7 @@ type defaultReader struct {
|
||||||
*Server
|
*Server
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dr *defaultReader) ReadTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) {
|
func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
|
||||||
return dr.readTCP(conn, timeout)
|
return dr.readTCP(conn, timeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -242,10 +266,12 @@ type DecorateWriter func(Writer) Writer
|
||||||
type Server struct {
|
type Server struct {
|
||||||
// Address to listen on, ":dns" if empty.
|
// Address to listen on, ":dns" if empty.
|
||||||
Addr string
|
Addr string
|
||||||
// if "tcp" it will invoke a TCP listener, otherwise an UDP one.
|
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
|
||||||
Net string
|
Net string
|
||||||
// TCP Listener to use, this is to aid in systemd's socket activation.
|
// TCP Listener to use, this is to aid in systemd's socket activation.
|
||||||
Listener net.Listener
|
Listener net.Listener
|
||||||
|
// TLS connection configuration
|
||||||
|
TLSConfig *tls.Config
|
||||||
// UDP "Listener" to use, this is to aid in systemd's socket activation.
|
// UDP "Listener" to use, this is to aid in systemd's socket activation.
|
||||||
PacketConn net.PacketConn
|
PacketConn net.PacketConn
|
||||||
// Handler to invoke, dns.DefaultServeMux if nil.
|
// Handler to invoke, dns.DefaultServeMux if nil.
|
||||||
|
@ -262,7 +288,7 @@ type Server struct {
|
||||||
// Secret(s) for Tsig map[<zonename>]<base64 secret>.
|
// Secret(s) for Tsig map[<zonename>]<base64 secret>.
|
||||||
TsigSecret map[string]string
|
TsigSecret map[string]string
|
||||||
// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
|
// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
|
||||||
// the handler. It will specfically not check if the query has the QR bit not set.
|
// the handler. It will specifically not check if the query has the QR bit not set.
|
||||||
Unsafe bool
|
Unsafe bool
|
||||||
// If NotifyStartedFunc is set it is called once the server has started listening.
|
// If NotifyStartedFunc is set it is called once the server has started listening.
|
||||||
NotifyStartedFunc func()
|
NotifyStartedFunc func()
|
||||||
|
@ -271,26 +297,21 @@ type Server struct {
|
||||||
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
|
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
|
||||||
DecorateWriter DecorateWriter
|
DecorateWriter DecorateWriter
|
||||||
|
|
||||||
// For graceful shutdown.
|
// Graceful shutdown handling
|
||||||
stopUDP chan bool
|
|
||||||
stopTCP chan bool
|
|
||||||
wgUDP sync.WaitGroup
|
|
||||||
wgTCP sync.WaitGroup
|
|
||||||
|
|
||||||
// make start/shutdown not racy
|
inFlight sync.WaitGroup
|
||||||
lock sync.Mutex
|
|
||||||
|
lock sync.RWMutex
|
||||||
started bool
|
started bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListenAndServe starts a nameserver on the configured address in *Server.
|
// ListenAndServe starts a nameserver on the configured address in *Server.
|
||||||
func (srv *Server) ListenAndServe() error {
|
func (srv *Server) ListenAndServe() error {
|
||||||
srv.lock.Lock()
|
srv.lock.Lock()
|
||||||
|
defer srv.lock.Unlock()
|
||||||
if srv.started {
|
if srv.started {
|
||||||
srv.lock.Unlock()
|
|
||||||
return &Error{err: "server already started"}
|
return &Error{err: "server already started"}
|
||||||
}
|
}
|
||||||
srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
|
|
||||||
srv.started = true
|
|
||||||
addr := srv.Addr
|
addr := srv.Addr
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
addr = ":domain"
|
addr = ":domain"
|
||||||
|
@ -300,34 +321,57 @@ func (srv *Server) ListenAndServe() error {
|
||||||
}
|
}
|
||||||
switch srv.Net {
|
switch srv.Net {
|
||||||
case "tcp", "tcp4", "tcp6":
|
case "tcp", "tcp4", "tcp6":
|
||||||
a, e := net.ResolveTCPAddr(srv.Net, addr)
|
a, err := net.ResolveTCPAddr(srv.Net, addr)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
return e
|
return err
|
||||||
}
|
}
|
||||||
l, e := net.ListenTCP(srv.Net, a)
|
l, err := net.ListenTCP(srv.Net, a)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
return e
|
return err
|
||||||
}
|
}
|
||||||
srv.Listener = l
|
srv.Listener = l
|
||||||
|
srv.started = true
|
||||||
srv.lock.Unlock()
|
srv.lock.Unlock()
|
||||||
return srv.serveTCP(l)
|
err = srv.serveTCP(l)
|
||||||
case "udp", "udp4", "udp6":
|
srv.lock.Lock() // to satisfy the defer at the top
|
||||||
a, e := net.ResolveUDPAddr(srv.Net, addr)
|
return err
|
||||||
if e != nil {
|
case "tcp-tls", "tcp4-tls", "tcp6-tls":
|
||||||
return e
|
network := "tcp"
|
||||||
|
if srv.Net == "tcp4-tls" {
|
||||||
|
network = "tcp4"
|
||||||
|
} else if srv.Net == "tcp6" {
|
||||||
|
network = "tcp6"
|
||||||
}
|
}
|
||||||
l, e := net.ListenUDP(srv.Net, a)
|
|
||||||
if e != nil {
|
l, err := tls.Listen(network, addr, srv.TLSConfig)
|
||||||
return e
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
srv.Listener = l
|
||||||
|
srv.started = true
|
||||||
|
srv.lock.Unlock()
|
||||||
|
err = srv.serveTCP(l)
|
||||||
|
srv.lock.Lock() // to satisfy the defer at the top
|
||||||
|
return err
|
||||||
|
case "udp", "udp4", "udp6":
|
||||||
|
a, err := net.ResolveUDPAddr(srv.Net, addr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
l, err := net.ListenUDP(srv.Net, a)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
if e := setUDPSocketOptions(l); e != nil {
|
if e := setUDPSocketOptions(l); e != nil {
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
srv.PacketConn = l
|
srv.PacketConn = l
|
||||||
|
srv.started = true
|
||||||
srv.lock.Unlock()
|
srv.lock.Unlock()
|
||||||
return srv.serveUDP(l)
|
err = srv.serveUDP(l)
|
||||||
|
srv.lock.Lock() // to satisfy the defer at the top
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
srv.lock.Unlock()
|
|
||||||
return &Error{err: "bad network"}
|
return &Error{err: "bad network"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -335,15 +379,12 @@ func (srv *Server) ListenAndServe() error {
|
||||||
// configured in *Server. Its main use is to start a server from systemd.
|
// configured in *Server. Its main use is to start a server from systemd.
|
||||||
func (srv *Server) ActivateAndServe() error {
|
func (srv *Server) ActivateAndServe() error {
|
||||||
srv.lock.Lock()
|
srv.lock.Lock()
|
||||||
|
defer srv.lock.Unlock()
|
||||||
if srv.started {
|
if srv.started {
|
||||||
srv.lock.Unlock()
|
|
||||||
return &Error{err: "server already started"}
|
return &Error{err: "server already started"}
|
||||||
}
|
}
|
||||||
srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
|
|
||||||
srv.started = true
|
|
||||||
pConn := srv.PacketConn
|
pConn := srv.PacketConn
|
||||||
l := srv.Listener
|
l := srv.Listener
|
||||||
srv.lock.Unlock()
|
|
||||||
if pConn != nil {
|
if pConn != nil {
|
||||||
if srv.UDPSize == 0 {
|
if srv.UDPSize == 0 {
|
||||||
srv.UDPSize = MinMsgSize
|
srv.UDPSize = MinMsgSize
|
||||||
|
@ -352,13 +393,19 @@ func (srv *Server) ActivateAndServe() error {
|
||||||
if e := setUDPSocketOptions(t); e != nil {
|
if e := setUDPSocketOptions(t); e != nil {
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
return srv.serveUDP(t)
|
srv.started = true
|
||||||
|
srv.lock.Unlock()
|
||||||
|
e := srv.serveUDP(t)
|
||||||
|
srv.lock.Lock() // to satisfy the defer at the top
|
||||||
|
return e
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if l != nil {
|
if l != nil {
|
||||||
if t, ok := l.(*net.TCPListener); ok {
|
srv.started = true
|
||||||
return srv.serveTCP(t)
|
srv.lock.Unlock()
|
||||||
}
|
e := srv.serveTCP(l)
|
||||||
|
srv.lock.Lock() // to satisfy the defer at the top
|
||||||
|
return e
|
||||||
}
|
}
|
||||||
return &Error{err: "bad listeners"}
|
return &Error{err: "bad listeners"}
|
||||||
}
|
}
|
||||||
|
@ -374,36 +421,20 @@ func (srv *Server) Shutdown() error {
|
||||||
return &Error{err: "server not started"}
|
return &Error{err: "server not started"}
|
||||||
}
|
}
|
||||||
srv.started = false
|
srv.started = false
|
||||||
net, addr := srv.Net, srv.Addr
|
|
||||||
switch {
|
|
||||||
case srv.Listener != nil:
|
|
||||||
a := srv.Listener.Addr()
|
|
||||||
net, addr = a.Network(), a.String()
|
|
||||||
case srv.PacketConn != nil:
|
|
||||||
a := srv.PacketConn.LocalAddr()
|
|
||||||
net, addr = a.Network(), a.String()
|
|
||||||
}
|
|
||||||
srv.lock.Unlock()
|
srv.lock.Unlock()
|
||||||
|
|
||||||
fin := make(chan bool)
|
if srv.PacketConn != nil {
|
||||||
switch net {
|
srv.PacketConn.Close()
|
||||||
case "tcp", "tcp4", "tcp6":
|
}
|
||||||
go func() {
|
if srv.Listener != nil {
|
||||||
srv.stopTCP <- true
|
srv.Listener.Close()
|
||||||
srv.wgTCP.Wait()
|
|
||||||
fin <- true
|
|
||||||
}()
|
|
||||||
|
|
||||||
case "udp", "udp4", "udp6":
|
|
||||||
go func() {
|
|
||||||
srv.stopUDP <- true
|
|
||||||
srv.wgUDP.Wait()
|
|
||||||
fin <- true
|
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
c := &Client{Net: net}
|
fin := make(chan bool)
|
||||||
go c.Exchange(new(Msg), addr) // extra query to help ReadXXX loop to pass
|
go func() {
|
||||||
|
srv.inFlight.Wait()
|
||||||
|
fin <- true
|
||||||
|
}()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-time.After(srv.getReadTimeout()):
|
case <-time.After(srv.getReadTimeout()):
|
||||||
|
@ -424,7 +455,7 @@ func (srv *Server) getReadTimeout() time.Duration {
|
||||||
|
|
||||||
// serveTCP starts a TCP listener for the server.
|
// serveTCP starts a TCP listener for the server.
|
||||||
// Each request is handled in a separate goroutine.
|
// Each request is handled in a separate goroutine.
|
||||||
func (srv *Server) serveTCP(l *net.TCPListener) error {
|
func (srv *Server) serveTCP(l net.Listener) error {
|
||||||
defer l.Close()
|
defer l.Close()
|
||||||
|
|
||||||
if srv.NotifyStartedFunc != nil {
|
if srv.NotifyStartedFunc != nil {
|
||||||
|
@ -443,20 +474,24 @@ func (srv *Server) serveTCP(l *net.TCPListener) error {
|
||||||
rtimeout := srv.getReadTimeout()
|
rtimeout := srv.getReadTimeout()
|
||||||
// deadline is not used here
|
// deadline is not used here
|
||||||
for {
|
for {
|
||||||
rw, e := l.AcceptTCP()
|
rw, err := l.Accept()
|
||||||
if e != nil {
|
if err != nil {
|
||||||
|
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
m, e := reader.ReadTCP(rw, rtimeout)
|
return err
|
||||||
select {
|
}
|
||||||
case <-srv.stopTCP:
|
m, err := reader.ReadTCP(rw, rtimeout)
|
||||||
|
srv.lock.RLock()
|
||||||
|
if !srv.started {
|
||||||
|
srv.lock.RUnlock()
|
||||||
return nil
|
return nil
|
||||||
default:
|
|
||||||
}
|
}
|
||||||
if e != nil {
|
srv.lock.RUnlock()
|
||||||
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
srv.wgTCP.Add(1)
|
srv.inFlight.Add(1)
|
||||||
go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
|
go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -482,22 +517,25 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
|
||||||
rtimeout := srv.getReadTimeout()
|
rtimeout := srv.getReadTimeout()
|
||||||
// deadline is not used here
|
// deadline is not used here
|
||||||
for {
|
for {
|
||||||
m, s, e := reader.ReadUDP(l, rtimeout)
|
m, s, err := reader.ReadUDP(l, rtimeout)
|
||||||
select {
|
srv.lock.RLock()
|
||||||
case <-srv.stopUDP:
|
if !srv.started {
|
||||||
|
srv.lock.RUnlock()
|
||||||
return nil
|
return nil
|
||||||
default:
|
|
||||||
}
|
}
|
||||||
if e != nil {
|
srv.lock.RUnlock()
|
||||||
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
srv.wgUDP.Add(1)
|
srv.inFlight.Add(1)
|
||||||
go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
|
go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Serve a new connection.
|
// Serve a new connection.
|
||||||
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t *net.TCPConn) {
|
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) {
|
||||||
|
defer srv.inFlight.Done()
|
||||||
|
|
||||||
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
|
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
|
||||||
if srv.DecorateWriter != nil {
|
if srv.DecorateWriter != nil {
|
||||||
w.writer = srv.DecorateWriter(w)
|
w.writer = srv.DecorateWriter(w)
|
||||||
|
@ -507,15 +545,6 @@ func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *Ses
|
||||||
|
|
||||||
q := 0 // counter for the amount of TCP queries we get
|
q := 0 // counter for the amount of TCP queries we get
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if u != nil {
|
|
||||||
srv.wgUDP.Done()
|
|
||||||
}
|
|
||||||
if t != nil {
|
|
||||||
srv.wgTCP.Done()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
reader := Reader(&defaultReader{srv})
|
reader := Reader(&defaultReader{srv})
|
||||||
if srv.DecorateReader != nil {
|
if srv.DecorateReader != nil {
|
||||||
reader = srv.DecorateReader(reader)
|
reader = srv.DecorateReader(reader)
|
||||||
|
@ -548,6 +577,9 @@ Redo:
|
||||||
h.ServeDNS(w, req) // Writes back to the client
|
h.ServeDNS(w, req) // Writes back to the client
|
||||||
|
|
||||||
Exit:
|
Exit:
|
||||||
|
if w.tcp == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
// TODO(miek): make this number configurable?
|
// TODO(miek): make this number configurable?
|
||||||
if q > maxTCPQueries { // close socket after this many queries
|
if q > maxTCPQueries { // close socket after this many queries
|
||||||
w.Close()
|
w.Close()
|
||||||
|
@ -565,8 +597,8 @@ Exit:
|
||||||
if srv.IdleTimeout != nil {
|
if srv.IdleTimeout != nil {
|
||||||
idleTimeout = srv.IdleTimeout()
|
idleTimeout = srv.IdleTimeout()
|
||||||
}
|
}
|
||||||
m, e := reader.ReadTCP(w.tcp, idleTimeout)
|
m, err = reader.ReadTCP(w.tcp, idleTimeout)
|
||||||
if e == nil {
|
if err == nil {
|
||||||
q++
|
q++
|
||||||
goto Redo
|
goto Redo
|
||||||
}
|
}
|
||||||
|
@ -574,7 +606,7 @@ Exit:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) {
|
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
|
||||||
conn.SetReadDeadline(time.Now().Add(timeout))
|
conn.SetReadDeadline(time.Now().Add(timeout))
|
||||||
l := make([]byte, 2)
|
l := make([]byte, 2)
|
||||||
n, err := conn.Read(l)
|
n, err := conn.Read(l)
|
||||||
|
@ -584,7 +616,7 @@ func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, er
|
||||||
}
|
}
|
||||||
return nil, ErrShortRead
|
return nil, ErrShortRead
|
||||||
}
|
}
|
||||||
length, _ := unpackUint16(l, 0)
|
length := binary.BigEndian.Uint16(l)
|
||||||
if length == 0 {
|
if length == 0 {
|
||||||
return nil, ErrShortRead
|
return nil, ErrShortRead
|
||||||
}
|
}
|
||||||
|
@ -612,10 +644,10 @@ func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, er
|
||||||
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
|
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
|
||||||
conn.SetReadDeadline(time.Now().Add(timeout))
|
conn.SetReadDeadline(time.Now().Add(timeout))
|
||||||
m := make([]byte, srv.UDPSize)
|
m := make([]byte, srv.UDPSize)
|
||||||
n, s, e := ReadFromSessionUDP(conn, m)
|
n, s, err := ReadFromSessionUDP(conn, m)
|
||||||
if e != nil || n == 0 {
|
if err != nil || n == 0 {
|
||||||
if e != nil {
|
if err != nil {
|
||||||
return nil, nil, e
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
return nil, nil, ErrShortRead
|
return nil, nil, ErrShortRead
|
||||||
}
|
}
|
||||||
|
@ -659,7 +691,7 @@ func (w *response) Write(m []byte) (int, error) {
|
||||||
return 0, &Error{err: "message too large"}
|
return 0, &Error{err: "message too large"}
|
||||||
}
|
}
|
||||||
l := make([]byte, 2, 2+lm)
|
l := make([]byte, 2, 2+lm)
|
||||||
l[0], l[1] = packUint16(uint16(lm))
|
binary.BigEndian.PutUint16(l, uint16(lm))
|
||||||
m = append(l, m...)
|
m = append(l, m...)
|
||||||
|
|
||||||
n, err := io.Copy(w.tcp, bytes.NewReader(m))
|
n, err := io.Copy(w.tcp, bytes.NewReader(m))
|
||||||
|
|
25
vendor/github.com/miekg/dns/sig0.go
generated
vendored
25
vendor/github.com/miekg/dns/sig0.go
generated
vendored
|
@ -5,6 +5,7 @@ import (
|
||||||
"crypto/dsa"
|
"crypto/dsa"
|
||||||
"crypto/ecdsa"
|
"crypto/ecdsa"
|
||||||
"crypto/rsa"
|
"crypto/rsa"
|
||||||
|
"encoding/binary"
|
||||||
"math/big"
|
"math/big"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
@ -67,13 +68,13 @@ func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
|
||||||
}
|
}
|
||||||
// Adjust sig data length
|
// Adjust sig data length
|
||||||
rdoff := len(mbuf) + 1 + 2 + 2 + 4
|
rdoff := len(mbuf) + 1 + 2 + 2 + 4
|
||||||
rdlen, _ := unpackUint16(buf, rdoff)
|
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
|
||||||
rdlen += uint16(len(sig))
|
rdlen += uint16(len(sig))
|
||||||
buf[rdoff], buf[rdoff+1] = packUint16(rdlen)
|
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
|
||||||
// Adjust additional count
|
// Adjust additional count
|
||||||
adc, _ := unpackUint16(buf, 10)
|
adc := binary.BigEndian.Uint16(buf[10:])
|
||||||
adc++
|
adc++
|
||||||
buf[10], buf[11] = packUint16(adc)
|
binary.BigEndian.PutUint16(buf[10:], adc)
|
||||||
return buf, nil
|
return buf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -103,10 +104,11 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
|
||||||
hasher := hash.New()
|
hasher := hash.New()
|
||||||
|
|
||||||
buflen := len(buf)
|
buflen := len(buf)
|
||||||
qdc, _ := unpackUint16(buf, 4)
|
qdc := binary.BigEndian.Uint16(buf[4:])
|
||||||
anc, _ := unpackUint16(buf, 6)
|
anc := binary.BigEndian.Uint16(buf[6:])
|
||||||
auc, _ := unpackUint16(buf, 8)
|
auc := binary.BigEndian.Uint16(buf[8:])
|
||||||
adc, offset := unpackUint16(buf, 10)
|
adc := binary.BigEndian.Uint16(buf[10:])
|
||||||
|
offset := 12
|
||||||
var err error
|
var err error
|
||||||
for i := uint16(0); i < qdc && offset < buflen; i++ {
|
for i := uint16(0); i < qdc && offset < buflen; i++ {
|
||||||
_, offset, err = UnpackDomainName(buf, offset)
|
_, offset, err = UnpackDomainName(buf, offset)
|
||||||
|
@ -127,7 +129,8 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
var rdlen uint16
|
var rdlen uint16
|
||||||
rdlen, offset = unpackUint16(buf, offset)
|
rdlen = binary.BigEndian.Uint16(buf[offset:])
|
||||||
|
offset += 2
|
||||||
offset += int(rdlen)
|
offset += int(rdlen)
|
||||||
}
|
}
|
||||||
if offset >= buflen {
|
if offset >= buflen {
|
||||||
|
@ -149,9 +152,9 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
|
||||||
if offset+4+4 >= buflen {
|
if offset+4+4 >= buflen {
|
||||||
return &Error{err: "overflow unpacking signed message"}
|
return &Error{err: "overflow unpacking signed message"}
|
||||||
}
|
}
|
||||||
expire := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
|
expire := binary.BigEndian.Uint32(buf[offset:])
|
||||||
offset += 4
|
offset += 4
|
||||||
incept := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
|
incept := binary.BigEndian.Uint32(buf[offset:])
|
||||||
offset += 4
|
offset += 4
|
||||||
now := uint32(time.Now().Unix())
|
now := uint32(time.Now().Unix())
|
||||||
if now < incept || now > expire {
|
if now < incept || now > expire {
|
||||||
|
|
47
vendor/github.com/miekg/dns/smimea.go
generated
vendored
Normal file
47
vendor/github.com/miekg/dns/smimea.go
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/hex"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sign creates a SMIMEA record from an SSL certificate.
|
||||||
|
func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
|
||||||
|
r.Hdr.Rrtype = TypeSMIMEA
|
||||||
|
r.Usage = uint8(usage)
|
||||||
|
r.Selector = uint8(selector)
|
||||||
|
r.MatchingType = uint8(matchingType)
|
||||||
|
|
||||||
|
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify verifies a SMIMEA record against an SSL certificate. If it is OK
|
||||||
|
// a nil error is returned.
|
||||||
|
func (r *SMIMEA) Verify(cert *x509.Certificate) error {
|
||||||
|
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||||
|
if err != nil {
|
||||||
|
return err // Not also ErrSig?
|
||||||
|
}
|
||||||
|
if r.Certificate == c {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return ErrSig // ErrSig, really?
|
||||||
|
}
|
||||||
|
|
||||||
|
// SIMEAName returns the ownername of a SMIMEA resource record as per the
|
||||||
|
// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3
|
||||||
|
func SMIMEAName(email_address string, domain_name string) (string, error) {
|
||||||
|
hasher := sha256.New()
|
||||||
|
hasher.Write([]byte(email_address))
|
||||||
|
|
||||||
|
// RFC Section 3: "The local-part is hashed using the SHA2-256
|
||||||
|
// algorithm with the hash truncated to 28 octets and
|
||||||
|
// represented in its hexadecimal representation to become the
|
||||||
|
// left-most label in the prepared domain name"
|
||||||
|
return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain_name, nil
|
||||||
|
}
|
47
vendor/github.com/miekg/dns/tlsa.go
generated
vendored
47
vendor/github.com/miekg/dns/tlsa.go
generated
vendored
|
@ -1,50 +1,11 @@
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/sha256"
|
|
||||||
"crypto/sha512"
|
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CertificateToDANE converts a certificate to a hex string as used in the TLSA record.
|
|
||||||
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
|
|
||||||
switch matchingType {
|
|
||||||
case 0:
|
|
||||||
switch selector {
|
|
||||||
case 0:
|
|
||||||
return hex.EncodeToString(cert.Raw), nil
|
|
||||||
case 1:
|
|
||||||
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
|
|
||||||
}
|
|
||||||
case 1:
|
|
||||||
h := sha256.New()
|
|
||||||
switch selector {
|
|
||||||
case 0:
|
|
||||||
io.WriteString(h, string(cert.Raw))
|
|
||||||
return hex.EncodeToString(h.Sum(nil)), nil
|
|
||||||
case 1:
|
|
||||||
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
|
|
||||||
return hex.EncodeToString(h.Sum(nil)), nil
|
|
||||||
}
|
|
||||||
case 2:
|
|
||||||
h := sha512.New()
|
|
||||||
switch selector {
|
|
||||||
case 0:
|
|
||||||
io.WriteString(h, string(cert.Raw))
|
|
||||||
return hex.EncodeToString(h.Sum(nil)), nil
|
|
||||||
case 1:
|
|
||||||
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
|
|
||||||
return hex.EncodeToString(h.Sum(nil)), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign creates a TLSA record from an SSL certificate.
|
// Sign creates a TLSA record from an SSL certificate.
|
||||||
func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
|
func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
|
||||||
r.Hdr.Rrtype = TypeTLSA
|
r.Hdr.Rrtype = TypeTLSA
|
||||||
|
@ -78,9 +39,9 @@ func TLSAName(name, service, network string) (string, error) {
|
||||||
if !IsFqdn(name) {
|
if !IsFqdn(name) {
|
||||||
return "", ErrFqdn
|
return "", ErrFqdn
|
||||||
}
|
}
|
||||||
p, e := net.LookupPort(network, service)
|
p, err := net.LookupPort(network, service)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
return "", e
|
return "", err
|
||||||
}
|
}
|
||||||
return "_" + strconv.Itoa(p) + "_" + network + "." + name, nil
|
return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
|
||||||
}
|
}
|
||||||
|
|
165
vendor/github.com/miekg/dns/tsig.go
generated
vendored
165
vendor/github.com/miekg/dns/tsig.go
generated
vendored
|
@ -6,6 +6,7 @@ import (
|
||||||
"crypto/sha1"
|
"crypto/sha1"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"crypto/sha512"
|
"crypto/sha512"
|
||||||
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"hash"
|
"hash"
|
||||||
"io"
|
"io"
|
||||||
|
@ -30,15 +31,11 @@ type TSIG struct {
|
||||||
TimeSigned uint64 `dns:"uint48"`
|
TimeSigned uint64 `dns:"uint48"`
|
||||||
Fudge uint16
|
Fudge uint16
|
||||||
MACSize uint16
|
MACSize uint16
|
||||||
MAC string `dns:"size-hex"`
|
MAC string `dns:"size-hex:MACSize"`
|
||||||
OrigId uint16
|
OrigId uint16
|
||||||
Error uint16
|
Error uint16
|
||||||
OtherLen uint16
|
OtherLen uint16
|
||||||
OtherData string `dns:"size-hex"`
|
OtherData string `dns:"size-hex:OtherLen"`
|
||||||
}
|
|
||||||
|
|
||||||
func (rr *TSIG) Header() *RR_Header {
|
|
||||||
return &rr.Hdr
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TSIG has no official presentation format, but this will suffice.
|
// TSIG has no official presentation format, but this will suffice.
|
||||||
|
@ -58,15 +55,6 @@ func (rr *TSIG) String() string {
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rr *TSIG) len() int {
|
|
||||||
return rr.Hdr.len() + len(rr.Algorithm) + 1 + 6 +
|
|
||||||
4 + len(rr.MAC)/2 + 1 + 6 + len(rr.OtherData)/2 + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rr *TSIG) copy() RR {
|
|
||||||
return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The following values must be put in wireformat, so that the MAC can be calculated.
|
// The following values must be put in wireformat, so that the MAC can be calculated.
|
||||||
// RFC 2845, section 3.4.2. TSIG Variables.
|
// RFC 2845, section 3.4.2. TSIG Variables.
|
||||||
type tsigWireFmt struct {
|
type tsigWireFmt struct {
|
||||||
|
@ -81,14 +69,13 @@ type tsigWireFmt struct {
|
||||||
// MACSize, MAC and OrigId excluded
|
// MACSize, MAC and OrigId excluded
|
||||||
Error uint16
|
Error uint16
|
||||||
OtherLen uint16
|
OtherLen uint16
|
||||||
OtherData string `dns:"size-hex"`
|
OtherData string `dns:"size-hex:OtherLen"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we have the MAC use this type to convert it to wiredata.
|
// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC
|
||||||
// Section 3.4.3. Request MAC
|
|
||||||
type macWireFmt struct {
|
type macWireFmt struct {
|
||||||
MACSize uint16
|
MACSize uint16
|
||||||
MAC string `dns:"size-hex"`
|
MAC string `dns:"size-hex:MACSize"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3.3. Time values used in TSIG calculations
|
// 3.3. Time values used in TSIG calculations
|
||||||
|
@ -125,7 +112,7 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
|
||||||
|
|
||||||
t := new(TSIG)
|
t := new(TSIG)
|
||||||
var h hash.Hash
|
var h hash.Hash
|
||||||
switch rr.Algorithm {
|
switch strings.ToLower(rr.Algorithm) {
|
||||||
case HmacMD5:
|
case HmacMD5:
|
||||||
h = hmac.New(md5.New, []byte(rawsecret))
|
h = hmac.New(md5.New, []byte(rawsecret))
|
||||||
case HmacSHA1:
|
case HmacSHA1:
|
||||||
|
@ -154,7 +141,9 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
mbuf = append(mbuf, tbuf...)
|
mbuf = append(mbuf, tbuf...)
|
||||||
rawSetExtraLen(mbuf, uint16(len(m.Extra)+1))
|
// Update the ArCount directly in the buffer.
|
||||||
|
binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
|
||||||
|
|
||||||
return mbuf, t.MAC, nil
|
return mbuf, t.MAC, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -191,7 +180,7 @@ func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
var h hash.Hash
|
var h hash.Hash
|
||||||
switch tsig.Algorithm {
|
switch strings.ToLower(tsig.Algorithm) {
|
||||||
case HmacMD5:
|
case HmacMD5:
|
||||||
h = hmac.New(md5.New, rawsecret)
|
h = hmac.New(md5.New, rawsecret)
|
||||||
case HmacSHA1:
|
case HmacSHA1:
|
||||||
|
@ -225,7 +214,7 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
|
||||||
m.MACSize = uint16(len(requestMAC) / 2)
|
m.MACSize = uint16(len(requestMAC) / 2)
|
||||||
m.MAC = requestMAC
|
m.MAC = requestMAC
|
||||||
buf = make([]byte, len(requestMAC)) // long enough
|
buf = make([]byte, len(requestMAC)) // long enough
|
||||||
n, _ := PackStruct(m, buf, 0)
|
n, _ := packMacWire(m, buf)
|
||||||
buf = buf[:n]
|
buf = buf[:n]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -234,7 +223,7 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
|
||||||
tsig := new(timerWireFmt)
|
tsig := new(timerWireFmt)
|
||||||
tsig.TimeSigned = rr.TimeSigned
|
tsig.TimeSigned = rr.TimeSigned
|
||||||
tsig.Fudge = rr.Fudge
|
tsig.Fudge = rr.Fudge
|
||||||
n, _ := PackStruct(tsig, tsigvar, 0)
|
n, _ := packTimerWire(tsig, tsigvar)
|
||||||
tsigvar = tsigvar[:n]
|
tsigvar = tsigvar[:n]
|
||||||
} else {
|
} else {
|
||||||
tsig := new(tsigWireFmt)
|
tsig := new(tsigWireFmt)
|
||||||
|
@ -247,7 +236,7 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
|
||||||
tsig.Error = rr.Error
|
tsig.Error = rr.Error
|
||||||
tsig.OtherLen = rr.OtherLen
|
tsig.OtherLen = rr.OtherLen
|
||||||
tsig.OtherData = rr.OtherData
|
tsig.OtherData = rr.OtherData
|
||||||
n, _ := PackStruct(tsig, tsigvar, 0)
|
n, _ := packTsigWire(tsig, tsigvar)
|
||||||
tsigvar = tsigvar[:n]
|
tsigvar = tsigvar[:n]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -262,60 +251,54 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
|
||||||
|
|
||||||
// Strip the TSIG from the raw message.
|
// Strip the TSIG from the raw message.
|
||||||
func stripTsig(msg []byte) ([]byte, *TSIG, error) {
|
func stripTsig(msg []byte) ([]byte, *TSIG, error) {
|
||||||
// Copied from msg.go's Unpack()
|
// Copied from msg.go's Unpack() Header, but modified.
|
||||||
// Header.
|
var (
|
||||||
var dh Header
|
dh Header
|
||||||
var err error
|
err error
|
||||||
dns := new(Msg)
|
)
|
||||||
rr := new(TSIG)
|
off, tsigoff := 0, 0
|
||||||
off := 0
|
|
||||||
tsigoff := 0
|
if dh, off, err = unpackMsgHdr(msg, off); err != nil {
|
||||||
if off, err = UnpackStruct(&dh, msg, off); err != nil {
|
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if dh.Arcount == 0 {
|
if dh.Arcount == 0 {
|
||||||
return nil, nil, ErrNoSig
|
return nil, nil, ErrNoSig
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rcode, see msg.go Unpack()
|
// Rcode, see msg.go Unpack()
|
||||||
if int(dh.Bits&0xF) == RcodeNotAuth {
|
if int(dh.Bits&0xF) == RcodeNotAuth {
|
||||||
return nil, nil, ErrAuth
|
return nil, nil, ErrAuth
|
||||||
}
|
}
|
||||||
|
|
||||||
// Arrays.
|
for i := 0; i < int(dh.Qdcount); i++ {
|
||||||
dns.Question = make([]Question, dh.Qdcount)
|
_, off, err = unpackQuestion(msg, off)
|
||||||
dns.Answer = make([]RR, dh.Ancount)
|
if err != nil {
|
||||||
dns.Ns = make([]RR, dh.Nscount)
|
return nil, nil, err
|
||||||
dns.Extra = make([]RR, dh.Arcount)
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for i := 0; i < len(dns.Question); i++ {
|
_, off, err = unpackRRslice(int(dh.Ancount), msg, off)
|
||||||
off, err = UnpackStruct(&dns.Question[i], msg, off)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
}
|
_, off, err = unpackRRslice(int(dh.Nscount), msg, off)
|
||||||
for i := 0; i < len(dns.Answer); i++ {
|
|
||||||
dns.Answer[i], off, err = UnpackRR(msg, off)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
}
|
|
||||||
for i := 0; i < len(dns.Ns); i++ {
|
rr := new(TSIG)
|
||||||
dns.Ns[i], off, err = UnpackRR(msg, off)
|
var extra RR
|
||||||
if err != nil {
|
for i := 0; i < int(dh.Arcount); i++ {
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i := 0; i < len(dns.Extra); i++ {
|
|
||||||
tsigoff = off
|
tsigoff = off
|
||||||
dns.Extra[i], off, err = UnpackRR(msg, off)
|
extra, off, err = UnpackRR(msg, off)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if dns.Extra[i].Header().Rrtype == TypeTSIG {
|
if extra.Header().Rrtype == TypeTSIG {
|
||||||
rr = dns.Extra[i].(*TSIG)
|
rr = extra.(*TSIG)
|
||||||
// Adjust Arcount.
|
// Adjust Arcount.
|
||||||
arcount, _ := unpackUint16(msg, 10)
|
arcount := binary.BigEndian.Uint16(msg[10:])
|
||||||
msg[10], msg[11] = packUint16(arcount - 1)
|
binary.BigEndian.PutUint16(msg[10:], arcount-1)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -331,3 +314,71 @@ func tsigTimeToString(t uint64) string {
|
||||||
ti := time.Unix(int64(t), 0).UTC()
|
ti := time.Unix(int64(t), 0).UTC()
|
||||||
return ti.Format("20060102150405")
|
return ti.Format("20060102150405")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) {
|
||||||
|
// copied from zmsg.go TSIG packing
|
||||||
|
// RR_Header
|
||||||
|
off, err := PackDomainName(tw.Name, msg, 0, nil, false)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint16(tw.Class, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint32(tw.Ttl, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
|
||||||
|
off, err = PackDomainName(tw.Algorithm, msg, off, nil, false)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint48(tw.TimeSigned, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint16(tw.Fudge, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
|
||||||
|
off, err = packUint16(tw.Error, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint16(tw.OtherLen, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packStringHex(tw.OtherData, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packMacWire(mw *macWireFmt, msg []byte) (int, error) {
|
||||||
|
off, err := packUint16(mw.MACSize, msg, 0)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packStringHex(mw.MAC, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) {
|
||||||
|
off, err := packUint48(tw.TimeSigned, msg, 0)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
off, err = packUint16(tw.Fudge, msg, off)
|
||||||
|
if err != nil {
|
||||||
|
return off, err
|
||||||
|
}
|
||||||
|
return off, nil
|
||||||
|
}
|
||||||
|
|
569
vendor/github.com/miekg/dns/types.go
generated
vendored
569
vendor/github.com/miekg/dns/types.go
generated
vendored
File diff suppressed because it is too large
Load diff
271
vendor/github.com/miekg/dns/types_generate.go
generated
vendored
Normal file
271
vendor/github.com/miekg/dns/types_generate.go
generated
vendored
Normal file
|
@ -0,0 +1,271 @@
|
||||||
|
//+build ignore
|
||||||
|
|
||||||
|
// types_generate.go is meant to run with go generate. It will use
|
||||||
|
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||||
|
// it will generate conversion tables (TypeToRR and TypeToString) and banal
|
||||||
|
// methods (len, Header, copy) based on the struct tags. The generated source is
|
||||||
|
// written to ztypes.go, and is meant to be checked into git.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"go/importer"
|
||||||
|
"go/types"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"text/template"
|
||||||
|
)
|
||||||
|
|
||||||
|
var skipLen = map[string]struct{}{
|
||||||
|
"NSEC": {},
|
||||||
|
"NSEC3": {},
|
||||||
|
"OPT": {},
|
||||||
|
}
|
||||||
|
|
||||||
|
var packageHdr = `
|
||||||
|
// *** DO NOT MODIFY ***
|
||||||
|
// AUTOGENERATED BY go generate from type_generate.go
|
||||||
|
|
||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
`
|
||||||
|
|
||||||
|
var TypeToRR = template.Must(template.New("TypeToRR").Parse(`
|
||||||
|
// TypeToRR is a map of constructors for each RR type.
|
||||||
|
var TypeToRR = map[uint16]func() RR{
|
||||||
|
{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) },
|
||||||
|
{{end}}{{end}} }
|
||||||
|
|
||||||
|
`))
|
||||||
|
|
||||||
|
var typeToString = template.Must(template.New("typeToString").Parse(`
|
||||||
|
// TypeToString is a map of strings for each RR type.
|
||||||
|
var TypeToString = map[uint16]string{
|
||||||
|
{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}",
|
||||||
|
{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR",
|
||||||
|
}
|
||||||
|
|
||||||
|
`))
|
||||||
|
|
||||||
|
var headerFunc = template.Must(template.New("headerFunc").Parse(`
|
||||||
|
// Header() functions
|
||||||
|
{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
`))
|
||||||
|
|
||||||
|
// getTypeStruct will take a type and the package scope, and return the
|
||||||
|
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||||
|
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||||
|
// the RR interface). The bool return value indicates if embedded structs were
|
||||||
|
// resolved.
|
||||||
|
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||||
|
st, ok := t.Underlying().(*types.Struct)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||||
|
return st, false
|
||||||
|
}
|
||||||
|
if st.Field(0).Anonymous() {
|
||||||
|
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||||
|
return st, true
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Import and type-check the package
|
||||||
|
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||||
|
fatalIfErr(err)
|
||||||
|
scope := pkg.Scope()
|
||||||
|
|
||||||
|
// Collect constants like TypeX
|
||||||
|
var numberedTypes []string
|
||||||
|
for _, name := range scope.Names() {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
if o == nil || !o.Exported() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
b, ok := o.Type().(*types.Basic)
|
||||||
|
if !ok || b.Kind() != types.Uint16 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(o.Name(), "Type") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := strings.TrimPrefix(o.Name(), "Type")
|
||||||
|
if name == "PrivateRR" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
numberedTypes = append(numberedTypes, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect actual types (*X)
|
||||||
|
var namedTypes []string
|
||||||
|
for _, name := range scope.Names() {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
if o == nil || !o.Exported() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if name == "PrivateRR" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if corresponding TypeX exists
|
||||||
|
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||||
|
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
namedTypes = append(namedTypes, o.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
b := &bytes.Buffer{}
|
||||||
|
b.WriteString(packageHdr)
|
||||||
|
|
||||||
|
// Generate TypeToRR
|
||||||
|
fatalIfErr(TypeToRR.Execute(b, namedTypes))
|
||||||
|
|
||||||
|
// Generate typeToString
|
||||||
|
fatalIfErr(typeToString.Execute(b, numberedTypes))
|
||||||
|
|
||||||
|
// Generate headerFunc
|
||||||
|
fatalIfErr(headerFunc.Execute(b, namedTypes))
|
||||||
|
|
||||||
|
// Generate len()
|
||||||
|
fmt.Fprint(b, "// len() functions\n")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
if _, ok := skipLen[name]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||||
|
if isEmbedded {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "func (rr *%s) len() int {\n", name)
|
||||||
|
fmt.Fprintf(b, "l := rr.Hdr.len()\n")
|
||||||
|
for i := 1; i < st.NumFields(); i++ {
|
||||||
|
o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
|
||||||
|
|
||||||
|
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||||
|
switch st.Tag(i) {
|
||||||
|
case `dns:"-"`:
|
||||||
|
// ignored
|
||||||
|
case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`:
|
||||||
|
o("for _, x := range rr.%s { l += len(x) + 1 }\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case st.Tag(i) == `dns:"-"`:
|
||||||
|
// ignored
|
||||||
|
case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`:
|
||||||
|
o("l += len(rr.%s) + 1\n")
|
||||||
|
case st.Tag(i) == `dns:"octet"`:
|
||||||
|
o("l += len(rr.%s)\n")
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"base64"`:
|
||||||
|
o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
|
||||||
|
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
|
||||||
|
fallthrough
|
||||||
|
case st.Tag(i) == `dns:"hex"`:
|
||||||
|
o("l += len(rr.%s)/2 + 1\n")
|
||||||
|
case st.Tag(i) == `dns:"a"`:
|
||||||
|
o("l += net.IPv4len // %s\n")
|
||||||
|
case st.Tag(i) == `dns:"aaaa"`:
|
||||||
|
o("l += net.IPv6len // %s\n")
|
||||||
|
case st.Tag(i) == `dns:"txt"`:
|
||||||
|
o("for _, t := range rr.%s { l += len(t) + 1 }\n")
|
||||||
|
case st.Tag(i) == `dns:"uint48"`:
|
||||||
|
o("l += 6 // %s\n")
|
||||||
|
case st.Tag(i) == "":
|
||||||
|
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||||
|
case types.Uint8:
|
||||||
|
o("l += 1 // %s\n")
|
||||||
|
case types.Uint16:
|
||||||
|
o("l += 2 // %s\n")
|
||||||
|
case types.Uint32:
|
||||||
|
o("l += 4 // %s\n")
|
||||||
|
case types.Uint64:
|
||||||
|
o("l += 8 // %s\n")
|
||||||
|
case types.String:
|
||||||
|
o("l += len(rr.%s) + 1\n")
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name())
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "return l }\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate copy()
|
||||||
|
fmt.Fprint(b, "// copy() functions\n")
|
||||||
|
for _, name := range namedTypes {
|
||||||
|
o := scope.Lookup(name)
|
||||||
|
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||||
|
if isEmbedded {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
|
||||||
|
fields := []string{"*rr.Hdr.copyHeader()"}
|
||||||
|
for i := 1; i < st.NumFields(); i++ {
|
||||||
|
f := st.Field(i).Name()
|
||||||
|
if sl, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||||
|
t := sl.Underlying().String()
|
||||||
|
t = strings.TrimPrefix(t, "[]")
|
||||||
|
if strings.Contains(t, ".") {
|
||||||
|
splits := strings.Split(t, ".")
|
||||||
|
t = splits[len(splits)-1]
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
|
||||||
|
f, t, f, f, f)
|
||||||
|
fields = append(fields, f)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if st.Field(i).Type().String() == "net.IP" {
|
||||||
|
fields = append(fields, "copyIP(rr."+f+")")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fields = append(fields, "rr."+f)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ","))
|
||||||
|
fmt.Fprintf(b, "}\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// gofmt
|
||||||
|
res, err := format.Source(b.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
b.WriteTo(os.Stderr)
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// write result
|
||||||
|
f, err := os.Create("ztypes.go")
|
||||||
|
fatalIfErr(err)
|
||||||
|
defer f.Close()
|
||||||
|
f.Write(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fatalIfErr(err error) {
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
2
vendor/github.com/miekg/dns/udp.go
generated
vendored
2
vendor/github.com/miekg/dns/udp.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// +build !windows
|
// +build !windows,!plan9
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
|
|
10
vendor/github.com/miekg/dns/udp_linux.go
generated
vendored
10
vendor/github.com/miekg/dns/udp_linux.go
generated
vendored
|
@ -24,6 +24,12 @@ func setUDPSocketOptions4(conn *net.UDPConn) error {
|
||||||
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
|
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
// Calling File() above results in the connection becoming blocking, we must fix that.
|
||||||
|
// See https://github.com/miekg/dns/issues/279
|
||||||
|
err = syscall.SetNonblock(int(file.Fd()), true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -36,6 +42,10 @@ func setUDPSocketOptions6(conn *net.UDPConn) error {
|
||||||
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
|
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
err = syscall.SetNonblock(int(file.Fd()), true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
2
vendor/github.com/miekg/dns/udp_other.go
generated
vendored
2
vendor/github.com/miekg/dns/udp_other.go
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
// +build !linux
|
// +build !linux,!plan9
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
|
|
34
vendor/github.com/miekg/dns/udp_plan9.go
generated
vendored
Normal file
34
vendor/github.com/miekg/dns/udp_plan9.go
generated
vendored
Normal file
|
@ -0,0 +1,34 @@
|
||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
|
||||||
|
|
||||||
|
// SessionUDP holds the remote address and the associated
|
||||||
|
// out-of-band data.
|
||||||
|
type SessionUDP struct {
|
||||||
|
raddr *net.UDPAddr
|
||||||
|
context []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoteAddr returns the remote network address.
|
||||||
|
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
|
||||||
|
|
||||||
|
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
|
||||||
|
// net.UDPAddr.
|
||||||
|
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
|
||||||
|
oob := make([]byte, 40)
|
||||||
|
n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
|
||||||
|
if err != nil {
|
||||||
|
return n, nil, err
|
||||||
|
}
|
||||||
|
return n, &SessionUDP{raddr, oob[:oobn]}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
|
||||||
|
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
|
||||||
|
n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
|
||||||
|
return n, err
|
||||||
|
}
|
88
vendor/github.com/miekg/dns/update.go
generated
vendored
88
vendor/github.com/miekg/dns/update.go
generated
vendored
|
@ -3,18 +3,22 @@ package dns
|
||||||
// NameUsed sets the RRs in the prereq section to
|
// NameUsed sets the RRs in the prereq section to
|
||||||
// "Name is in use" RRs. RFC 2136 section 2.4.4.
|
// "Name is in use" RRs. RFC 2136 section 2.4.4.
|
||||||
func (u *Msg) NameUsed(rr []RR) {
|
func (u *Msg) NameUsed(rr []RR) {
|
||||||
u.Answer = make([]RR, len(rr))
|
if u.Answer == nil {
|
||||||
for i, r := range rr {
|
u.Answer = make([]RR, 0, len(rr))
|
||||||
u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}
|
}
|
||||||
|
for _, r := range rr {
|
||||||
|
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NameNotUsed sets the RRs in the prereq section to
|
// NameNotUsed sets the RRs in the prereq section to
|
||||||
// "Name is in not use" RRs. RFC 2136 section 2.4.5.
|
// "Name is in not use" RRs. RFC 2136 section 2.4.5.
|
||||||
func (u *Msg) NameNotUsed(rr []RR) {
|
func (u *Msg) NameNotUsed(rr []RR) {
|
||||||
u.Answer = make([]RR, len(rr))
|
if u.Answer == nil {
|
||||||
for i, r := range rr {
|
u.Answer = make([]RR, 0, len(rr))
|
||||||
u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}
|
}
|
||||||
|
for _, r := range rr {
|
||||||
|
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -24,34 +28,34 @@ func (u *Msg) Used(rr []RR) {
|
||||||
if len(u.Question) == 0 {
|
if len(u.Question) == 0 {
|
||||||
panic("dns: empty question section")
|
panic("dns: empty question section")
|
||||||
}
|
}
|
||||||
u.Answer = make([]RR, len(rr))
|
if u.Answer == nil {
|
||||||
for i, r := range rr {
|
u.Answer = make([]RR, 0, len(rr))
|
||||||
u.Answer[i] = r
|
}
|
||||||
u.Answer[i].Header().Class = u.Question[0].Qclass
|
for _, r := range rr {
|
||||||
|
r.Header().Class = u.Question[0].Qclass
|
||||||
|
u.Answer = append(u.Answer, r)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// RRsetUsed sets the RRs in the prereq section to
|
// RRsetUsed sets the RRs in the prereq section to
|
||||||
// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
|
// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
|
||||||
func (u *Msg) RRsetUsed(rr []RR) {
|
func (u *Msg) RRsetUsed(rr []RR) {
|
||||||
u.Answer = make([]RR, len(rr))
|
if u.Answer == nil {
|
||||||
for i, r := range rr {
|
u.Answer = make([]RR, 0, len(rr))
|
||||||
u.Answer[i] = r
|
}
|
||||||
u.Answer[i].Header().Class = ClassANY
|
for _, r := range rr {
|
||||||
u.Answer[i].Header().Ttl = 0
|
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
|
||||||
u.Answer[i].Header().Rdlength = 0
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// RRsetNotUsed sets the RRs in the prereq section to
|
// RRsetNotUsed sets the RRs in the prereq section to
|
||||||
// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
|
// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
|
||||||
func (u *Msg) RRsetNotUsed(rr []RR) {
|
func (u *Msg) RRsetNotUsed(rr []RR) {
|
||||||
u.Answer = make([]RR, len(rr))
|
if u.Answer == nil {
|
||||||
for i, r := range rr {
|
u.Answer = make([]RR, 0, len(rr))
|
||||||
u.Answer[i] = r
|
}
|
||||||
u.Answer[i].Header().Class = ClassNONE
|
for _, r := range rr {
|
||||||
u.Answer[i].Header().Rdlength = 0
|
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}})
|
||||||
u.Answer[i].Header().Ttl = 0
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -60,35 +64,43 @@ func (u *Msg) Insert(rr []RR) {
|
||||||
if len(u.Question) == 0 {
|
if len(u.Question) == 0 {
|
||||||
panic("dns: empty question section")
|
panic("dns: empty question section")
|
||||||
}
|
}
|
||||||
u.Ns = make([]RR, len(rr))
|
if u.Ns == nil {
|
||||||
for i, r := range rr {
|
u.Ns = make([]RR, 0, len(rr))
|
||||||
u.Ns[i] = r
|
}
|
||||||
u.Ns[i].Header().Class = u.Question[0].Qclass
|
for _, r := range rr {
|
||||||
|
r.Header().Class = u.Question[0].Qclass
|
||||||
|
u.Ns = append(u.Ns, r)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
|
// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
|
||||||
func (u *Msg) RemoveRRset(rr []RR) {
|
func (u *Msg) RemoveRRset(rr []RR) {
|
||||||
u.Ns = make([]RR, len(rr))
|
if u.Ns == nil {
|
||||||
for i, r := range rr {
|
u.Ns = make([]RR, 0, len(rr))
|
||||||
u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}
|
}
|
||||||
|
for _, r := range rr {
|
||||||
|
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3
|
// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3
|
||||||
func (u *Msg) RemoveName(rr []RR) {
|
func (u *Msg) RemoveName(rr []RR) {
|
||||||
u.Ns = make([]RR, len(rr))
|
if u.Ns == nil {
|
||||||
for i, r := range rr {
|
u.Ns = make([]RR, 0, len(rr))
|
||||||
u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}
|
}
|
||||||
|
for _, r := range rr {
|
||||||
|
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove creates a dynamic update packet deletes RR from the RRSset, see RFC 2136 section 2.5.4
|
// Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4
|
||||||
func (u *Msg) Remove(rr []RR) {
|
func (u *Msg) Remove(rr []RR) {
|
||||||
u.Ns = make([]RR, len(rr))
|
if u.Ns == nil {
|
||||||
for i, r := range rr {
|
u.Ns = make([]RR, 0, len(rr))
|
||||||
u.Ns[i] = r
|
}
|
||||||
u.Ns[i].Header().Class = ClassNONE
|
for _, r := range rr {
|
||||||
u.Ns[i].Header().Ttl = 0
|
r.Header().Class = ClassNONE
|
||||||
|
r.Header().Ttl = 0
|
||||||
|
u.Ns = append(u.Ns, r)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
16
vendor/github.com/miekg/dns/xfr.go
generated
vendored
16
vendor/github.com/miekg/dns/xfr.go
generated
vendored
|
@ -23,15 +23,27 @@ type Transfer struct {
|
||||||
// Think we need to away to stop the transfer
|
// Think we need to away to stop the transfer
|
||||||
|
|
||||||
// In performs an incoming transfer with the server in a.
|
// In performs an incoming transfer with the server in a.
|
||||||
|
// If you would like to set the source IP, or some other attribute
|
||||||
|
// of a Dialer for a Transfer, you can do so by specifying the attributes
|
||||||
|
// in the Transfer.Conn:
|
||||||
|
//
|
||||||
|
// d := net.Dialer{LocalAddr: transfer_source}
|
||||||
|
// con, err := d.Dial("tcp", master)
|
||||||
|
// dnscon := &dns.Conn{Conn:con}
|
||||||
|
// transfer = &dns.Transfer{Conn: dnscon}
|
||||||
|
// channel, err := transfer.In(message, master)
|
||||||
|
//
|
||||||
func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
|
func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
|
||||||
timeout := dnsTimeout
|
timeout := dnsTimeout
|
||||||
if t.DialTimeout != 0 {
|
if t.DialTimeout != 0 {
|
||||||
timeout = t.DialTimeout
|
timeout = t.DialTimeout
|
||||||
}
|
}
|
||||||
|
if t.Conn == nil {
|
||||||
t.Conn, err = DialTimeout("tcp", a, timeout)
|
t.Conn, err = DialTimeout("tcp", a, timeout)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
}
|
||||||
if err := t.WriteMsg(q); err != nil {
|
if err := t.WriteMsg(q); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -150,8 +162,8 @@ func (t *Transfer) inIxfr(id uint16, c chan *Envelope) {
|
||||||
//
|
//
|
||||||
// ch := make(chan *dns.Envelope)
|
// ch := make(chan *dns.Envelope)
|
||||||
// tr := new(dns.Transfer)
|
// tr := new(dns.Transfer)
|
||||||
// tr.Out(w, r, ch)
|
// go tr.Out(w, r, ch)
|
||||||
// c <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
|
// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
|
||||||
// close(ch)
|
// close(ch)
|
||||||
// w.Hijack()
|
// w.Hijack()
|
||||||
// // w.Close() // Client closes connection
|
// // w.Close() // Client closes connection
|
||||||
|
|
3529
vendor/github.com/miekg/dns/zmsg.go
generated
vendored
Normal file
3529
vendor/github.com/miekg/dns/zmsg.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
842
vendor/github.com/miekg/dns/ztypes.go
generated
vendored
Normal file
842
vendor/github.com/miekg/dns/ztypes.go
generated
vendored
Normal file
|
@ -0,0 +1,842 @@
|
||||||
|
// *** DO NOT MODIFY ***
|
||||||
|
// AUTOGENERATED BY go generate from type_generate.go
|
||||||
|
|
||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TypeToRR is a map of constructors for each RR type.
|
||||||
|
var TypeToRR = map[uint16]func() RR{
|
||||||
|
TypeA: func() RR { return new(A) },
|
||||||
|
TypeAAAA: func() RR { return new(AAAA) },
|
||||||
|
TypeAFSDB: func() RR { return new(AFSDB) },
|
||||||
|
TypeANY: func() RR { return new(ANY) },
|
||||||
|
TypeCAA: func() RR { return new(CAA) },
|
||||||
|
TypeCDNSKEY: func() RR { return new(CDNSKEY) },
|
||||||
|
TypeCDS: func() RR { return new(CDS) },
|
||||||
|
TypeCERT: func() RR { return new(CERT) },
|
||||||
|
TypeCNAME: func() RR { return new(CNAME) },
|
||||||
|
TypeDHCID: func() RR { return new(DHCID) },
|
||||||
|
TypeDLV: func() RR { return new(DLV) },
|
||||||
|
TypeDNAME: func() RR { return new(DNAME) },
|
||||||
|
TypeDNSKEY: func() RR { return new(DNSKEY) },
|
||||||
|
TypeDS: func() RR { return new(DS) },
|
||||||
|
TypeEID: func() RR { return new(EID) },
|
||||||
|
TypeEUI48: func() RR { return new(EUI48) },
|
||||||
|
TypeEUI64: func() RR { return new(EUI64) },
|
||||||
|
TypeGID: func() RR { return new(GID) },
|
||||||
|
TypeGPOS: func() RR { return new(GPOS) },
|
||||||
|
TypeHINFO: func() RR { return new(HINFO) },
|
||||||
|
TypeHIP: func() RR { return new(HIP) },
|
||||||
|
TypeKEY: func() RR { return new(KEY) },
|
||||||
|
TypeKX: func() RR { return new(KX) },
|
||||||
|
TypeL32: func() RR { return new(L32) },
|
||||||
|
TypeL64: func() RR { return new(L64) },
|
||||||
|
TypeLOC: func() RR { return new(LOC) },
|
||||||
|
TypeLP: func() RR { return new(LP) },
|
||||||
|
TypeMB: func() RR { return new(MB) },
|
||||||
|
TypeMD: func() RR { return new(MD) },
|
||||||
|
TypeMF: func() RR { return new(MF) },
|
||||||
|
TypeMG: func() RR { return new(MG) },
|
||||||
|
TypeMINFO: func() RR { return new(MINFO) },
|
||||||
|
TypeMR: func() RR { return new(MR) },
|
||||||
|
TypeMX: func() RR { return new(MX) },
|
||||||
|
TypeNAPTR: func() RR { return new(NAPTR) },
|
||||||
|
TypeNID: func() RR { return new(NID) },
|
||||||
|
TypeNIMLOC: func() RR { return new(NIMLOC) },
|
||||||
|
TypeNINFO: func() RR { return new(NINFO) },
|
||||||
|
TypeNS: func() RR { return new(NS) },
|
||||||
|
TypeNSAPPTR: func() RR { return new(NSAPPTR) },
|
||||||
|
TypeNSEC: func() RR { return new(NSEC) },
|
||||||
|
TypeNSEC3: func() RR { return new(NSEC3) },
|
||||||
|
TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
|
||||||
|
TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
|
||||||
|
TypeOPT: func() RR { return new(OPT) },
|
||||||
|
TypePTR: func() RR { return new(PTR) },
|
||||||
|
TypePX: func() RR { return new(PX) },
|
||||||
|
TypeRKEY: func() RR { return new(RKEY) },
|
||||||
|
TypeRP: func() RR { return new(RP) },
|
||||||
|
TypeRRSIG: func() RR { return new(RRSIG) },
|
||||||
|
TypeRT: func() RR { return new(RT) },
|
||||||
|
TypeSIG: func() RR { return new(SIG) },
|
||||||
|
TypeSMIMEA: func() RR { return new(SMIMEA) },
|
||||||
|
TypeSOA: func() RR { return new(SOA) },
|
||||||
|
TypeSPF: func() RR { return new(SPF) },
|
||||||
|
TypeSRV: func() RR { return new(SRV) },
|
||||||
|
TypeSSHFP: func() RR { return new(SSHFP) },
|
||||||
|
TypeTA: func() RR { return new(TA) },
|
||||||
|
TypeTALINK: func() RR { return new(TALINK) },
|
||||||
|
TypeTKEY: func() RR { return new(TKEY) },
|
||||||
|
TypeTLSA: func() RR { return new(TLSA) },
|
||||||
|
TypeTSIG: func() RR { return new(TSIG) },
|
||||||
|
TypeTXT: func() RR { return new(TXT) },
|
||||||
|
TypeUID: func() RR { return new(UID) },
|
||||||
|
TypeUINFO: func() RR { return new(UINFO) },
|
||||||
|
TypeURI: func() RR { return new(URI) },
|
||||||
|
TypeX25: func() RR { return new(X25) },
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeToString is a map of strings for each RR type.
|
||||||
|
var TypeToString = map[uint16]string{
|
||||||
|
TypeA: "A",
|
||||||
|
TypeAAAA: "AAAA",
|
||||||
|
TypeAFSDB: "AFSDB",
|
||||||
|
TypeANY: "ANY",
|
||||||
|
TypeATMA: "ATMA",
|
||||||
|
TypeAXFR: "AXFR",
|
||||||
|
TypeCAA: "CAA",
|
||||||
|
TypeCDNSKEY: "CDNSKEY",
|
||||||
|
TypeCDS: "CDS",
|
||||||
|
TypeCERT: "CERT",
|
||||||
|
TypeCNAME: "CNAME",
|
||||||
|
TypeDHCID: "DHCID",
|
||||||
|
TypeDLV: "DLV",
|
||||||
|
TypeDNAME: "DNAME",
|
||||||
|
TypeDNSKEY: "DNSKEY",
|
||||||
|
TypeDS: "DS",
|
||||||
|
TypeEID: "EID",
|
||||||
|
TypeEUI48: "EUI48",
|
||||||
|
TypeEUI64: "EUI64",
|
||||||
|
TypeGID: "GID",
|
||||||
|
TypeGPOS: "GPOS",
|
||||||
|
TypeHINFO: "HINFO",
|
||||||
|
TypeHIP: "HIP",
|
||||||
|
TypeISDN: "ISDN",
|
||||||
|
TypeIXFR: "IXFR",
|
||||||
|
TypeKEY: "KEY",
|
||||||
|
TypeKX: "KX",
|
||||||
|
TypeL32: "L32",
|
||||||
|
TypeL64: "L64",
|
||||||
|
TypeLOC: "LOC",
|
||||||
|
TypeLP: "LP",
|
||||||
|
TypeMAILA: "MAILA",
|
||||||
|
TypeMAILB: "MAILB",
|
||||||
|
TypeMB: "MB",
|
||||||
|
TypeMD: "MD",
|
||||||
|
TypeMF: "MF",
|
||||||
|
TypeMG: "MG",
|
||||||
|
TypeMINFO: "MINFO",
|
||||||
|
TypeMR: "MR",
|
||||||
|
TypeMX: "MX",
|
||||||
|
TypeNAPTR: "NAPTR",
|
||||||
|
TypeNID: "NID",
|
||||||
|
TypeNIMLOC: "NIMLOC",
|
||||||
|
TypeNINFO: "NINFO",
|
||||||
|
TypeNS: "NS",
|
||||||
|
TypeNSEC: "NSEC",
|
||||||
|
TypeNSEC3: "NSEC3",
|
||||||
|
TypeNSEC3PARAM: "NSEC3PARAM",
|
||||||
|
TypeNULL: "NULL",
|
||||||
|
TypeNXT: "NXT",
|
||||||
|
TypeNone: "None",
|
||||||
|
TypeOPENPGPKEY: "OPENPGPKEY",
|
||||||
|
TypeOPT: "OPT",
|
||||||
|
TypePTR: "PTR",
|
||||||
|
TypePX: "PX",
|
||||||
|
TypeRKEY: "RKEY",
|
||||||
|
TypeRP: "RP",
|
||||||
|
TypeRRSIG: "RRSIG",
|
||||||
|
TypeRT: "RT",
|
||||||
|
TypeReserved: "Reserved",
|
||||||
|
TypeSIG: "SIG",
|
||||||
|
TypeSMIMEA: "SMIMEA",
|
||||||
|
TypeSOA: "SOA",
|
||||||
|
TypeSPF: "SPF",
|
||||||
|
TypeSRV: "SRV",
|
||||||
|
TypeSSHFP: "SSHFP",
|
||||||
|
TypeTA: "TA",
|
||||||
|
TypeTALINK: "TALINK",
|
||||||
|
TypeTKEY: "TKEY",
|
||||||
|
TypeTLSA: "TLSA",
|
||||||
|
TypeTSIG: "TSIG",
|
||||||
|
TypeTXT: "TXT",
|
||||||
|
TypeUID: "UID",
|
||||||
|
TypeUINFO: "UINFO",
|
||||||
|
TypeUNSPEC: "UNSPEC",
|
||||||
|
TypeURI: "URI",
|
||||||
|
TypeX25: "X25",
|
||||||
|
TypeNSAPPTR: "NSAP-PTR",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header() functions
|
||||||
|
func (rr *A) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *CDS) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *CERT) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *CNAME) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *DHCID) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *DLV) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *DNAME) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *DS) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *EID) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *EUI48) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *EUI64) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *GID) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *KEY) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *KX) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *L32) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *L64) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *LOC) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *LP) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *MB) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *MD) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *MF) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *MG) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *MINFO) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *MR) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *MX) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *NID) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *NINFO) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *NS) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *PX) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *RP) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *RT) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *SIG) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *SOA) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *SPF) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *SRV) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *TA) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *TALINK) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *TKEY) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *TLSA) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *TSIG) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *TXT) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *UID) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *UINFO) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *URI) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
func (rr *X25) Header() *RR_Header { return &rr.Hdr }
|
||||||
|
|
||||||
|
// len() functions
|
||||||
|
func (rr *A) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += net.IPv4len // A
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *AAAA) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += net.IPv6len // AAAA
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *AFSDB) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Subtype
|
||||||
|
l += len(rr.Hostname) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *ANY) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *CAA) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 1 // Flag
|
||||||
|
l += len(rr.Tag) + 1
|
||||||
|
l += len(rr.Value)
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *CERT) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Type
|
||||||
|
l += 2 // KeyTag
|
||||||
|
l += 1 // Algorithm
|
||||||
|
l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *CNAME) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Target) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *DHCID) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += base64.StdEncoding.DecodedLen(len(rr.Digest))
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *DNAME) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Target) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *DNSKEY) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Flags
|
||||||
|
l += 1 // Protocol
|
||||||
|
l += 1 // Algorithm
|
||||||
|
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *DS) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // KeyTag
|
||||||
|
l += 1 // Algorithm
|
||||||
|
l += 1 // DigestType
|
||||||
|
l += len(rr.Digest)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *EID) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Endpoint)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *EUI48) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 6 // Address
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *EUI64) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 8 // Address
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *GID) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 4 // Gid
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *GPOS) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Longitude) + 1
|
||||||
|
l += len(rr.Latitude) + 1
|
||||||
|
l += len(rr.Altitude) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *HINFO) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Cpu) + 1
|
||||||
|
l += len(rr.Os) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *HIP) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 1 // HitLength
|
||||||
|
l += 1 // PublicKeyAlgorithm
|
||||||
|
l += 2 // PublicKeyLength
|
||||||
|
l += len(rr.Hit)/2 + 1
|
||||||
|
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||||
|
for _, x := range rr.RendezvousServers {
|
||||||
|
l += len(x) + 1
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *KX) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Preference
|
||||||
|
l += len(rr.Exchanger) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *L32) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Preference
|
||||||
|
l += net.IPv4len // Locator32
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *L64) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Preference
|
||||||
|
l += 8 // Locator64
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *LOC) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 1 // Version
|
||||||
|
l += 1 // Size
|
||||||
|
l += 1 // HorizPre
|
||||||
|
l += 1 // VertPre
|
||||||
|
l += 4 // Latitude
|
||||||
|
l += 4 // Longitude
|
||||||
|
l += 4 // Altitude
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *LP) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Preference
|
||||||
|
l += len(rr.Fqdn) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *MB) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Mb) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *MD) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Md) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *MF) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Mf) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *MG) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Mg) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *MINFO) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Rmail) + 1
|
||||||
|
l += len(rr.Email) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *MR) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Mr) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *MX) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Preference
|
||||||
|
l += len(rr.Mx) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *NAPTR) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Order
|
||||||
|
l += 2 // Preference
|
||||||
|
l += len(rr.Flags) + 1
|
||||||
|
l += len(rr.Service) + 1
|
||||||
|
l += len(rr.Regexp) + 1
|
||||||
|
l += len(rr.Replacement) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *NID) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Preference
|
||||||
|
l += 8 // NodeID
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *NIMLOC) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Locator)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *NINFO) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
for _, x := range rr.ZSData {
|
||||||
|
l += len(x) + 1
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *NS) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Ns) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *NSAPPTR) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Ptr) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *NSEC3PARAM) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 1 // Hash
|
||||||
|
l += 1 // Flags
|
||||||
|
l += 2 // Iterations
|
||||||
|
l += 1 // SaltLength
|
||||||
|
l += len(rr.Salt)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *OPENPGPKEY) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *PTR) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Ptr) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *PX) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Preference
|
||||||
|
l += len(rr.Map822) + 1
|
||||||
|
l += len(rr.Mapx400) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *RFC3597) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Rdata)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *RKEY) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Flags
|
||||||
|
l += 1 // Protocol
|
||||||
|
l += 1 // Algorithm
|
||||||
|
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *RP) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Mbox) + 1
|
||||||
|
l += len(rr.Txt) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *RRSIG) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // TypeCovered
|
||||||
|
l += 1 // Algorithm
|
||||||
|
l += 1 // Labels
|
||||||
|
l += 4 // OrigTtl
|
||||||
|
l += 4 // Expiration
|
||||||
|
l += 4 // Inception
|
||||||
|
l += 2 // KeyTag
|
||||||
|
l += len(rr.SignerName) + 1
|
||||||
|
l += base64.StdEncoding.DecodedLen(len(rr.Signature))
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *RT) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Preference
|
||||||
|
l += len(rr.Host) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *SMIMEA) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 1 // Usage
|
||||||
|
l += 1 // Selector
|
||||||
|
l += 1 // MatchingType
|
||||||
|
l += len(rr.Certificate)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *SOA) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Ns) + 1
|
||||||
|
l += len(rr.Mbox) + 1
|
||||||
|
l += 4 // Serial
|
||||||
|
l += 4 // Refresh
|
||||||
|
l += 4 // Retry
|
||||||
|
l += 4 // Expire
|
||||||
|
l += 4 // Minttl
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *SPF) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
for _, x := range rr.Txt {
|
||||||
|
l += len(x) + 1
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *SRV) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Priority
|
||||||
|
l += 2 // Weight
|
||||||
|
l += 2 // Port
|
||||||
|
l += len(rr.Target) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *SSHFP) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 1 // Algorithm
|
||||||
|
l += 1 // Type
|
||||||
|
l += len(rr.FingerPrint)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *TA) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // KeyTag
|
||||||
|
l += 1 // Algorithm
|
||||||
|
l += 1 // DigestType
|
||||||
|
l += len(rr.Digest)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *TALINK) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.PreviousName) + 1
|
||||||
|
l += len(rr.NextName) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *TKEY) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Algorithm) + 1
|
||||||
|
l += 4 // Inception
|
||||||
|
l += 4 // Expiration
|
||||||
|
l += 2 // Mode
|
||||||
|
l += 2 // Error
|
||||||
|
l += 2 // KeySize
|
||||||
|
l += len(rr.Key) + 1
|
||||||
|
l += 2 // OtherLen
|
||||||
|
l += len(rr.OtherData) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *TLSA) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 1 // Usage
|
||||||
|
l += 1 // Selector
|
||||||
|
l += 1 // MatchingType
|
||||||
|
l += len(rr.Certificate)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *TSIG) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Algorithm) + 1
|
||||||
|
l += 6 // TimeSigned
|
||||||
|
l += 2 // Fudge
|
||||||
|
l += 2 // MACSize
|
||||||
|
l += len(rr.MAC)/2 + 1
|
||||||
|
l += 2 // OrigId
|
||||||
|
l += 2 // Error
|
||||||
|
l += 2 // OtherLen
|
||||||
|
l += len(rr.OtherData)/2 + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *TXT) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
for _, x := range rr.Txt {
|
||||||
|
l += len(x) + 1
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *UID) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 4 // Uid
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *UINFO) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.Uinfo) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *URI) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += 2 // Priority
|
||||||
|
l += 2 // Weight
|
||||||
|
l += len(rr.Target)
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
func (rr *X25) len() int {
|
||||||
|
l := rr.Hdr.len()
|
||||||
|
l += len(rr.PSDNAddress) + 1
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy() functions
|
||||||
|
func (rr *A) copy() RR {
|
||||||
|
return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)}
|
||||||
|
}
|
||||||
|
func (rr *AAAA) copy() RR {
|
||||||
|
return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)}
|
||||||
|
}
|
||||||
|
func (rr *AFSDB) copy() RR {
|
||||||
|
return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname}
|
||||||
|
}
|
||||||
|
func (rr *ANY) copy() RR {
|
||||||
|
return &ANY{*rr.Hdr.copyHeader()}
|
||||||
|
}
|
||||||
|
func (rr *CAA) copy() RR {
|
||||||
|
return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value}
|
||||||
|
}
|
||||||
|
func (rr *CERT) copy() RR {
|
||||||
|
return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
|
||||||
|
}
|
||||||
|
func (rr *CNAME) copy() RR {
|
||||||
|
return &CNAME{*rr.Hdr.copyHeader(), rr.Target}
|
||||||
|
}
|
||||||
|
func (rr *DHCID) copy() RR {
|
||||||
|
return &DHCID{*rr.Hdr.copyHeader(), rr.Digest}
|
||||||
|
}
|
||||||
|
func (rr *DNAME) copy() RR {
|
||||||
|
return &DNAME{*rr.Hdr.copyHeader(), rr.Target}
|
||||||
|
}
|
||||||
|
func (rr *DNSKEY) copy() RR {
|
||||||
|
return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
|
||||||
|
}
|
||||||
|
func (rr *DS) copy() RR {
|
||||||
|
return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
|
||||||
|
}
|
||||||
|
func (rr *EID) copy() RR {
|
||||||
|
return &EID{*rr.Hdr.copyHeader(), rr.Endpoint}
|
||||||
|
}
|
||||||
|
func (rr *EUI48) copy() RR {
|
||||||
|
return &EUI48{*rr.Hdr.copyHeader(), rr.Address}
|
||||||
|
}
|
||||||
|
func (rr *EUI64) copy() RR {
|
||||||
|
return &EUI64{*rr.Hdr.copyHeader(), rr.Address}
|
||||||
|
}
|
||||||
|
func (rr *GID) copy() RR {
|
||||||
|
return &GID{*rr.Hdr.copyHeader(), rr.Gid}
|
||||||
|
}
|
||||||
|
func (rr *GPOS) copy() RR {
|
||||||
|
return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude}
|
||||||
|
}
|
||||||
|
func (rr *HINFO) copy() RR {
|
||||||
|
return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os}
|
||||||
|
}
|
||||||
|
func (rr *HIP) copy() RR {
|
||||||
|
RendezvousServers := make([]string, len(rr.RendezvousServers))
|
||||||
|
copy(RendezvousServers, rr.RendezvousServers)
|
||||||
|
return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
|
||||||
|
}
|
||||||
|
func (rr *KX) copy() RR {
|
||||||
|
return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger}
|
||||||
|
}
|
||||||
|
func (rr *L32) copy() RR {
|
||||||
|
return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)}
|
||||||
|
}
|
||||||
|
func (rr *L64) copy() RR {
|
||||||
|
return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64}
|
||||||
|
}
|
||||||
|
func (rr *LOC) copy() RR {
|
||||||
|
return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
|
||||||
|
}
|
||||||
|
func (rr *LP) copy() RR {
|
||||||
|
return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn}
|
||||||
|
}
|
||||||
|
func (rr *MB) copy() RR {
|
||||||
|
return &MB{*rr.Hdr.copyHeader(), rr.Mb}
|
||||||
|
}
|
||||||
|
func (rr *MD) copy() RR {
|
||||||
|
return &MD{*rr.Hdr.copyHeader(), rr.Md}
|
||||||
|
}
|
||||||
|
func (rr *MF) copy() RR {
|
||||||
|
return &MF{*rr.Hdr.copyHeader(), rr.Mf}
|
||||||
|
}
|
||||||
|
func (rr *MG) copy() RR {
|
||||||
|
return &MG{*rr.Hdr.copyHeader(), rr.Mg}
|
||||||
|
}
|
||||||
|
func (rr *MINFO) copy() RR {
|
||||||
|
return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email}
|
||||||
|
}
|
||||||
|
func (rr *MR) copy() RR {
|
||||||
|
return &MR{*rr.Hdr.copyHeader(), rr.Mr}
|
||||||
|
}
|
||||||
|
func (rr *MX) copy() RR {
|
||||||
|
return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx}
|
||||||
|
}
|
||||||
|
func (rr *NAPTR) copy() RR {
|
||||||
|
return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
|
||||||
|
}
|
||||||
|
func (rr *NID) copy() RR {
|
||||||
|
return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID}
|
||||||
|
}
|
||||||
|
func (rr *NIMLOC) copy() RR {
|
||||||
|
return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator}
|
||||||
|
}
|
||||||
|
func (rr *NINFO) copy() RR {
|
||||||
|
ZSData := make([]string, len(rr.ZSData))
|
||||||
|
copy(ZSData, rr.ZSData)
|
||||||
|
return &NINFO{*rr.Hdr.copyHeader(), ZSData}
|
||||||
|
}
|
||||||
|
func (rr *NS) copy() RR {
|
||||||
|
return &NS{*rr.Hdr.copyHeader(), rr.Ns}
|
||||||
|
}
|
||||||
|
func (rr *NSAPPTR) copy() RR {
|
||||||
|
return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr}
|
||||||
|
}
|
||||||
|
func (rr *NSEC) copy() RR {
|
||||||
|
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
|
||||||
|
copy(TypeBitMap, rr.TypeBitMap)
|
||||||
|
return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap}
|
||||||
|
}
|
||||||
|
func (rr *NSEC3) copy() RR {
|
||||||
|
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
|
||||||
|
copy(TypeBitMap, rr.TypeBitMap)
|
||||||
|
return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
|
||||||
|
}
|
||||||
|
func (rr *NSEC3PARAM) copy() RR {
|
||||||
|
return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
|
||||||
|
}
|
||||||
|
func (rr *OPENPGPKEY) copy() RR {
|
||||||
|
return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey}
|
||||||
|
}
|
||||||
|
func (rr *OPT) copy() RR {
|
||||||
|
Option := make([]EDNS0, len(rr.Option))
|
||||||
|
copy(Option, rr.Option)
|
||||||
|
return &OPT{*rr.Hdr.copyHeader(), Option}
|
||||||
|
}
|
||||||
|
func (rr *PTR) copy() RR {
|
||||||
|
return &PTR{*rr.Hdr.copyHeader(), rr.Ptr}
|
||||||
|
}
|
||||||
|
func (rr *PX) copy() RR {
|
||||||
|
return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400}
|
||||||
|
}
|
||||||
|
func (rr *RFC3597) copy() RR {
|
||||||
|
return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata}
|
||||||
|
}
|
||||||
|
func (rr *RKEY) copy() RR {
|
||||||
|
return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
|
||||||
|
}
|
||||||
|
func (rr *RP) copy() RR {
|
||||||
|
return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt}
|
||||||
|
}
|
||||||
|
func (rr *RRSIG) copy() RR {
|
||||||
|
return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
|
||||||
|
}
|
||||||
|
func (rr *RT) copy() RR {
|
||||||
|
return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host}
|
||||||
|
}
|
||||||
|
func (rr *SMIMEA) copy() RR {
|
||||||
|
return &SMIMEA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
|
||||||
|
}
|
||||||
|
func (rr *SOA) copy() RR {
|
||||||
|
return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
|
||||||
|
}
|
||||||
|
func (rr *SPF) copy() RR {
|
||||||
|
Txt := make([]string, len(rr.Txt))
|
||||||
|
copy(Txt, rr.Txt)
|
||||||
|
return &SPF{*rr.Hdr.copyHeader(), Txt}
|
||||||
|
}
|
||||||
|
func (rr *SRV) copy() RR {
|
||||||
|
return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target}
|
||||||
|
}
|
||||||
|
func (rr *SSHFP) copy() RR {
|
||||||
|
return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint}
|
||||||
|
}
|
||||||
|
func (rr *TA) copy() RR {
|
||||||
|
return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
|
||||||
|
}
|
||||||
|
func (rr *TALINK) copy() RR {
|
||||||
|
return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName}
|
||||||
|
}
|
||||||
|
func (rr *TKEY) copy() RR {
|
||||||
|
return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
|
||||||
|
}
|
||||||
|
func (rr *TLSA) copy() RR {
|
||||||
|
return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
|
||||||
|
}
|
||||||
|
func (rr *TSIG) copy() RR {
|
||||||
|
return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
|
||||||
|
}
|
||||||
|
func (rr *TXT) copy() RR {
|
||||||
|
Txt := make([]string, len(rr.Txt))
|
||||||
|
copy(Txt, rr.Txt)
|
||||||
|
return &TXT{*rr.Hdr.copyHeader(), Txt}
|
||||||
|
}
|
||||||
|
func (rr *UID) copy() RR {
|
||||||
|
return &UID{*rr.Hdr.copyHeader(), rr.Uid}
|
||||||
|
}
|
||||||
|
func (rr *UINFO) copy() RR {
|
||||||
|
return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo}
|
||||||
|
}
|
||||||
|
func (rr *URI) copy() RR {
|
||||||
|
return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target}
|
||||||
|
}
|
||||||
|
func (rr *X25) copy() RR {
|
||||||
|
return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress}
|
||||||
|
}
|
5
vendor/github.com/prometheus/client_golang/NOTICE
generated
vendored
5
vendor/github.com/prometheus/client_golang/NOTICE
generated
vendored
|
@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/).
|
||||||
|
|
||||||
The following components are included in this product:
|
The following components are included in this product:
|
||||||
|
|
||||||
goautoneg
|
|
||||||
http://bitbucket.org/ww/goautoneg
|
|
||||||
Copyright 2011, Open Knowledge Foundation Ltd.
|
|
||||||
See README.txt for license details.
|
|
||||||
|
|
||||||
perks - a fork of https://github.com/bmizerany/perks
|
perks - a fork of https://github.com/bmizerany/perks
|
||||||
https://github.com/beorn7/perks
|
https://github.com/beorn7/perks
|
||||||
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
|
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
|
||||||
|
|
54
vendor/github.com/prometheus/client_golang/prometheus/README.md
generated
vendored
54
vendor/github.com/prometheus/client_golang/prometheus/README.md
generated
vendored
|
@ -1,53 +1 @@
|
||||||
# Overview
|
See [](https://godoc.org/github.com/prometheus/client_golang/prometheus).
|
||||||
This is the [Prometheus](http://www.prometheus.io) telemetric
|
|
||||||
instrumentation client [Go](http://golang.org) client library. It
|
|
||||||
enable authors to define process-space metrics for their servers and
|
|
||||||
expose them through a web service interface for extraction,
|
|
||||||
aggregation, and a whole slew of other post processing techniques.
|
|
||||||
|
|
||||||
# Installing
|
|
||||||
$ go get github.com/prometheus/client_golang/prometheus
|
|
||||||
|
|
||||||
# Example
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
indexed = prometheus.NewCounter(prometheus.CounterOpts{
|
|
||||||
Namespace: "my_company",
|
|
||||||
Subsystem: "indexer",
|
|
||||||
Name: "documents_indexed",
|
|
||||||
Help: "The number of documents indexed.",
|
|
||||||
})
|
|
||||||
size = prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
Namespace: "my_company",
|
|
||||||
Subsystem: "storage",
|
|
||||||
Name: "documents_total_size_bytes",
|
|
||||||
Help: "The total size of all documents in the storage.",
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
http.Handle("/metrics", prometheus.Handler())
|
|
||||||
|
|
||||||
indexed.Inc()
|
|
||||||
size.Set(5)
|
|
||||||
|
|
||||||
http.ListenAndServe(":8080", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
prometheus.MustRegister(indexed)
|
|
||||||
prometheus.MustRegister(size)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
# Documentation
|
|
||||||
|
|
||||||
[](https://godoc.org/github.com/prometheus/client_golang)
|
|
||||||
|
|
52
vendor/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
52
vendor/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
|
@ -15,15 +15,15 @@ package prometheus
|
||||||
|
|
||||||
// Collector is the interface implemented by anything that can be used by
|
// Collector is the interface implemented by anything that can be used by
|
||||||
// Prometheus to collect metrics. A Collector has to be registered for
|
// Prometheus to collect metrics. A Collector has to be registered for
|
||||||
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
|
// collection. See Registerer.Register.
|
||||||
//
|
//
|
||||||
// The stock metrics provided by this package (like Gauge, Counter, Summary) are
|
// The stock metrics provided by this package (Gauge, Counter, Summary,
|
||||||
// also Collectors (which only ever collect one metric, namely itself). An
|
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
|
||||||
// implementer of Collector may, however, collect multiple metrics in a
|
// namely itself). An implementer of Collector may, however, collect multiple
|
||||||
// coordinated fashion and/or create metrics on the fly. Examples for collectors
|
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
|
||||||
// already implemented in this library are the metric vectors (i.e. collection
|
// for collectors already implemented in this library are the metric vectors
|
||||||
// of multiple instances of the same Metric but with different label values)
|
// (i.e. collection of multiple instances of the same Metric but with different
|
||||||
// like GaugeVec or SummaryVec, and the ExpvarCollector.
|
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
|
||||||
type Collector interface {
|
type Collector interface {
|
||||||
// Describe sends the super-set of all possible descriptors of metrics
|
// Describe sends the super-set of all possible descriptors of metrics
|
||||||
// collected by this Collector to the provided channel and returns once
|
// collected by this Collector to the provided channel and returns once
|
||||||
|
@ -37,39 +37,39 @@ type Collector interface {
|
||||||
// executing this method, it must send an invalid descriptor (created
|
// executing this method, it must send an invalid descriptor (created
|
||||||
// with NewInvalidDesc) to signal the error to the registry.
|
// with NewInvalidDesc) to signal the error to the registry.
|
||||||
Describe(chan<- *Desc)
|
Describe(chan<- *Desc)
|
||||||
// Collect is called by Prometheus when collecting metrics. The
|
// Collect is called by the Prometheus registry when collecting
|
||||||
// implementation sends each collected metric via the provided channel
|
// metrics. The implementation sends each collected metric via the
|
||||||
// and returns once the last metric has been sent. The descriptor of
|
// provided channel and returns once the last metric has been sent. The
|
||||||
// each sent metric is one of those returned by Describe. Returned
|
// descriptor of each sent metric is one of those returned by
|
||||||
// metrics that share the same descriptor must differ in their variable
|
// Describe. Returned metrics that share the same descriptor must differ
|
||||||
// label values. This method may be called concurrently and must
|
// in their variable label values. This method may be called
|
||||||
// therefore be implemented in a concurrency safe way. Blocking occurs
|
// concurrently and must therefore be implemented in a concurrency safe
|
||||||
// at the expense of total performance of rendering all registered
|
// way. Blocking occurs at the expense of total performance of rendering
|
||||||
// metrics. Ideally, Collector implementations support concurrent
|
// all registered metrics. Ideally, Collector implementations support
|
||||||
// readers.
|
// concurrent readers.
|
||||||
Collect(chan<- Metric)
|
Collect(chan<- Metric)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SelfCollector implements Collector for a single Metric so that that the
|
// selfCollector implements Collector for a single Metric so that the Metric
|
||||||
// Metric collects itself. Add it as an anonymous field to a struct that
|
// collects itself. Add it as an anonymous field to a struct that implements
|
||||||
// implements Metric, and call Init with the Metric itself as an argument.
|
// Metric, and call init with the Metric itself as an argument.
|
||||||
type SelfCollector struct {
|
type selfCollector struct {
|
||||||
self Metric
|
self Metric
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init provides the SelfCollector with a reference to the metric it is supposed
|
// init provides the selfCollector with a reference to the metric it is supposed
|
||||||
// to collect. It is usually called within the factory function to create a
|
// to collect. It is usually called within the factory function to create a
|
||||||
// metric. See example.
|
// metric. See example.
|
||||||
func (c *SelfCollector) Init(self Metric) {
|
func (c *selfCollector) init(self Metric) {
|
||||||
c.self = self
|
c.self = self
|
||||||
}
|
}
|
||||||
|
|
||||||
// Describe implements Collector.
|
// Describe implements Collector.
|
||||||
func (c *SelfCollector) Describe(ch chan<- *Desc) {
|
func (c *selfCollector) Describe(ch chan<- *Desc) {
|
||||||
ch <- c.self.Desc()
|
ch <- c.self.Desc()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Collect implements Collector.
|
// Collect implements Collector.
|
||||||
func (c *SelfCollector) Collect(ch chan<- Metric) {
|
func (c *selfCollector) Collect(ch chan<- Metric) {
|
||||||
ch <- c.self
|
ch <- c.self
|
||||||
}
|
}
|
||||||
|
|
17
vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
17
vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
|
@ -35,6 +35,9 @@ type Counter interface {
|
||||||
// Prometheus metric. Do not use it for regular handling of a
|
// Prometheus metric. Do not use it for regular handling of a
|
||||||
// Prometheus counter (as it can be used to break the contract of
|
// Prometheus counter (as it can be used to break the contract of
|
||||||
// monotonically increasing values).
|
// monotonically increasing values).
|
||||||
|
//
|
||||||
|
// Deprecated: Use NewConstMetric to create a counter for an external
|
||||||
|
// value. A Counter should never be set.
|
||||||
Set(float64)
|
Set(float64)
|
||||||
// Inc increments the counter by 1.
|
// Inc increments the counter by 1.
|
||||||
Inc()
|
Inc()
|
||||||
|
@ -55,7 +58,7 @@ func NewCounter(opts CounterOpts) Counter {
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
|
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
|
||||||
result.Init(result) // Init self-collection.
|
result.init(result) // Init self-collection.
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -79,7 +82,7 @@ func (c *counter) Add(v float64) {
|
||||||
// CounterVec embeds MetricVec. See there for a full list of methods with
|
// CounterVec embeds MetricVec. See there for a full list of methods with
|
||||||
// detailed documentation.
|
// detailed documentation.
|
||||||
type CounterVec struct {
|
type CounterVec struct {
|
||||||
MetricVec
|
*MetricVec
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
||||||
|
@ -93,19 +96,15 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
return &CounterVec{
|
return &CounterVec{
|
||||||
MetricVec: MetricVec{
|
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||||
children: map[uint64]Metric{},
|
|
||||||
desc: desc,
|
|
||||||
newMetric: func(lvs ...string) Metric {
|
|
||||||
result := &counter{value: value{
|
result := &counter{value: value{
|
||||||
desc: desc,
|
desc: desc,
|
||||||
valType: CounterValue,
|
valType: CounterValue,
|
||||||
labelPairs: makeLabelPairs(desc, lvs),
|
labelPairs: makeLabelPairs(desc, lvs),
|
||||||
}}
|
}}
|
||||||
result.Init(result) // Init self-collection.
|
result.init(result) // Init self-collection.
|
||||||
return result
|
return result
|
||||||
},
|
}),
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
13
vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
13
vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
|
@ -1,3 +1,16 @@
|
||||||
|
// Copyright 2016 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
package prometheus
|
package prometheus
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
172
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
172
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
|
@ -11,18 +11,17 @@
|
||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
// Package prometheus provides embeddable metric primitives for servers and
|
// Package prometheus provides metrics primitives to instrument code for
|
||||||
// standardized exposition of telemetry through a web services interface.
|
// monitoring. It also offers a registry for metrics. Sub-packages allow to
|
||||||
|
// expose the registered metrics via HTTP (package promhttp) or push them to a
|
||||||
|
// Pushgateway (package push).
|
||||||
//
|
//
|
||||||
// All exported functions and methods are safe to be used concurrently unless
|
// All exported functions and methods are safe to be used concurrently unless
|
||||||
//specified otherwise.
|
//specified otherwise.
|
||||||
//
|
//
|
||||||
// To expose metrics registered with the Prometheus registry, an HTTP server
|
// A Basic Example
|
||||||
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
|
|
||||||
//
|
//
|
||||||
// http.Handle("/metrics", prometheus.Handler())
|
// As a starting point, a very basic usage example:
|
||||||
//
|
|
||||||
// As a starting point a very basic usage example:
|
|
||||||
//
|
//
|
||||||
// package main
|
// package main
|
||||||
//
|
//
|
||||||
|
@ -30,6 +29,7 @@
|
||||||
// "net/http"
|
// "net/http"
|
||||||
//
|
//
|
||||||
// "github.com/prometheus/client_golang/prometheus"
|
// "github.com/prometheus/client_golang/prometheus"
|
||||||
|
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
// )
|
// )
|
||||||
//
|
//
|
||||||
// var (
|
// var (
|
||||||
|
@ -37,75 +37,145 @@
|
||||||
// Name: "cpu_temperature_celsius",
|
// Name: "cpu_temperature_celsius",
|
||||||
// Help: "Current temperature of the CPU.",
|
// Help: "Current temperature of the CPU.",
|
||||||
// })
|
// })
|
||||||
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
|
// hdFailures = prometheus.NewCounterVec(
|
||||||
|
// prometheus.CounterOpts{
|
||||||
// Name: "hd_errors_total",
|
// Name: "hd_errors_total",
|
||||||
// Help: "Number of hard-disk errors.",
|
// Help: "Number of hard-disk errors.",
|
||||||
// })
|
// },
|
||||||
|
// []string{"device"},
|
||||||
|
// )
|
||||||
// )
|
// )
|
||||||
//
|
//
|
||||||
// func init() {
|
// func init() {
|
||||||
|
// // Metrics have to be registered to be exposed:
|
||||||
// prometheus.MustRegister(cpuTemp)
|
// prometheus.MustRegister(cpuTemp)
|
||||||
// prometheus.MustRegister(hdFailures)
|
// prometheus.MustRegister(hdFailures)
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// func main() {
|
// func main() {
|
||||||
// cpuTemp.Set(65.3)
|
// cpuTemp.Set(65.3)
|
||||||
// hdFailures.Inc()
|
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
||||||
//
|
//
|
||||||
// http.Handle("/metrics", prometheus.Handler())
|
// // The Handler function provides a default handler to expose metrics
|
||||||
|
// // via an HTTP server. "/metrics" is the usual endpoint for that.
|
||||||
|
// http.Handle("/metrics", promhttp.Handler())
|
||||||
// http.ListenAndServe(":8080", nil)
|
// http.ListenAndServe(":8080", nil)
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
//
|
//
|
||||||
// This is a complete program that exports two metrics, a Gauge and a Counter.
|
// This is a complete program that exports two metrics, a Gauge and a Counter,
|
||||||
// It also exports some stats about the HTTP usage of the /metrics
|
// the latter with a label attached to turn it into a (one-dimensional) vector.
|
||||||
// endpoint. (See the Handler function for more detail.)
|
|
||||||
//
|
//
|
||||||
// Two more advanced metric types are the Summary and Histogram. A more
|
// Metrics
|
||||||
// thorough description of metric types can be found in the prometheus docs:
|
//
|
||||||
|
// The number of exported identifiers in this package might appear a bit
|
||||||
|
// overwhelming. Hovever, in addition to the basic plumbing shown in the example
|
||||||
|
// above, you only need to understand the different metric types and their
|
||||||
|
// vector versions for basic usage.
|
||||||
|
//
|
||||||
|
// Above, you have already touched the Counter and the Gauge. There are two more
|
||||||
|
// advanced metric types: the Summary and Histogram. A more thorough description
|
||||||
|
// of those four metric types can be found in the Prometheus docs:
|
||||||
// https://prometheus.io/docs/concepts/metric_types/
|
// https://prometheus.io/docs/concepts/metric_types/
|
||||||
//
|
//
|
||||||
// In addition to the fundamental metric types Gauge, Counter, Summary, and
|
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
|
||||||
// Histogram, a very important part of the Prometheus data model is the
|
// Prometheus server not to assume anything about its type.
|
||||||
// partitioning of samples along dimensions called labels, which results in
|
//
|
||||||
|
// In addition to the fundamental metric types Gauge, Counter, Summary,
|
||||||
|
// Histogram, and Untyped, a very important part of the Prometheus data model is
|
||||||
|
// the partitioning of samples along dimensions called labels, which results in
|
||||||
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
|
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
|
||||||
// and HistogramVec.
|
// HistogramVec, and UntypedVec.
|
||||||
//
|
//
|
||||||
// Those are all the parts needed for basic usage. Detailed documentation and
|
// While only the fundamental metric types implement the Metric interface, both
|
||||||
// examples are provided below.
|
// the metrics and their vector versions implement the Collector interface. A
|
||||||
|
// Collector manages the collection of a number of Metrics, but for convenience,
|
||||||
|
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
|
||||||
|
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
|
||||||
|
// SummaryVec, HistogramVec, and UntypedVec are not.
|
||||||
//
|
//
|
||||||
// Everything else this package offers is essentially for "power users" only. A
|
// To create instances of Metrics and their vector versions, you need a suitable
|
||||||
// few pointers to "power user features":
|
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
|
||||||
|
// HistogramOpts, or UntypedOpts.
|
||||||
//
|
//
|
||||||
// All the various ...Opts structs have a ConstLabels field for labels that
|
// Custom Collectors and constant Metrics
|
||||||
// never change their value (which is only useful under special circumstances,
|
|
||||||
// see documentation of the Opts type).
|
|
||||||
//
|
//
|
||||||
// The Untyped metric behaves like a Gauge, but signals the Prometheus server
|
// While you could create your own implementations of Metric, most likely you
|
||||||
// not to assume anything about its type.
|
// will only ever implement the Collector interface on your own. At a first
|
||||||
|
// glance, a custom Collector seems handy to bundle Metrics for common
|
||||||
|
// registration (with the prime example of the different metric vectors above,
|
||||||
|
// which bundle all the metrics of the same name but with different labels).
|
||||||
//
|
//
|
||||||
// Functions to fine-tune how the metric registry works: EnableCollectChecks,
|
// There is a more involved use case, too: If you already have metrics
|
||||||
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
|
// available, created outside of the Prometheus context, you don't need the
|
||||||
|
// interface of the various Metric types. You essentially want to mirror the
|
||||||
|
// existing numbers into Prometheus Metrics during collection. An own
|
||||||
|
// implementation of the Collector interface is perfect for that. You can create
|
||||||
|
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
|
||||||
|
// NewConstSummary (and their respective Must… versions). That will happen in
|
||||||
|
// the Collect method. The Describe method has to return separate Desc
|
||||||
|
// instances, representative of the “throw-away” metrics to be created
|
||||||
|
// later. NewDesc comes in handy to create those Desc instances.
|
||||||
//
|
//
|
||||||
// For custom metric collection, there are two entry points: Custom Metric
|
// The Collector example illustrates the use case. You can also look at the
|
||||||
// implementations and custom Collector implementations. A Metric is the
|
// source code of the processCollector (mirroring process metrics), the
|
||||||
// fundamental unit in the Prometheus data model: a sample at a point in time
|
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
|
||||||
// together with its meta-data (like its fully-qualified name and any number of
|
// metrics) as examples that are used in this package itself.
|
||||||
// pairs of label name and label value) that knows how to marshal itself into a
|
|
||||||
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
|
|
||||||
// gets registered with the Prometheus registry and manages the collection of
|
|
||||||
// one or more Metrics. Many parts of this package are building blocks for
|
|
||||||
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
|
|
||||||
// metrics under the hood, and by Collectors to describe the Metrics to be
|
|
||||||
// collected, but only to be dealt with by users if they implement their own
|
|
||||||
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
|
|
||||||
// in handy. Other useful components for Metric and Collector implementation
|
|
||||||
// include: LabelPairSorter to sort the DTO version of label pairs,
|
|
||||||
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
|
|
||||||
// collection time, MetricVec to bundle custom Metrics into a metric vector
|
|
||||||
// Collector, SelfCollector to make a custom Metric collect itself.
|
|
||||||
//
|
//
|
||||||
// A good example for a custom Collector is the ExpVarCollector included in this
|
// If you just need to call a function to get a single float value to collect as
|
||||||
// package, which exports variables exported via the "expvar" package as
|
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
|
||||||
// Prometheus metrics.
|
// shortcuts.
|
||||||
|
//
|
||||||
|
// Advanced Uses of the Registry
|
||||||
|
//
|
||||||
|
// While MustRegister is the by far most common way of registering a Collector,
|
||||||
|
// sometimes you might want to handle the errors the registration might
|
||||||
|
// cause. As suggested by the name, MustRegister panics if an error occurs. With
|
||||||
|
// the Register function, the error is returned and can be handled.
|
||||||
|
//
|
||||||
|
// An error is returned if the registered Collector is incompatible or
|
||||||
|
// inconsistent with already registered metrics. The registry aims for
|
||||||
|
// consistency of the collected metrics according to the Prometheus data
|
||||||
|
// model. Inconsistencies are ideally detected at registration time, not at
|
||||||
|
// collect time. The former will usually be detected at start-up time of a
|
||||||
|
// program, while the latter will only happen at scrape time, possibly not even
|
||||||
|
// on the first scrape if the inconsistency only becomes relevant later. That is
|
||||||
|
// the main reason why a Collector and a Metric have to describe themselves to
|
||||||
|
// the registry.
|
||||||
|
//
|
||||||
|
// So far, everything we did operated on the so-called default registry, as it
|
||||||
|
// can be found in the global DefaultRegistry variable. With NewRegistry, you
|
||||||
|
// can create a custom registry, or you can even implement the Registerer or
|
||||||
|
// Gatherer interfaces yourself. The methods Register and Unregister work in
|
||||||
|
// the same way on a custom registry as the global functions Register and
|
||||||
|
// Unregister on the default registry.
|
||||||
|
//
|
||||||
|
// There are a number of uses for custom registries: You can use registries
|
||||||
|
// with special properties, see NewPedanticRegistry. You can avoid global state,
|
||||||
|
// as it is imposed by the DefaultRegistry. You can use multiple registries at
|
||||||
|
// the same time to expose different metrics in different ways. You can use
|
||||||
|
// separate registries for testing purposes.
|
||||||
|
//
|
||||||
|
// Also note that the DefaultRegistry comes registered with a Collector for Go
|
||||||
|
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
|
||||||
|
// NewProcessCollector). With a custom registry, you are in control and decide
|
||||||
|
// yourself about the Collectors to register.
|
||||||
|
//
|
||||||
|
// HTTP Exposition
|
||||||
|
//
|
||||||
|
// The Registry implements the Gatherer interface. The caller of the Gather
|
||||||
|
// method can then expose the gathered metrics in some way. Usually, the metrics
|
||||||
|
// are served via HTTP on the /metrics endpoint. That's happening in the example
|
||||||
|
// above. The tools to expose metrics via HTTP are in the promhttp
|
||||||
|
// sub-package. (The top-level functions in the prometheus package are
|
||||||
|
// deprecated.)
|
||||||
|
//
|
||||||
|
// Pushing to the Pushgateway
|
||||||
|
//
|
||||||
|
// Function for pushing to the Pushgateway can be found in the push sub-package.
|
||||||
|
//
|
||||||
|
// Other Means of Exposition
|
||||||
|
//
|
||||||
|
// More ways of exposing metrics can easily be added. Sending metrics to
|
||||||
|
// Graphite would be an example that will soon be implemented.
|
||||||
package prometheus
|
package prometheus
|
||||||
|
|
|
@ -18,21 +18,21 @@ import (
|
||||||
"expvar"
|
"expvar"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ExpvarCollector collects metrics from the expvar interface. It provides a
|
type expvarCollector struct {
|
||||||
// quick way to expose numeric values that are already exported via expvar as
|
|
||||||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
|
||||||
// fundamentally different, and that the ExpvarCollector is inherently
|
|
||||||
// slow. Thus, the ExpvarCollector is probably great for experiments and
|
|
||||||
// prototying, but you should seriously consider a more direct implementation of
|
|
||||||
// Prometheus metrics for monitoring production systems.
|
|
||||||
//
|
|
||||||
// Use NewExpvarCollector to create new instances.
|
|
||||||
type ExpvarCollector struct {
|
|
||||||
exports map[string]*Desc
|
exports map[string]*Desc
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
|
// NewExpvarCollector returns a newly allocated expvar Collector that still has
|
||||||
// to be registered with the Prometheus registry.
|
// to be registered with a Prometheus registry.
|
||||||
|
//
|
||||||
|
// An expvar Collector collects metrics from the expvar interface. It provides a
|
||||||
|
// quick way to expose numeric values that are already exported via expvar as
|
||||||
|
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
||||||
|
// fundamentally different, and that the expvar Collector is inherently slower
|
||||||
|
// than native Prometheus metrics. Thus, the expvar Collector is probably great
|
||||||
|
// for experiments and prototying, but you should seriously consider a more
|
||||||
|
// direct implementation of Prometheus metrics for monitoring production
|
||||||
|
// systems.
|
||||||
//
|
//
|
||||||
// The exports map has the following meaning:
|
// The exports map has the following meaning:
|
||||||
//
|
//
|
||||||
|
@ -59,21 +59,21 @@ type ExpvarCollector struct {
|
||||||
// sample values.
|
// sample values.
|
||||||
//
|
//
|
||||||
// Anything that does not fit into the scheme above is silently ignored.
|
// Anything that does not fit into the scheme above is silently ignored.
|
||||||
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
|
func NewExpvarCollector(exports map[string]*Desc) Collector {
|
||||||
return &ExpvarCollector{
|
return &expvarCollector{
|
||||||
exports: exports,
|
exports: exports,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Describe implements Collector.
|
// Describe implements Collector.
|
||||||
func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
|
func (e *expvarCollector) Describe(ch chan<- *Desc) {
|
||||||
for _, desc := range e.exports {
|
for _, desc := range e.exports {
|
||||||
ch <- desc
|
ch <- desc
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Collect implements Collector.
|
// Collect implements Collector.
|
||||||
func (e *ExpvarCollector) Collect(ch chan<- Metric) {
|
func (e *expvarCollector) Collect(ch chan<- Metric) {
|
||||||
for name, desc := range e.exports {
|
for name, desc := range e.exports {
|
||||||
var m Metric
|
var m Metric
|
||||||
expVar := expvar.Get(name)
|
expVar := expvar.Get(name)
|
10
vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
10
vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
|
@ -58,7 +58,7 @@ func NewGauge(opts GaugeOpts) Gauge {
|
||||||
// (e.g. number of operations queued, partitioned by user and operation
|
// (e.g. number of operations queued, partitioned by user and operation
|
||||||
// type). Create instances with NewGaugeVec.
|
// type). Create instances with NewGaugeVec.
|
||||||
type GaugeVec struct {
|
type GaugeVec struct {
|
||||||
MetricVec
|
*MetricVec
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
||||||
|
@ -72,13 +72,9 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
return &GaugeVec{
|
return &GaugeVec{
|
||||||
MetricVec: MetricVec{
|
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||||
children: map[uint64]Metric{},
|
|
||||||
desc: desc,
|
|
||||||
newMetric: func(lvs ...string) Metric {
|
|
||||||
return newValue(desc, GaugeValue, 0, lvs...)
|
return newValue(desc, GaugeValue, 0, lvs...)
|
||||||
},
|
}),
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
2
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
2
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
|
@ -17,7 +17,7 @@ type goCollector struct {
|
||||||
|
|
||||||
// NewGoCollector returns a collector which exports metrics about the current
|
// NewGoCollector returns a collector which exports metrics about the current
|
||||||
// go process.
|
// go process.
|
||||||
func NewGoCollector() *goCollector {
|
func NewGoCollector() Collector {
|
||||||
return &goCollector{
|
return &goCollector{
|
||||||
goroutines: NewGauge(GaugeOpts{
|
goroutines: NewGauge(GaugeOpts{
|
||||||
Namespace: "go",
|
Namespace: "go",
|
||||||
|
|
22
vendor/github.com/prometheus/client_golang/prometheus/histogram.go
generated
vendored
22
vendor/github.com/prometheus/client_golang/prometheus/histogram.go
generated
vendored
|
@ -51,11 +51,11 @@ type Histogram interface {
|
||||||
// bucket of a histogram ("le" -> "less or equal").
|
// bucket of a histogram ("le" -> "less or equal").
|
||||||
const bucketLabel = "le"
|
const bucketLabel = "le"
|
||||||
|
|
||||||
var (
|
|
||||||
// DefBuckets are the default Histogram buckets. The default buckets are
|
// DefBuckets are the default Histogram buckets. The default buckets are
|
||||||
// tailored to broadly measure the response time (in seconds) of a
|
// tailored to broadly measure the response time (in seconds) of a network
|
||||||
// network service. Most likely, however, you will be required to define
|
// service. Most likely, however, you will be required to define buckets
|
||||||
// buckets customized to your use case.
|
// customized to your use case.
|
||||||
|
var (
|
||||||
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
|
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
|
||||||
|
|
||||||
errBucketLabelNotAllowed = fmt.Errorf(
|
errBucketLabelNotAllowed = fmt.Errorf(
|
||||||
|
@ -210,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
||||||
// Finally we know the final length of h.upperBounds and can make counts.
|
// Finally we know the final length of h.upperBounds and can make counts.
|
||||||
h.counts = make([]uint64, len(h.upperBounds))
|
h.counts = make([]uint64, len(h.upperBounds))
|
||||||
|
|
||||||
h.Init(h) // Init self-collection.
|
h.init(h) // Init self-collection.
|
||||||
return h
|
return h
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -222,7 +222,7 @@ type histogram struct {
|
||||||
sumBits uint64
|
sumBits uint64
|
||||||
count uint64
|
count uint64
|
||||||
|
|
||||||
SelfCollector
|
selfCollector
|
||||||
// Note that there is no mutex required.
|
// Note that there is no mutex required.
|
||||||
|
|
||||||
desc *Desc
|
desc *Desc
|
||||||
|
@ -287,7 +287,7 @@ func (h *histogram) Write(out *dto.Metric) error {
|
||||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||||
// instances with NewHistogramVec.
|
// instances with NewHistogramVec.
|
||||||
type HistogramVec struct {
|
type HistogramVec struct {
|
||||||
MetricVec
|
*MetricVec
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
||||||
|
@ -301,13 +301,9 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
return &HistogramVec{
|
return &HistogramVec{
|
||||||
MetricVec: MetricVec{
|
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||||
children: map[uint64]Metric{},
|
|
||||||
desc: desc,
|
|
||||||
newMetric: func(lvs ...string) Metric {
|
|
||||||
return newHistogram(desc, opts, lvs...)
|
return newHistogram(desc, opts, lvs...)
|
||||||
},
|
}),
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
115
vendor/github.com/prometheus/client_golang/prometheus/http.go
generated
vendored
115
vendor/github.com/prometheus/client_golang/prometheus/http.go
generated
vendored
|
@ -15,14 +15,114 @@ package prometheus
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/expfmt"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
||||||
|
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
||||||
|
// related should live. The functions here are just for avoiding
|
||||||
|
// breakage. Everything is deprecated.
|
||||||
|
|
||||||
|
const (
|
||||||
|
contentTypeHeader = "Content-Type"
|
||||||
|
contentLengthHeader = "Content-Length"
|
||||||
|
contentEncodingHeader = "Content-Encoding"
|
||||||
|
acceptEncodingHeader = "Accept-Encoding"
|
||||||
|
)
|
||||||
|
|
||||||
|
var bufPool sync.Pool
|
||||||
|
|
||||||
|
func getBuf() *bytes.Buffer {
|
||||||
|
buf := bufPool.Get()
|
||||||
|
if buf == nil {
|
||||||
|
return &bytes.Buffer{}
|
||||||
|
}
|
||||||
|
return buf.(*bytes.Buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func giveBuf(buf *bytes.Buffer) {
|
||||||
|
buf.Reset()
|
||||||
|
bufPool.Put(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
||||||
|
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
||||||
|
// name).
|
||||||
|
//
|
||||||
|
// Deprecated: Please note the issues described in the doc comment of
|
||||||
|
// InstrumentHandler. You might want to consider using promhttp.Handler instead
|
||||||
|
// (which is non instrumented).
|
||||||
|
func Handler() http.Handler {
|
||||||
|
return InstrumentHandler("prometheus", UninstrumentedHandler())
|
||||||
|
}
|
||||||
|
|
||||||
|
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
||||||
|
//
|
||||||
|
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
|
||||||
|
func UninstrumentedHandler() http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||||
|
mfs, err := DefaultGatherer.Gather()
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
contentType := expfmt.Negotiate(req.Header)
|
||||||
|
buf := getBuf()
|
||||||
|
defer giveBuf(buf)
|
||||||
|
writer, encoding := decorateWriter(req, buf)
|
||||||
|
enc := expfmt.NewEncoder(writer, contentType)
|
||||||
|
var lastErr error
|
||||||
|
for _, mf := range mfs {
|
||||||
|
if err := enc.Encode(mf); err != nil {
|
||||||
|
lastErr = err
|
||||||
|
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if closer, ok := writer.(io.Closer); ok {
|
||||||
|
closer.Close()
|
||||||
|
}
|
||||||
|
if lastErr != nil && buf.Len() == 0 {
|
||||||
|
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
header := w.Header()
|
||||||
|
header.Set(contentTypeHeader, string(contentType))
|
||||||
|
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
||||||
|
if encoding != "" {
|
||||||
|
header.Set(contentEncodingHeader, encoding)
|
||||||
|
}
|
||||||
|
w.Write(buf.Bytes())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
||||||
|
// returns the decorated writer and the appropriate "Content-Encoding" header
|
||||||
|
// (which is empty if no compression is enabled).
|
||||||
|
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
|
||||||
|
header := request.Header.Get(acceptEncodingHeader)
|
||||||
|
parts := strings.Split(header, ",")
|
||||||
|
for _, part := range parts {
|
||||||
|
part := strings.TrimSpace(part)
|
||||||
|
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||||
|
return gzip.NewWriter(writer), "gzip"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return writer, ""
|
||||||
|
}
|
||||||
|
|
||||||
var instLabels = []string{"method", "code"}
|
var instLabels = []string{"method", "code"}
|
||||||
|
|
||||||
type nower interface {
|
type nower interface {
|
||||||
|
@ -58,7 +158,7 @@ func nowSeries(t ...time.Time) nower {
|
||||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
// value. http_requests_total is a metric vector partitioned by HTTP method
|
||||||
// (label name "method") and HTTP status code (label name "code").
|
// (label name "method") and HTTP status code (label name "code").
|
||||||
//
|
//
|
||||||
// Note that InstrumentHandler has several issues:
|
// Deprecated: InstrumentHandler has several issues:
|
||||||
//
|
//
|
||||||
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
||||||
// aggregation across multiple instances is required.
|
// aggregation across multiple instances is required.
|
||||||
|
@ -73,8 +173,8 @@ func nowSeries(t ...time.Time) nower {
|
||||||
// performing such writes.
|
// performing such writes.
|
||||||
//
|
//
|
||||||
// Upcoming versions of this package will provide ways of instrumenting HTTP
|
// Upcoming versions of this package will provide ways of instrumenting HTTP
|
||||||
// handlers that are more flexible and have fewer issues. Consider this function
|
// handlers that are more flexible and have fewer issues. Please prefer direct
|
||||||
// DEPRECATED and prefer direct instrumentation in the meantime.
|
// instrumentation in the meantime.
|
||||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
||||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
||||||
}
|
}
|
||||||
|
@ -82,6 +182,9 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun
|
||||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
||||||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
// otherwise works in the same way as InstrumentHandler (and shares the same
|
||||||
// issues).
|
// issues).
|
||||||
|
//
|
||||||
|
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
||||||
|
// InstrumentHandler is.
|
||||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||||
return InstrumentHandlerFuncWithOpts(
|
return InstrumentHandlerFuncWithOpts(
|
||||||
SummaryOpts{
|
SummaryOpts{
|
||||||
|
@ -117,6 +220,9 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
|
||||||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
||||||
// and all its fields are set to the equally named fields in the provided
|
// and all its fields are set to the equally named fields in the provided
|
||||||
// SummaryOpts.
|
// SummaryOpts.
|
||||||
|
//
|
||||||
|
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
||||||
|
// InstrumentHandler is.
|
||||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
||||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
||||||
}
|
}
|
||||||
|
@ -125,6 +231,9 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
|
||||||
// the same issues) but provides more flexibility (at the cost of a more complex
|
// the same issues) but provides more flexibility (at the cost of a more complex
|
||||||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
||||||
// SummaryOpts are used.
|
// SummaryOpts are used.
|
||||||
|
//
|
||||||
|
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
||||||
|
// as InstrumentHandler is.
|
||||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||||
reqCnt := NewCounterVec(
|
reqCnt := NewCounterVec(
|
||||||
CounterOpts{
|
CounterOpts{
|
||||||
|
|
34
vendor/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
34
vendor/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
|
@ -22,10 +22,8 @@ import (
|
||||||
const separatorByte byte = 255
|
const separatorByte byte = 255
|
||||||
|
|
||||||
// A Metric models a single sample value with its meta data being exported to
|
// A Metric models a single sample value with its meta data being exported to
|
||||||
// Prometheus. Implementers of Metric in this package inclued Gauge, Counter,
|
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
|
||||||
// Untyped, and Summary. Users can implement their own Metric types, but that
|
// Histogram, Summary, and Untyped.
|
||||||
// should be rarely needed. See the example for SelfCollector, which is also an
|
|
||||||
// example for a user-implemented Metric.
|
|
||||||
type Metric interface {
|
type Metric interface {
|
||||||
// Desc returns the descriptor for the Metric. This method idempotently
|
// Desc returns the descriptor for the Metric. This method idempotently
|
||||||
// returns the same descriptor throughout the lifetime of the
|
// returns the same descriptor throughout the lifetime of the
|
||||||
|
@ -36,21 +34,23 @@ type Metric interface {
|
||||||
// Write encodes the Metric into a "Metric" Protocol Buffer data
|
// Write encodes the Metric into a "Metric" Protocol Buffer data
|
||||||
// transmission object.
|
// transmission object.
|
||||||
//
|
//
|
||||||
// Implementers of custom Metric types must observe concurrency safety
|
// Metric implementations must observe concurrency safety as reads of
|
||||||
// as reads of this metric may occur at any time, and any blocking
|
// this metric may occur at any time, and any blocking occurs at the
|
||||||
// occurs at the expense of total performance of rendering all
|
// expense of total performance of rendering all registered
|
||||||
// registered metrics. Ideally Metric implementations should support
|
// metrics. Ideally, Metric implementations should support concurrent
|
||||||
// concurrent readers.
|
// readers.
|
||||||
//
|
//
|
||||||
// The Prometheus client library attempts to minimize memory allocations
|
// While populating dto.Metric, it is the responsibility of the
|
||||||
// and will provide a pre-existing reset dto.Metric pointer. Prometheus
|
// implementation to ensure validity of the Metric protobuf (like valid
|
||||||
// may recycle the dto.Metric proto message, so Metric implementations
|
// UTF-8 strings or syntactically valid metric and label names). It is
|
||||||
// should just populate the provided dto.Metric and then should not keep
|
// recommended to sort labels lexicographically. (Implementers may find
|
||||||
// any reference to it.
|
// LabelPairSorter useful for that.) Callers of Write should still make
|
||||||
//
|
// sure of sorting if they depend on it.
|
||||||
// While populating dto.Metric, labels must be sorted lexicographically.
|
|
||||||
// (Implementers may find LabelPairSorter useful for that.)
|
|
||||||
Write(*dto.Metric) error
|
Write(*dto.Metric) error
|
||||||
|
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
||||||
|
// dto.Metric protobuf to save allocations has disappeared. The
|
||||||
|
// signature of this method should be changed to "Write() (*dto.Metric,
|
||||||
|
// error)".
|
||||||
}
|
}
|
||||||
|
|
||||||
// Opts bundles the options for creating most Metric types. Each metric
|
// Opts bundles the options for creating most Metric types. Each metric
|
||||||
|
|
4
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
generated
vendored
4
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
generated
vendored
|
@ -28,7 +28,7 @@ type processCollector struct {
|
||||||
// NewProcessCollector returns a collector which exports the current state of
|
// NewProcessCollector returns a collector which exports the current state of
|
||||||
// process metrics including cpu, memory and file descriptor usage as well as
|
// process metrics including cpu, memory and file descriptor usage as well as
|
||||||
// the process start time for the given process id under the given namespace.
|
// the process start time for the given process id under the given namespace.
|
||||||
func NewProcessCollector(pid int, namespace string) *processCollector {
|
func NewProcessCollector(pid int, namespace string) Collector {
|
||||||
return NewProcessCollectorPIDFn(
|
return NewProcessCollectorPIDFn(
|
||||||
func() (int, error) { return pid, nil },
|
func() (int, error) { return pid, nil },
|
||||||
namespace,
|
namespace,
|
||||||
|
@ -43,7 +43,7 @@ func NewProcessCollector(pid int, namespace string) *processCollector {
|
||||||
func NewProcessCollectorPIDFn(
|
func NewProcessCollectorPIDFn(
|
||||||
pidFn func() (int, error),
|
pidFn func() (int, error),
|
||||||
namespace string,
|
namespace string,
|
||||||
) *processCollector {
|
) Collector {
|
||||||
c := processCollector{
|
c := processCollector{
|
||||||
pidFn: pidFn,
|
pidFn: pidFn,
|
||||||
collectFn: func(chan<- Metric) {},
|
collectFn: func(chan<- Metric) {},
|
||||||
|
|
65
vendor/github.com/prometheus/client_golang/prometheus/push.go
generated
vendored
65
vendor/github.com/prometheus/client_golang/prometheus/push.go
generated
vendored
|
@ -1,65 +0,0 @@
|
||||||
// Copyright 2015 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Copyright (c) 2013, The Prometheus Authors
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
// Push triggers a metric collection by the default registry and pushes all
|
|
||||||
// collected metrics to the Pushgateway specified by url. See the Pushgateway
|
|
||||||
// documentation for detailed implications of the job and instance
|
|
||||||
// parameter. instance can be left empty. You can use just host:port or ip:port
|
|
||||||
// as url, in which case 'http://' is added automatically. You can also include
|
|
||||||
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
|
|
||||||
//
|
|
||||||
// Note that all previously pushed metrics with the same job and instance will
|
|
||||||
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
|
|
||||||
// to push to the Pushgateway.)
|
|
||||||
func Push(job, instance, url string) error {
|
|
||||||
return defRegistry.Push(job, instance, url, "PUT")
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushAdd works like Push, but only previously pushed metrics with the same
|
|
||||||
// name (and the same job and instance) will be replaced. (It uses HTTP method
|
|
||||||
// 'POST' to push to the Pushgateway.)
|
|
||||||
func PushAdd(job, instance, url string) error {
|
|
||||||
return defRegistry.Push(job, instance, url, "POST")
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushCollectors works like Push, but it does not collect from the default
|
|
||||||
// registry. Instead, it collects from the provided collectors. It is a
|
|
||||||
// convenient way to push only a few metrics.
|
|
||||||
func PushCollectors(job, instance, url string, collectors ...Collector) error {
|
|
||||||
return pushCollectors(job, instance, url, "PUT", collectors...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushAddCollectors works like PushAdd, but it does not collect from the
|
|
||||||
// default registry. Instead, it collects from the provided collectors. It is a
|
|
||||||
// convenient way to push only a few metrics.
|
|
||||||
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
|
|
||||||
return pushCollectors(job, instance, url, "POST", collectors...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
|
|
||||||
r := newRegistry()
|
|
||||||
for _, collector := range collectors {
|
|
||||||
if _, err := r.Register(collector); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r.Push(job, instance, url, method)
|
|
||||||
}
|
|
1013
vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
1013
vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
File diff suppressed because it is too large
Load diff
26
vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
26
vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
|
@ -53,8 +53,8 @@ type Summary interface {
|
||||||
Observe(float64)
|
Observe(float64)
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
// DefObjectives are the default Summary quantile values.
|
// DefObjectives are the default Summary quantile values.
|
||||||
|
var (
|
||||||
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
||||||
|
|
||||||
errQuantileLabelNotAllowed = fmt.Errorf(
|
errQuantileLabelNotAllowed = fmt.Errorf(
|
||||||
|
@ -139,11 +139,11 @@ type SummaryOpts struct {
|
||||||
BufCap uint32
|
BufCap uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
|
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
|
||||||
// method of perk/quantile is actually not working as advertised - and it might
|
// perk/quantile is actually not working as advertised - and it might be
|
||||||
// be unfixable, as the underlying algorithm is apparently not capable of
|
// unfixable, as the underlying algorithm is apparently not capable of merging
|
||||||
// merging summaries in the first place. To avoid using Merge, we are currently
|
// summaries in the first place. To avoid using Merge, we are currently adding
|
||||||
// adding observations to _each_ age bucket, i.e. the effort to add a sample is
|
// observations to _each_ age bucket, i.e. the effort to add a sample is
|
||||||
// essentially multiplied by the number of age buckets. When rotating age
|
// essentially multiplied by the number of age buckets. When rotating age
|
||||||
// buckets, we empty the previous head stream. On scrape time, we simply take
|
// buckets, we empty the previous head stream. On scrape time, we simply take
|
||||||
// the quantiles from the head stream (no merging required). Result: More effort
|
// the quantiles from the head stream (no merging required). Result: More effort
|
||||||
|
@ -227,12 +227,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||||
}
|
}
|
||||||
sort.Float64s(s.sortedObjectives)
|
sort.Float64s(s.sortedObjectives)
|
||||||
|
|
||||||
s.Init(s) // Init self-collection.
|
s.init(s) // Init self-collection.
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
type summary struct {
|
type summary struct {
|
||||||
SelfCollector
|
selfCollector
|
||||||
|
|
||||||
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
|
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
|
||||||
mtx sync.Mutex // Protects every other moving part.
|
mtx sync.Mutex // Protects every other moving part.
|
||||||
|
@ -390,7 +390,7 @@ func (s quantSort) Less(i, j int) bool {
|
||||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||||
// instances with NewSummaryVec.
|
// instances with NewSummaryVec.
|
||||||
type SummaryVec struct {
|
type SummaryVec struct {
|
||||||
MetricVec
|
*MetricVec
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||||
|
@ -404,13 +404,9 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
return &SummaryVec{
|
return &SummaryVec{
|
||||||
MetricVec: MetricVec{
|
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||||
children: map[uint64]Metric{},
|
|
||||||
desc: desc,
|
|
||||||
newMetric: func(lvs ...string) Metric {
|
|
||||||
return newSummary(desc, opts, lvs...)
|
return newSummary(desc, opts, lvs...)
|
||||||
},
|
}),
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
10
vendor/github.com/prometheus/client_golang/prometheus/untyped.go
generated
vendored
10
vendor/github.com/prometheus/client_golang/prometheus/untyped.go
generated
vendored
|
@ -56,7 +56,7 @@ func NewUntyped(opts UntypedOpts) Untyped {
|
||||||
// labels. This is used if you want to count the same thing partitioned by
|
// labels. This is used if you want to count the same thing partitioned by
|
||||||
// various dimensions. Create instances with NewUntypedVec.
|
// various dimensions. Create instances with NewUntypedVec.
|
||||||
type UntypedVec struct {
|
type UntypedVec struct {
|
||||||
MetricVec
|
*MetricVec
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
|
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
|
||||||
|
@ -70,13 +70,9 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
return &UntypedVec{
|
return &UntypedVec{
|
||||||
MetricVec: MetricVec{
|
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||||
children: map[uint64]Metric{},
|
|
||||||
desc: desc,
|
|
||||||
newMetric: func(lvs ...string) Metric {
|
|
||||||
return newValue(desc, UntypedValue, 0, lvs...)
|
return newValue(desc, UntypedValue, 0, lvs...)
|
||||||
},
|
}),
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
8
vendor/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
8
vendor/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
|
@ -48,7 +48,7 @@ type value struct {
|
||||||
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||||
valBits uint64
|
valBits uint64
|
||||||
|
|
||||||
SelfCollector
|
selfCollector
|
||||||
|
|
||||||
desc *Desc
|
desc *Desc
|
||||||
valType ValueType
|
valType ValueType
|
||||||
|
@ -68,7 +68,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin
|
||||||
valBits: math.Float64bits(val),
|
valBits: math.Float64bits(val),
|
||||||
labelPairs: makeLabelPairs(desc, labelValues),
|
labelPairs: makeLabelPairs(desc, labelValues),
|
||||||
}
|
}
|
||||||
result.Init(result)
|
result.init(result)
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -113,7 +113,7 @@ func (v *value) Write(out *dto.Metric) error {
|
||||||
// library to back the implementations of CounterFunc, GaugeFunc, and
|
// library to back the implementations of CounterFunc, GaugeFunc, and
|
||||||
// UntypedFunc.
|
// UntypedFunc.
|
||||||
type valueFunc struct {
|
type valueFunc struct {
|
||||||
SelfCollector
|
selfCollector
|
||||||
|
|
||||||
desc *Desc
|
desc *Desc
|
||||||
valType ValueType
|
valType ValueType
|
||||||
|
@ -134,7 +134,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
|
||||||
function: function,
|
function: function,
|
||||||
labelPairs: makeLabelPairs(desc, nil),
|
labelPairs: makeLabelPairs(desc, nil),
|
||||||
}
|
}
|
||||||
result.Init(result)
|
result.init(result)
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
239
vendor/github.com/prometheus/client_golang/prometheus/vec.go
generated
vendored
239
vendor/github.com/prometheus/client_golang/prometheus/vec.go
generated
vendored
|
@ -16,6 +16,8 @@ package prometheus
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MetricVec is a Collector to bundle metrics of the same name that
|
// MetricVec is a Collector to bundle metrics of the same name that
|
||||||
|
@ -25,10 +27,31 @@ import (
|
||||||
// provided in this package.
|
// provided in this package.
|
||||||
type MetricVec struct {
|
type MetricVec struct {
|
||||||
mtx sync.RWMutex // Protects the children.
|
mtx sync.RWMutex // Protects the children.
|
||||||
children map[uint64]Metric
|
children map[uint64][]metricWithLabelValues
|
||||||
desc *Desc
|
desc *Desc
|
||||||
|
|
||||||
newMetric func(labelValues ...string) Metric
|
newMetric func(labelValues ...string) Metric
|
||||||
|
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
|
||||||
|
hashAddByte func(h uint64, b byte) uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// newMetricVec returns an initialized MetricVec. The concrete value is
|
||||||
|
// returned for embedding into another struct.
|
||||||
|
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
|
||||||
|
return &MetricVec{
|
||||||
|
children: map[uint64][]metricWithLabelValues{},
|
||||||
|
desc: desc,
|
||||||
|
newMetric: newMetric,
|
||||||
|
hashAdd: hashAdd,
|
||||||
|
hashAddByte: hashAddByte,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// metricWithLabelValues provides the metric and its label values for
|
||||||
|
// disambiguation on hash collision.
|
||||||
|
type metricWithLabelValues struct {
|
||||||
|
values []string
|
||||||
|
metric Metric
|
||||||
}
|
}
|
||||||
|
|
||||||
// Describe implements Collector. The length of the returned slice
|
// Describe implements Collector. The length of the returned slice
|
||||||
|
@ -42,8 +65,10 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
|
||||||
m.mtx.RLock()
|
m.mtx.RLock()
|
||||||
defer m.mtx.RUnlock()
|
defer m.mtx.RUnlock()
|
||||||
|
|
||||||
for _, metric := range m.children {
|
for _, metrics := range m.children {
|
||||||
ch <- metric
|
for _, metric := range metrics {
|
||||||
|
ch <- metric.metric
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -77,16 +102,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
m.mtx.RLock()
|
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
|
||||||
metric, ok := m.children[h]
|
|
||||||
m.mtx.RUnlock()
|
|
||||||
if ok {
|
|
||||||
return metric, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
m.mtx.Lock()
|
|
||||||
defer m.mtx.Unlock()
|
|
||||||
return m.getOrCreateMetric(h, lvs...), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetMetricWith returns the Metric for the given Labels map (the label names
|
// GetMetricWith returns the Metric for the given Labels map (the label names
|
||||||
|
@ -107,20 +123,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
m.mtx.RLock()
|
return m.getOrCreateMetricWithLabels(h, labels), nil
|
||||||
metric, ok := m.children[h]
|
|
||||||
m.mtx.RUnlock()
|
|
||||||
if ok {
|
|
||||||
return metric, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
lvs := make([]string, len(labels))
|
|
||||||
for i, label := range m.desc.variableLabels {
|
|
||||||
lvs[i] = labels[label]
|
|
||||||
}
|
|
||||||
m.mtx.Lock()
|
|
||||||
defer m.mtx.Unlock()
|
|
||||||
return m.getOrCreateMetric(h, lvs...), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
|
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
|
||||||
|
@ -168,11 +171,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if _, ok := m.children[h]; !ok {
|
return m.deleteByHashWithLabelValues(h, lvs)
|
||||||
return false
|
|
||||||
}
|
|
||||||
delete(m.children, h)
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete deletes the metric where the variable labels are the same as those
|
// Delete deletes the metric where the variable labels are the same as those
|
||||||
|
@ -193,10 +192,50 @@ func (m *MetricVec) Delete(labels Labels) bool {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if _, ok := m.children[h]; !ok {
|
|
||||||
|
return m.deleteByHashWithLabels(h, labels)
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
|
||||||
|
// there are multiple matches in the bucket, use lvs to select a metric and
|
||||||
|
// remove only that metric.
|
||||||
|
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
|
||||||
|
metrics, ok := m.children[h]
|
||||||
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
i := m.findMetricWithLabelValues(metrics, lvs)
|
||||||
|
if i >= len(metrics) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(metrics) > 1 {
|
||||||
|
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||||
|
} else {
|
||||||
delete(m.children, h)
|
delete(m.children, h)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
|
||||||
|
// are multiple matches in the bucket, use lvs to select a metric and remove
|
||||||
|
// only that metric.
|
||||||
|
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
|
||||||
|
metrics, ok := m.children[h]
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
i := m.findMetricWithLabels(metrics, labels)
|
||||||
|
if i >= len(metrics) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(metrics) > 1 {
|
||||||
|
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||||
|
} else {
|
||||||
|
delete(m.children, h)
|
||||||
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -216,7 +255,8 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
|
||||||
}
|
}
|
||||||
h := hashNew()
|
h := hashNew()
|
||||||
for _, val := range vals {
|
for _, val := range vals {
|
||||||
h = hashAdd(h, val)
|
h = m.hashAdd(h, val)
|
||||||
|
h = m.hashAddByte(h, model.SeparatorByte)
|
||||||
}
|
}
|
||||||
return h, nil
|
return h, nil
|
||||||
}
|
}
|
||||||
|
@ -231,19 +271,134 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, fmt.Errorf("label name %q missing in label map", label)
|
return 0, fmt.Errorf("label name %q missing in label map", label)
|
||||||
}
|
}
|
||||||
h = hashAdd(h, val)
|
h = m.hashAdd(h, val)
|
||||||
|
h = m.hashAddByte(h, model.SeparatorByte)
|
||||||
}
|
}
|
||||||
return h, nil
|
return h, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
|
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
|
||||||
metric, ok := m.children[hash]
|
// or creates it and returns the new one.
|
||||||
|
//
|
||||||
|
// This function holds the mutex.
|
||||||
|
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
|
||||||
|
m.mtx.RLock()
|
||||||
|
metric, ok := m.getMetricWithLabelValues(hash, lvs)
|
||||||
|
m.mtx.RUnlock()
|
||||||
|
if ok {
|
||||||
|
return metric
|
||||||
|
}
|
||||||
|
|
||||||
|
m.mtx.Lock()
|
||||||
|
defer m.mtx.Unlock()
|
||||||
|
metric, ok = m.getMetricWithLabelValues(hash, lvs)
|
||||||
if !ok {
|
if !ok {
|
||||||
// Copy labelValues. Otherwise, they would be allocated even if we don't go
|
// Copy to avoid allocation in case wo don't go down this code path.
|
||||||
// down this code path.
|
copiedLVs := make([]string, len(lvs))
|
||||||
copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
|
copy(copiedLVs, lvs)
|
||||||
metric = m.newMetric(copiedLabelValues...)
|
metric = m.newMetric(copiedLVs...)
|
||||||
m.children[hash] = metric
|
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
|
||||||
}
|
}
|
||||||
return metric
|
return metric
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
|
||||||
|
// or creates it and returns the new one.
|
||||||
|
//
|
||||||
|
// This function holds the mutex.
|
||||||
|
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
|
||||||
|
m.mtx.RLock()
|
||||||
|
metric, ok := m.getMetricWithLabels(hash, labels)
|
||||||
|
m.mtx.RUnlock()
|
||||||
|
if ok {
|
||||||
|
return metric
|
||||||
|
}
|
||||||
|
|
||||||
|
m.mtx.Lock()
|
||||||
|
defer m.mtx.Unlock()
|
||||||
|
metric, ok = m.getMetricWithLabels(hash, labels)
|
||||||
|
if !ok {
|
||||||
|
lvs := m.extractLabelValues(labels)
|
||||||
|
metric = m.newMetric(lvs...)
|
||||||
|
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
|
||||||
|
}
|
||||||
|
return metric
|
||||||
|
}
|
||||||
|
|
||||||
|
// getMetricWithLabelValues gets a metric while handling possible collisions in
|
||||||
|
// the hash space. Must be called while holding read mutex.
|
||||||
|
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
|
||||||
|
metrics, ok := m.children[h]
|
||||||
|
if ok {
|
||||||
|
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
|
||||||
|
return metrics[i].metric, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// getMetricWithLabels gets a metric while handling possible collisions in
|
||||||
|
// the hash space. Must be called while holding read mutex.
|
||||||
|
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
|
||||||
|
metrics, ok := m.children[h]
|
||||||
|
if ok {
|
||||||
|
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
|
||||||
|
return metrics[i].metric, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// findMetricWithLabelValues returns the index of the matching metric or
|
||||||
|
// len(metrics) if not found.
|
||||||
|
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
|
||||||
|
for i, metric := range metrics {
|
||||||
|
if m.matchLabelValues(metric.values, lvs) {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(metrics)
|
||||||
|
}
|
||||||
|
|
||||||
|
// findMetricWithLabels returns the index of the matching metric or len(metrics)
|
||||||
|
// if not found.
|
||||||
|
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
|
||||||
|
for i, metric := range metrics {
|
||||||
|
if m.matchLabels(metric.values, labels) {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(metrics)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
|
||||||
|
if len(values) != len(lvs) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, v := range values {
|
||||||
|
if v != lvs[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
|
||||||
|
if len(labels) != len(values) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, k := range m.desc.variableLabels {
|
||||||
|
if values[i] != labels[k] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MetricVec) extractLabelValues(labels Labels) []string {
|
||||||
|
labelValues := make([]string, len(labels))
|
||||||
|
for i, k := range m.desc.variableLabels {
|
||||||
|
labelValues[i] = labels[k]
|
||||||
|
}
|
||||||
|
return labelValues
|
||||||
|
}
|
||||||
|
|
3
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
3
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
|
@ -29,9 +29,6 @@ const (
|
||||||
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
||||||
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
||||||
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
||||||
|
|
||||||
// fmtJSON2 is hidden as it is deprecated.
|
|
||||||
fmtJSON2 Format = `application/json; version=0.0.2`
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
30
vendor/github.com/prometheus/common/expfmt/text_create.go
generated
vendored
30
vendor/github.com/prometheus/common/expfmt/text_create.go
generated
vendored
|
@ -14,7 +14,6 @@
|
||||||
package expfmt
|
package expfmt
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
|
@ -26,9 +25,12 @@ import (
|
||||||
|
|
||||||
// MetricFamilyToText converts a MetricFamily proto message into text format and
|
// MetricFamilyToText converts a MetricFamily proto message into text format and
|
||||||
// writes the resulting lines to 'out'. It returns the number of bytes written
|
// writes the resulting lines to 'out'. It returns the number of bytes written
|
||||||
// and any error encountered. This function does not perform checks on the
|
// and any error encountered. The output will have the same order as the input,
|
||||||
// content of the metric and label names, i.e. invalid metric or label names
|
// no further sorting is performed. Furthermore, this function assumes the input
|
||||||
|
// is already sanitized and does not perform any sanity checks. If the input
|
||||||
|
// contains duplicate metrics or invalid metric or label names, the conversion
|
||||||
// will result in invalid text format output.
|
// will result in invalid text format output.
|
||||||
|
//
|
||||||
// This method fulfills the type 'prometheus.encoder'.
|
// This method fulfills the type 'prometheus.encoder'.
|
||||||
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||||
var written int
|
var written int
|
||||||
|
@ -285,21 +287,17 @@ func labelPairsToText(
|
||||||
return written, nil
|
return written, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
|
||||||
|
escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
|
||||||
|
)
|
||||||
|
|
||||||
// escapeString replaces '\' by '\\', new line character by '\n', and - if
|
// escapeString replaces '\' by '\\', new line character by '\n', and - if
|
||||||
// includeDoubleQuote is true - '"' by '\"'.
|
// includeDoubleQuote is true - '"' by '\"'.
|
||||||
func escapeString(v string, includeDoubleQuote bool) string {
|
func escapeString(v string, includeDoubleQuote bool) string {
|
||||||
result := bytes.NewBuffer(make([]byte, 0, len(v)))
|
if includeDoubleQuote {
|
||||||
for _, c := range v {
|
return escapeWithDoubleQuote.Replace(v)
|
||||||
switch {
|
|
||||||
case c == '\\':
|
|
||||||
result.WriteString(`\\`)
|
|
||||||
case includeDoubleQuote && c == '"':
|
|
||||||
result.WriteString(`\"`)
|
|
||||||
case c == '\n':
|
|
||||||
result.WriteString(`\n`)
|
|
||||||
default:
|
|
||||||
result.WriteRune(c)
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return result.String()
|
return escape.Replace(v)
|
||||||
}
|
}
|
||||||
|
|
2
vendor/github.com/prometheus/common/expfmt/text_parse.go
generated
vendored
2
vendor/github.com/prometheus/common/expfmt/text_parse.go
generated
vendored
|
@ -47,7 +47,7 @@ func (e ParseError) Error() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// TextParser is used to parse the simple and flat text-based exchange format. Its
|
// TextParser is used to parse the simple and flat text-based exchange format. Its
|
||||||
// nil value is ready to use.
|
// zero value is ready to use.
|
||||||
type TextParser struct {
|
type TextParser struct {
|
||||||
metricFamiliesByName map[string]*dto.MetricFamily
|
metricFamiliesByName map[string]*dto.MetricFamily
|
||||||
buf *bufio.Reader // Where the parsed input is read through.
|
buf *bufio.Reader // Where the parsed input is read through.
|
||||||
|
|
89
vendor/github.com/prometheus/common/log/eventlog_formatter.go
generated
vendored
Normal file
89
vendor/github.com/prometheus/common/log/eventlog_formatter.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows/svc/eventlog"
|
||||||
|
|
||||||
|
"github.com/Sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
setEventlogFormatter = func(name string, debugAsInfo bool) error {
|
||||||
|
if name == "" {
|
||||||
|
return fmt.Errorf("missing name parameter")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
|
||||||
|
origLogger.Errorf("can't connect logger to eventlog: %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
origLogger.Formatter = fmter
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventlogger struct {
|
||||||
|
log *eventlog.Log
|
||||||
|
debugAsInfo bool
|
||||||
|
wrap logrus.Formatter
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
|
||||||
|
logHandle, err := eventlog.Open(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
|
||||||
|
data, err := s.wrap.Format(e)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
|
||||||
|
return data, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch e.Level {
|
||||||
|
case logrus.PanicLevel:
|
||||||
|
fallthrough
|
||||||
|
case logrus.FatalLevel:
|
||||||
|
fallthrough
|
||||||
|
case logrus.ErrorLevel:
|
||||||
|
err = s.log.Error(102, e.Message)
|
||||||
|
case logrus.WarnLevel:
|
||||||
|
err = s.log.Warning(101, e.Message)
|
||||||
|
case logrus.InfoLevel:
|
||||||
|
err = s.log.Info(100, e.Message)
|
||||||
|
case logrus.DebugLevel:
|
||||||
|
if s.debugAsInfo {
|
||||||
|
err = s.log.Info(100, e.Message)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
err = s.log.Info(100, e.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, err
|
||||||
|
}
|
69
vendor/github.com/prometheus/common/log/log.go
generated
vendored
69
vendor/github.com/prometheus/common/log/log.go
generated
vendored
|
@ -16,20 +16,23 @@ package log
|
||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/Sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
type levelFlag struct{}
|
type levelFlag string
|
||||||
|
|
||||||
// String implements flag.Value.
|
// String implements flag.Value.
|
||||||
func (f levelFlag) String() string {
|
func (f levelFlag) String() string {
|
||||||
return origLogger.Level.String()
|
return fmt.Sprintf("%q", string(f))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set implements flag.Value.
|
// Set implements flag.Value.
|
||||||
|
@ -45,20 +48,23 @@ func (f levelFlag) Set(level string) error {
|
||||||
// setSyslogFormatter is nil if the target architecture does not support syslog.
|
// setSyslogFormatter is nil if the target architecture does not support syslog.
|
||||||
var setSyslogFormatter func(string, string) error
|
var setSyslogFormatter func(string, string) error
|
||||||
|
|
||||||
|
// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
|
||||||
|
var setEventlogFormatter func(string, bool) error
|
||||||
|
|
||||||
func setJSONFormatter() {
|
func setJSONFormatter() {
|
||||||
origLogger.Formatter = &logrus.JSONFormatter{}
|
origLogger.Formatter = &logrus.JSONFormatter{}
|
||||||
}
|
}
|
||||||
|
|
||||||
type logFormatFlag struct{ uri string }
|
type logFormatFlag url.URL
|
||||||
|
|
||||||
// String implements flag.Value.
|
// String implements flag.Value.
|
||||||
func (f logFormatFlag) String() string {
|
func (f logFormatFlag) String() string {
|
||||||
return f.uri
|
u := url.URL(f)
|
||||||
|
return fmt.Sprintf("%q", u.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set implements flag.Value.
|
// Set implements flag.Value.
|
||||||
func (f logFormatFlag) Set(format string) error {
|
func (f logFormatFlag) Set(format string) error {
|
||||||
f.uri = format
|
|
||||||
u, err := url.Parse(format)
|
u, err := url.Parse(format)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -79,13 +85,23 @@ func (f logFormatFlag) Set(format string) error {
|
||||||
appname := u.Query().Get("appname")
|
appname := u.Query().Get("appname")
|
||||||
facility := u.Query().Get("local")
|
facility := u.Query().Get("local")
|
||||||
return setSyslogFormatter(appname, facility)
|
return setSyslogFormatter(appname, facility)
|
||||||
|
case "eventlog":
|
||||||
|
if setEventlogFormatter == nil {
|
||||||
|
return fmt.Errorf("system does not support eventlog")
|
||||||
|
}
|
||||||
|
name := u.Query().Get("name")
|
||||||
|
debugAsInfo := false
|
||||||
|
debugAsInfoRaw := u.Query().Get("debugAsInfo")
|
||||||
|
if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
|
||||||
|
debugAsInfo = parsedDebugAsInfo
|
||||||
|
}
|
||||||
|
return setEventlogFormatter(name, debugAsInfo)
|
||||||
case "stdout":
|
case "stdout":
|
||||||
origLogger.Out = os.Stdout
|
origLogger.Out = os.Stdout
|
||||||
case "stderr":
|
case "stderr":
|
||||||
origLogger.Out = os.Stderr
|
origLogger.Out = os.Stderr
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unsupported logger %s", u.Opaque)
|
return fmt.Errorf("unsupported logger %q", u.Opaque)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -99,10 +115,19 @@ func init() {
|
||||||
// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
|
// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
|
||||||
// flag.Parse() to make the logging flags take effect.
|
// flag.Parse() to make the logging flags take effect.
|
||||||
func AddFlags(fs *flag.FlagSet) {
|
func AddFlags(fs *flag.FlagSet) {
|
||||||
fs.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal].")
|
fs.Var(
|
||||||
fs.Var(logFormatFlag{}, "log.format", "If set use a syslog logger or JSON logging. Example: logger:syslog?appname=bob&local=7 or logger:stdout?json=true. Defaults to stderr.")
|
levelFlag(origLogger.Level.String()),
|
||||||
|
"log.level",
|
||||||
|
"Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
|
||||||
|
)
|
||||||
|
fs.Var(
|
||||||
|
logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}),
|
||||||
|
"log.format",
|
||||||
|
`Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Logger is the interface for loggers used in the Prometheus components.
|
||||||
type Logger interface {
|
type Logger interface {
|
||||||
Debug(...interface{})
|
Debug(...interface{})
|
||||||
Debugln(...interface{})
|
Debugln(...interface{})
|
||||||
|
@ -227,10 +252,26 @@ func (l logger) sourced() *logrus.Entry {
|
||||||
var origLogger = logrus.New()
|
var origLogger = logrus.New()
|
||||||
var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
|
var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
|
||||||
|
|
||||||
|
// Base returns the default Logger logging to
|
||||||
func Base() Logger {
|
func Base() Logger {
|
||||||
return baseLogger
|
return baseLogger
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewLogger returns a new Logger logging to out.
|
||||||
|
func NewLogger(w io.Writer) Logger {
|
||||||
|
l := logrus.New()
|
||||||
|
l.Out = w
|
||||||
|
return logger{entry: logrus.NewEntry(l)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNopLogger returns a logger that discards all log messages.
|
||||||
|
func NewNopLogger() Logger {
|
||||||
|
l := logrus.New()
|
||||||
|
l.Out = ioutil.Discard
|
||||||
|
return logger{entry: logrus.NewEntry(l)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// With adds a field to the logger.
|
||||||
func With(key string, value interface{}) Logger {
|
func With(key string, value interface{}) Logger {
|
||||||
return baseLogger.With(key, value)
|
return baseLogger.With(key, value)
|
||||||
}
|
}
|
||||||
|
@ -240,7 +281,7 @@ func Debug(args ...interface{}) {
|
||||||
baseLogger.sourced().Debug(args...)
|
baseLogger.sourced().Debug(args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Debug logs a message at level Debug on the standard logger.
|
// Debugln logs a message at level Debug on the standard logger.
|
||||||
func Debugln(args ...interface{}) {
|
func Debugln(args ...interface{}) {
|
||||||
baseLogger.sourced().Debugln(args...)
|
baseLogger.sourced().Debugln(args...)
|
||||||
}
|
}
|
||||||
|
@ -255,7 +296,7 @@ func Info(args ...interface{}) {
|
||||||
baseLogger.sourced().Info(args...)
|
baseLogger.sourced().Info(args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Info logs a message at level Info on the standard logger.
|
// Infoln logs a message at level Info on the standard logger.
|
||||||
func Infoln(args ...interface{}) {
|
func Infoln(args ...interface{}) {
|
||||||
baseLogger.sourced().Infoln(args...)
|
baseLogger.sourced().Infoln(args...)
|
||||||
}
|
}
|
||||||
|
@ -270,7 +311,7 @@ func Warn(args ...interface{}) {
|
||||||
baseLogger.sourced().Warn(args...)
|
baseLogger.sourced().Warn(args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Warn logs a message at level Warn on the standard logger.
|
// Warnln logs a message at level Warn on the standard logger.
|
||||||
func Warnln(args ...interface{}) {
|
func Warnln(args ...interface{}) {
|
||||||
baseLogger.sourced().Warnln(args...)
|
baseLogger.sourced().Warnln(args...)
|
||||||
}
|
}
|
||||||
|
@ -285,7 +326,7 @@ func Error(args ...interface{}) {
|
||||||
baseLogger.sourced().Error(args...)
|
baseLogger.sourced().Error(args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error logs a message at level Error on the standard logger.
|
// Errorln logs a message at level Error on the standard logger.
|
||||||
func Errorln(args ...interface{}) {
|
func Errorln(args ...interface{}) {
|
||||||
baseLogger.sourced().Errorln(args...)
|
baseLogger.sourced().Errorln(args...)
|
||||||
}
|
}
|
||||||
|
@ -300,7 +341,7 @@ func Fatal(args ...interface{}) {
|
||||||
baseLogger.sourced().Fatal(args...)
|
baseLogger.sourced().Fatal(args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fatal logs a message at level Fatal on the standard logger.
|
// Fatalln logs a message at level Fatal on the standard logger.
|
||||||
func Fatalln(args ...interface{}) {
|
func Fatalln(args ...interface{}) {
|
||||||
baseLogger.sourced().Fatalln(args...)
|
baseLogger.sourced().Fatalln(args...)
|
||||||
}
|
}
|
||||||
|
|
343
vendor/github.com/syndtr/goleveldb/leveldb/batch.go
generated
vendored
343
vendor/github.com/syndtr/goleveldb/leveldb/batch.go
generated
vendored
|
@ -9,13 +9,15 @@ package leveldb
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrBatchCorrupted records reason of batch corruption.
|
// ErrBatchCorrupted records reason of batch corruption. This error will be
|
||||||
|
// wrapped with errors.ErrCorrupted.
|
||||||
type ErrBatchCorrupted struct {
|
type ErrBatchCorrupted struct {
|
||||||
Reason string
|
Reason string
|
||||||
}
|
}
|
||||||
|
@ -29,8 +31,9 @@ func newErrBatchCorrupted(reason string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
batchHdrLen = 8 + 4
|
batchHeaderLen = 8 + 4
|
||||||
batchGrowRec = 3000
|
batchGrowRec = 3000
|
||||||
|
batchBufioSize = 16
|
||||||
)
|
)
|
||||||
|
|
||||||
// BatchReplay wraps basic batch operations.
|
// BatchReplay wraps basic batch operations.
|
||||||
|
@ -39,34 +42,46 @@ type BatchReplay interface {
|
||||||
Delete(key []byte)
|
Delete(key []byte)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type batchIndex struct {
|
||||||
|
keyType keyType
|
||||||
|
keyPos, keyLen int
|
||||||
|
valuePos, valueLen int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (index batchIndex) k(data []byte) []byte {
|
||||||
|
return data[index.keyPos : index.keyPos+index.keyLen]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (index batchIndex) v(data []byte) []byte {
|
||||||
|
if index.valueLen != 0 {
|
||||||
|
return data[index.valuePos : index.valuePos+index.valueLen]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (index batchIndex) kv(data []byte) (key, value []byte) {
|
||||||
|
return index.k(data), index.v(data)
|
||||||
|
}
|
||||||
|
|
||||||
// Batch is a write batch.
|
// Batch is a write batch.
|
||||||
type Batch struct {
|
type Batch struct {
|
||||||
data []byte
|
data []byte
|
||||||
rLen, bLen int
|
index []batchIndex
|
||||||
seq uint64
|
|
||||||
sync bool
|
// internalLen is sums of key/value pair length plus 8-bytes internal key.
|
||||||
|
internalLen int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Batch) grow(n int) {
|
func (b *Batch) grow(n int) {
|
||||||
off := len(b.data)
|
o := len(b.data)
|
||||||
if off == 0 {
|
if cap(b.data)-o < n {
|
||||||
off = batchHdrLen
|
|
||||||
if b.data != nil {
|
|
||||||
b.data = b.data[:off]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if cap(b.data)-off < n {
|
|
||||||
if b.data == nil {
|
|
||||||
b.data = make([]byte, off, off+n)
|
|
||||||
} else {
|
|
||||||
odata := b.data
|
|
||||||
div := 1
|
div := 1
|
||||||
if b.rLen > batchGrowRec {
|
if len(b.index) > batchGrowRec {
|
||||||
div = b.rLen / batchGrowRec
|
div = len(b.index) / batchGrowRec
|
||||||
}
|
|
||||||
b.data = make([]byte, off, off+n+(off-batchHdrLen)/div)
|
|
||||||
copy(b.data, odata)
|
|
||||||
}
|
}
|
||||||
|
ndata := make([]byte, o, o+n+o/div)
|
||||||
|
copy(ndata, b.data)
|
||||||
|
b.data = ndata
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -76,32 +91,36 @@ func (b *Batch) appendRec(kt keyType, key, value []byte) {
|
||||||
n += binary.MaxVarintLen32 + len(value)
|
n += binary.MaxVarintLen32 + len(value)
|
||||||
}
|
}
|
||||||
b.grow(n)
|
b.grow(n)
|
||||||
off := len(b.data)
|
index := batchIndex{keyType: kt}
|
||||||
data := b.data[:off+n]
|
o := len(b.data)
|
||||||
data[off] = byte(kt)
|
data := b.data[:o+n]
|
||||||
off++
|
data[o] = byte(kt)
|
||||||
off += binary.PutUvarint(data[off:], uint64(len(key)))
|
o++
|
||||||
copy(data[off:], key)
|
o += binary.PutUvarint(data[o:], uint64(len(key)))
|
||||||
off += len(key)
|
index.keyPos = o
|
||||||
|
index.keyLen = len(key)
|
||||||
|
o += copy(data[o:], key)
|
||||||
if kt == keyTypeVal {
|
if kt == keyTypeVal {
|
||||||
off += binary.PutUvarint(data[off:], uint64(len(value)))
|
o += binary.PutUvarint(data[o:], uint64(len(value)))
|
||||||
copy(data[off:], value)
|
index.valuePos = o
|
||||||
off += len(value)
|
index.valueLen = len(value)
|
||||||
|
o += copy(data[o:], value)
|
||||||
}
|
}
|
||||||
b.data = data[:off]
|
b.data = data[:o]
|
||||||
b.rLen++
|
b.index = append(b.index, index)
|
||||||
// Include 8-byte ikey header
|
b.internalLen += index.keyLen + index.valueLen + 8
|
||||||
b.bLen += len(key) + len(value) + 8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put appends 'put operation' of the given key/value pair to the batch.
|
// Put appends 'put operation' of the given key/value pair to the batch.
|
||||||
// It is safe to modify the contents of the argument after Put returns.
|
// It is safe to modify the contents of the argument after Put returns but not
|
||||||
|
// before.
|
||||||
func (b *Batch) Put(key, value []byte) {
|
func (b *Batch) Put(key, value []byte) {
|
||||||
b.appendRec(keyTypeVal, key, value)
|
b.appendRec(keyTypeVal, key, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete appends 'delete operation' of the given key to the batch.
|
// Delete appends 'delete operation' of the given key to the batch.
|
||||||
// It is safe to modify the contents of the argument after Delete returns.
|
// It is safe to modify the contents of the argument after Delete returns but
|
||||||
|
// not before.
|
||||||
func (b *Batch) Delete(key []byte) {
|
func (b *Batch) Delete(key []byte) {
|
||||||
b.appendRec(keyTypeDel, key, nil)
|
b.appendRec(keyTypeDel, key, nil)
|
||||||
}
|
}
|
||||||
|
@ -111,7 +130,7 @@ func (b *Batch) Delete(key []byte) {
|
||||||
// The returned slice is not its own copy, so the contents should not be
|
// The returned slice is not its own copy, so the contents should not be
|
||||||
// modified.
|
// modified.
|
||||||
func (b *Batch) Dump() []byte {
|
func (b *Batch) Dump() []byte {
|
||||||
return b.encode()
|
return b.data
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load loads given slice into the batch. Previous contents of the batch
|
// Load loads given slice into the batch. Previous contents of the batch
|
||||||
|
@ -119,144 +138,212 @@ func (b *Batch) Dump() []byte {
|
||||||
// The given slice will not be copied and will be used as batch buffer, so
|
// The given slice will not be copied and will be used as batch buffer, so
|
||||||
// it is not safe to modify the contents of the slice.
|
// it is not safe to modify the contents of the slice.
|
||||||
func (b *Batch) Load(data []byte) error {
|
func (b *Batch) Load(data []byte) error {
|
||||||
return b.decode(0, data)
|
return b.decode(data, -1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Replay replays batch contents.
|
// Replay replays batch contents.
|
||||||
func (b *Batch) Replay(r BatchReplay) error {
|
func (b *Batch) Replay(r BatchReplay) error {
|
||||||
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
|
for _, index := range b.index {
|
||||||
switch kt {
|
switch index.keyType {
|
||||||
case keyTypeVal:
|
case keyTypeVal:
|
||||||
r.Put(key, value)
|
r.Put(index.k(b.data), index.v(b.data))
|
||||||
case keyTypeDel:
|
case keyTypeDel:
|
||||||
r.Delete(key)
|
r.Delete(index.k(b.data))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Len returns number of records in the batch.
|
// Len returns number of records in the batch.
|
||||||
func (b *Batch) Len() int {
|
func (b *Batch) Len() int {
|
||||||
return b.rLen
|
return len(b.index)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reset resets the batch.
|
// Reset resets the batch.
|
||||||
func (b *Batch) Reset() {
|
func (b *Batch) Reset() {
|
||||||
b.data = b.data[:0]
|
b.data = b.data[:0]
|
||||||
b.seq = 0
|
b.index = b.index[:0]
|
||||||
b.rLen = 0
|
b.internalLen = 0
|
||||||
b.bLen = 0
|
|
||||||
b.sync = false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Batch) init(sync bool) {
|
func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error {
|
||||||
b.sync = sync
|
for i, index := range b.index {
|
||||||
|
if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Batch) append(p *Batch) {
|
func (b *Batch) append(p *Batch) {
|
||||||
if p.rLen > 0 {
|
ob := len(b.data)
|
||||||
b.grow(len(p.data) - batchHdrLen)
|
oi := len(b.index)
|
||||||
b.data = append(b.data, p.data[batchHdrLen:]...)
|
b.data = append(b.data, p.data...)
|
||||||
b.rLen += p.rLen
|
b.index = append(b.index, p.index...)
|
||||||
b.bLen += p.bLen
|
b.internalLen += p.internalLen
|
||||||
|
|
||||||
|
// Updating index offset.
|
||||||
|
if ob != 0 {
|
||||||
|
for ; oi < len(b.index); oi++ {
|
||||||
|
index := &b.index[oi]
|
||||||
|
index.keyPos += ob
|
||||||
|
if index.valueLen != 0 {
|
||||||
|
index.valuePos += ob
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if p.sync {
|
|
||||||
b.sync = true
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// size returns sums of key/value pair length plus 8-bytes ikey.
|
func (b *Batch) decode(data []byte, expectedLen int) error {
|
||||||
func (b *Batch) size() int {
|
|
||||||
return b.bLen
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) encode() []byte {
|
|
||||||
b.grow(0)
|
|
||||||
binary.LittleEndian.PutUint64(b.data, b.seq)
|
|
||||||
binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))
|
|
||||||
|
|
||||||
return b.data
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Batch) decode(prevSeq uint64, data []byte) error {
|
|
||||||
if len(data) < batchHdrLen {
|
|
||||||
return newErrBatchCorrupted("too short")
|
|
||||||
}
|
|
||||||
|
|
||||||
b.seq = binary.LittleEndian.Uint64(data)
|
|
||||||
if b.seq < prevSeq {
|
|
||||||
return newErrBatchCorrupted("invalid sequence number")
|
|
||||||
}
|
|
||||||
b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
|
|
||||||
if b.rLen < 0 {
|
|
||||||
return newErrBatchCorrupted("invalid records length")
|
|
||||||
}
|
|
||||||
// No need to be precise at this point, it won't be used anyway
|
|
||||||
b.bLen = len(data) - batchHdrLen
|
|
||||||
b.data = data
|
b.data = data
|
||||||
|
b.index = b.index[:0]
|
||||||
|
b.internalLen = 0
|
||||||
|
err := decodeBatch(data, func(i int, index batchIndex) error {
|
||||||
|
b.index = append(b.index, index)
|
||||||
|
b.internalLen += index.keyLen + index.valueLen + 8
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if expectedLen >= 0 && len(b.index) != expectedLen {
|
||||||
|
return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index)))
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
|
func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error {
|
||||||
off := batchHdrLen
|
var ik []byte
|
||||||
for i := 0; i < b.rLen; i++ {
|
for i, index := range b.index {
|
||||||
if off >= len(b.data) {
|
ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
|
||||||
return newErrBatchCorrupted("invalid records length")
|
if err := mdb.Put(ik, index.v(b.data)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
kt := keyType(b.data[off])
|
func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error {
|
||||||
if kt > keyTypeVal {
|
var ik []byte
|
||||||
panic(kt)
|
for i, index := range b.index {
|
||||||
return newErrBatchCorrupted("bad record: invalid type")
|
ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
|
||||||
|
if err := mdb.Delete(ik); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
off++
|
|
||||||
|
|
||||||
x, n := binary.Uvarint(b.data[off:])
|
func newBatch() interface{} {
|
||||||
off += n
|
return &Batch{}
|
||||||
if n <= 0 || off+int(x) > len(b.data) {
|
}
|
||||||
|
|
||||||
|
func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error {
|
||||||
|
var index batchIndex
|
||||||
|
for i, o := 0, 0; o < len(data); i++ {
|
||||||
|
// Key type.
|
||||||
|
index.keyType = keyType(data[o])
|
||||||
|
if index.keyType > keyTypeVal {
|
||||||
|
return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType)))
|
||||||
|
}
|
||||||
|
o++
|
||||||
|
|
||||||
|
// Key.
|
||||||
|
x, n := binary.Uvarint(data[o:])
|
||||||
|
o += n
|
||||||
|
if n <= 0 || o+int(x) > len(data) {
|
||||||
return newErrBatchCorrupted("bad record: invalid key length")
|
return newErrBatchCorrupted("bad record: invalid key length")
|
||||||
}
|
}
|
||||||
key := b.data[off : off+int(x)]
|
index.keyPos = o
|
||||||
off += int(x)
|
index.keyLen = int(x)
|
||||||
var value []byte
|
o += index.keyLen
|
||||||
if kt == keyTypeVal {
|
|
||||||
x, n := binary.Uvarint(b.data[off:])
|
// Value.
|
||||||
off += n
|
if index.keyType == keyTypeVal {
|
||||||
if n <= 0 || off+int(x) > len(b.data) {
|
x, n = binary.Uvarint(data[o:])
|
||||||
|
o += n
|
||||||
|
if n <= 0 || o+int(x) > len(data) {
|
||||||
return newErrBatchCorrupted("bad record: invalid value length")
|
return newErrBatchCorrupted("bad record: invalid value length")
|
||||||
}
|
}
|
||||||
value = b.data[off : off+int(x)]
|
index.valuePos = o
|
||||||
off += int(x)
|
index.valueLen = int(x)
|
||||||
|
o += index.valueLen
|
||||||
|
} else {
|
||||||
|
index.valuePos = 0
|
||||||
|
index.valueLen = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := f(i, kt, key, value); err != nil {
|
if err := fn(i, index); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Batch) memReplay(to *memdb.DB) error {
|
func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) {
|
||||||
var ikScratch []byte
|
seq, batchLen, err = decodeBatchHeader(data)
|
||||||
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
|
if err != nil {
|
||||||
ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
|
return 0, 0, err
|
||||||
return to.Put(ikScratch, value)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
if seq < expectSeq {
|
||||||
func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
|
return 0, 0, newErrBatchCorrupted("invalid sequence number")
|
||||||
if err := b.decode(prevSeq, data); err != nil {
|
}
|
||||||
|
data = data[batchHeaderLen:]
|
||||||
|
var ik []byte
|
||||||
|
var decodedLen int
|
||||||
|
err = decodeBatch(data, func(i int, index batchIndex) error {
|
||||||
|
if i >= batchLen {
|
||||||
|
return newErrBatchCorrupted("invalid records length")
|
||||||
|
}
|
||||||
|
ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType)
|
||||||
|
if err := mdb.Put(ik, index.v(data)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return b.memReplay(to)
|
decodedLen++
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err == nil && decodedLen != batchLen {
|
||||||
|
err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen))
|
||||||
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Batch) revertMemReplay(to *memdb.DB) error {
|
func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte {
|
||||||
var ikScratch []byte
|
dst = ensureBuffer(dst, batchHeaderLen)
|
||||||
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
|
binary.LittleEndian.PutUint64(dst, seq)
|
||||||
ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
|
binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen))
|
||||||
return to.Delete(ikScratch)
|
return dst
|
||||||
})
|
}
|
||||||
|
|
||||||
|
func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) {
|
||||||
|
if len(data) < batchHeaderLen {
|
||||||
|
return 0, 0, newErrBatchCorrupted("too short")
|
||||||
|
}
|
||||||
|
|
||||||
|
seq = binary.LittleEndian.Uint64(data)
|
||||||
|
batchLen = int(binary.LittleEndian.Uint32(data[8:]))
|
||||||
|
if batchLen < 0 {
|
||||||
|
return 0, 0, newErrBatchCorrupted("invalid records length")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func batchesLen(batches []*Batch) int {
|
||||||
|
batchLen := 0
|
||||||
|
for _, batch := range batches {
|
||||||
|
batchLen += batch.Len()
|
||||||
|
}
|
||||||
|
return batchLen
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error {
|
||||||
|
if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, batch := range batches {
|
||||||
|
if _, err := wr.Write(batch.data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
37
vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
generated
vendored
37
vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
generated
vendored
|
@ -16,7 +16,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
// Cacher provides interface to implements a caching functionality.
|
// Cacher provides interface to implements a caching functionality.
|
||||||
// An implementation must be goroutine-safe.
|
// An implementation must be safe for concurrent use.
|
||||||
type Cacher interface {
|
type Cacher interface {
|
||||||
// Capacity returns cache capacity.
|
// Capacity returns cache capacity.
|
||||||
Capacity() int
|
Capacity() int
|
||||||
|
@ -511,18 +511,12 @@ func (r *Cache) EvictAll() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close closes the 'cache map' and releases all 'cache node'.
|
// Close closes the 'cache map' and forcefully releases all 'cache node'.
|
||||||
func (r *Cache) Close() error {
|
func (r *Cache) Close() error {
|
||||||
r.mu.Lock()
|
r.mu.Lock()
|
||||||
if !r.closed {
|
if !r.closed {
|
||||||
r.closed = true
|
r.closed = true
|
||||||
|
|
||||||
if r.cacher != nil {
|
|
||||||
if err := r.cacher.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
h := (*mNode)(r.mHead)
|
h := (*mNode)(r.mHead)
|
||||||
h.initBuckets()
|
h.initBuckets()
|
||||||
|
|
||||||
|
@ -541,10 +535,37 @@ func (r *Cache) Close() error {
|
||||||
for _, f := range n.onDel {
|
for _, f := range n.onDel {
|
||||||
f()
|
f()
|
||||||
}
|
}
|
||||||
|
n.onDel = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
r.mu.Unlock()
|
r.mu.Unlock()
|
||||||
|
|
||||||
|
// Avoid deadlock.
|
||||||
|
if r.cacher != nil {
|
||||||
|
if err := r.cacher.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but
|
||||||
|
// unlike Close it doesn't forcefully releases 'cache node'.
|
||||||
|
func (r *Cache) CloseWeak() error {
|
||||||
|
r.mu.Lock()
|
||||||
|
if !r.closed {
|
||||||
|
r.closed = true
|
||||||
|
}
|
||||||
|
r.mu.Unlock()
|
||||||
|
|
||||||
|
// Avoid deadlock.
|
||||||
|
if r.cacher != nil {
|
||||||
|
r.cacher.EvictAll()
|
||||||
|
if err := r.cacher.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
40
vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
generated
vendored
40
vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
generated
vendored
|
@ -6,7 +6,9 @@
|
||||||
|
|
||||||
package leveldb
|
package leveldb
|
||||||
|
|
||||||
import "github.com/syndtr/goleveldb/leveldb/comparer"
|
import (
|
||||||
|
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||||
|
)
|
||||||
|
|
||||||
type iComparer struct {
|
type iComparer struct {
|
||||||
ucmp comparer.Comparer
|
ucmp comparer.Comparer
|
||||||
|
@ -33,12 +35,12 @@ func (icmp *iComparer) Name() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (icmp *iComparer) Compare(a, b []byte) int {
|
func (icmp *iComparer) Compare(a, b []byte) int {
|
||||||
x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey())
|
x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey())
|
||||||
if x == 0 {
|
if x == 0 {
|
||||||
if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
|
if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
|
||||||
x = -1
|
return -1
|
||||||
} else if m < n {
|
} else if m < n {
|
||||||
x = 1
|
return 1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return x
|
return x
|
||||||
|
@ -46,30 +48,20 @@ func (icmp *iComparer) Compare(a, b []byte) int {
|
||||||
|
|
||||||
func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
|
func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
|
||||||
ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
|
ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
|
||||||
dst = icmp.ucmp.Separator(dst, ua, ub)
|
dst = icmp.uSeparator(dst, ua, ub)
|
||||||
if dst == nil {
|
if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
|
||||||
|
// Append earliest possible number.
|
||||||
|
return append(dst, keyMaxNumBytes...)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
|
|
||||||
dst = append(dst, keyMaxNumBytes...)
|
|
||||||
} else {
|
|
||||||
// Did not close possibilities that n maybe longer than len(ub).
|
|
||||||
dst = append(dst, a[len(a)-8:]...)
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
func (icmp *iComparer) Successor(dst, b []byte) []byte {
|
func (icmp *iComparer) Successor(dst, b []byte) []byte {
|
||||||
ub := internalKey(b).ukey()
|
ub := internalKey(b).ukey()
|
||||||
dst = icmp.ucmp.Successor(dst, ub)
|
dst = icmp.uSuccessor(dst, ub)
|
||||||
if dst == nil {
|
if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
|
||||||
|
// Append earliest possible number.
|
||||||
|
return append(dst, keyMaxNumBytes...)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
|
|
||||||
dst = append(dst, keyMaxNumBytes...)
|
|
||||||
} else {
|
|
||||||
// Did not close possibilities that n maybe longer than len(ub).
|
|
||||||
dst = append(dst, b[len(b)-8:]...)
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
68
vendor/github.com/syndtr/goleveldb/leveldb/db.go
generated
vendored
68
vendor/github.com/syndtr/goleveldb/leveldb/db.go
generated
vendored
|
@ -53,14 +53,13 @@ type DB struct {
|
||||||
aliveSnaps, aliveIters int32
|
aliveSnaps, aliveIters int32
|
||||||
|
|
||||||
// Write.
|
// Write.
|
||||||
writeC chan *Batch
|
batchPool sync.Pool
|
||||||
|
writeMergeC chan writeMerge
|
||||||
writeMergedC chan bool
|
writeMergedC chan bool
|
||||||
writeLockC chan struct{}
|
writeLockC chan struct{}
|
||||||
writeAckC chan error
|
writeAckC chan error
|
||||||
writeDelay time.Duration
|
writeDelay time.Duration
|
||||||
writeDelayN int
|
writeDelayN int
|
||||||
journalC chan *Batch
|
|
||||||
journalAckC chan error
|
|
||||||
tr *Transaction
|
tr *Transaction
|
||||||
|
|
||||||
// Compaction.
|
// Compaction.
|
||||||
|
@ -94,12 +93,11 @@ func openDB(s *session) (*DB, error) {
|
||||||
// Snapshot
|
// Snapshot
|
||||||
snapsList: list.New(),
|
snapsList: list.New(),
|
||||||
// Write
|
// Write
|
||||||
writeC: make(chan *Batch),
|
batchPool: sync.Pool{New: newBatch},
|
||||||
|
writeMergeC: make(chan writeMerge),
|
||||||
writeMergedC: make(chan bool),
|
writeMergedC: make(chan bool),
|
||||||
writeLockC: make(chan struct{}, 1),
|
writeLockC: make(chan struct{}, 1),
|
||||||
writeAckC: make(chan error),
|
writeAckC: make(chan error),
|
||||||
journalC: make(chan *Batch),
|
|
||||||
journalAckC: make(chan error),
|
|
||||||
// Compaction
|
// Compaction
|
||||||
tcompCmdC: make(chan cCmd),
|
tcompCmdC: make(chan cCmd),
|
||||||
tcompPauseC: make(chan chan<- struct{}),
|
tcompPauseC: make(chan chan<- struct{}),
|
||||||
|
@ -144,10 +142,10 @@ func openDB(s *session) (*DB, error) {
|
||||||
if readOnly {
|
if readOnly {
|
||||||
db.SetReadOnly()
|
db.SetReadOnly()
|
||||||
} else {
|
} else {
|
||||||
db.closeW.Add(3)
|
db.closeW.Add(2)
|
||||||
go db.tCompaction()
|
go db.tCompaction()
|
||||||
go db.mCompaction()
|
go db.mCompaction()
|
||||||
go db.jWriter()
|
// go db.jWriter()
|
||||||
}
|
}
|
||||||
|
|
||||||
s.logf("db@open done T·%v", time.Since(start))
|
s.logf("db@open done T·%v", time.Since(start))
|
||||||
|
@ -162,10 +160,10 @@ func openDB(s *session) (*DB, error) {
|
||||||
// os.ErrExist error.
|
// os.ErrExist error.
|
||||||
//
|
//
|
||||||
// Open will return an error with type of ErrCorrupted if corruption
|
// Open will return an error with type of ErrCorrupted if corruption
|
||||||
// detected in the DB. Corrupted DB can be recovered with Recover
|
// detected in the DB. Use errors.IsCorrupted to test whether an error is
|
||||||
// function.
|
// due to corruption. Corrupted DB can be recovered with Recover function.
|
||||||
//
|
//
|
||||||
// The returned DB instance is goroutine-safe.
|
// The returned DB instance is safe for concurrent use.
|
||||||
// The DB must be closed after use, by calling Close method.
|
// The DB must be closed after use, by calling Close method.
|
||||||
func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||||
s, err := newSession(stor, o)
|
s, err := newSession(stor, o)
|
||||||
|
@ -202,13 +200,13 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||||
// os.ErrExist error.
|
// os.ErrExist error.
|
||||||
//
|
//
|
||||||
// OpenFile uses standard file-system backed storage implementation as
|
// OpenFile uses standard file-system backed storage implementation as
|
||||||
// desribed in the leveldb/storage package.
|
// described in the leveldb/storage package.
|
||||||
//
|
//
|
||||||
// OpenFile will return an error with type of ErrCorrupted if corruption
|
// OpenFile will return an error with type of ErrCorrupted if corruption
|
||||||
// detected in the DB. Corrupted DB can be recovered with Recover
|
// detected in the DB. Use errors.IsCorrupted to test whether an error is
|
||||||
// function.
|
// due to corruption. Corrupted DB can be recovered with Recover function.
|
||||||
//
|
//
|
||||||
// The returned DB instance is goroutine-safe.
|
// The returned DB instance is safe for concurrent use.
|
||||||
// The DB must be closed after use, by calling Close method.
|
// The DB must be closed after use, by calling Close method.
|
||||||
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
|
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
|
||||||
stor, err := storage.OpenFile(path, o.GetReadOnly())
|
stor, err := storage.OpenFile(path, o.GetReadOnly())
|
||||||
|
@ -229,7 +227,7 @@ func OpenFile(path string, o *opt.Options) (db *DB, err error) {
|
||||||
// The DB must already exist or it will returns an error.
|
// The DB must already exist or it will returns an error.
|
||||||
// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
|
// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
|
||||||
//
|
//
|
||||||
// The returned DB instance is goroutine-safe.
|
// The returned DB instance is safe for concurrent use.
|
||||||
// The DB must be closed after use, by calling Close method.
|
// The DB must be closed after use, by calling Close method.
|
||||||
func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||||
s, err := newSession(stor, o)
|
s, err := newSession(stor, o)
|
||||||
|
@ -255,10 +253,10 @@ func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||||
// The DB must already exist or it will returns an error.
|
// The DB must already exist or it will returns an error.
|
||||||
// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
|
// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
|
||||||
//
|
//
|
||||||
// RecoverFile uses standard file-system backed storage implementation as desribed
|
// RecoverFile uses standard file-system backed storage implementation as described
|
||||||
// in the leveldb/storage package.
|
// in the leveldb/storage package.
|
||||||
//
|
//
|
||||||
// The returned DB instance is goroutine-safe.
|
// The returned DB instance is safe for concurrent use.
|
||||||
// The DB must be closed after use, by calling Close method.
|
// The DB must be closed after use, by calling Close method.
|
||||||
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
|
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
|
||||||
stor, err := storage.OpenFile(path, false)
|
stor, err := storage.OpenFile(path, false)
|
||||||
|
@ -507,7 +505,8 @@ func (db *DB) recoverJournal() error {
|
||||||
jr *journal.Reader
|
jr *journal.Reader
|
||||||
mdb = memdb.New(db.s.icmp, writeBuffer)
|
mdb = memdb.New(db.s.icmp, writeBuffer)
|
||||||
buf = &util.Buffer{}
|
buf = &util.Buffer{}
|
||||||
batch = &Batch{}
|
batchSeq uint64
|
||||||
|
batchLen int
|
||||||
)
|
)
|
||||||
|
|
||||||
for _, fd := range fds {
|
for _, fd := range fds {
|
||||||
|
@ -526,7 +525,7 @@ func (db *DB) recoverJournal() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Flush memdb and remove obsolete journal file.
|
// Flush memdb and remove obsolete journal file.
|
||||||
if !ofd.Nil() {
|
if !ofd.Zero() {
|
||||||
if mdb.Len() > 0 {
|
if mdb.Len() > 0 {
|
||||||
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
|
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
|
||||||
fr.Close()
|
fr.Close()
|
||||||
|
@ -569,7 +568,8 @@ func (db *DB) recoverJournal() error {
|
||||||
fr.Close()
|
fr.Close()
|
||||||
return errors.SetFd(err, fd)
|
return errors.SetFd(err, fd)
|
||||||
}
|
}
|
||||||
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
|
batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
|
||||||
|
if err != nil {
|
||||||
if !strict && errors.IsCorrupted(err) {
|
if !strict && errors.IsCorrupted(err) {
|
||||||
db.s.logf("journal error: %v (skipped)", err)
|
db.s.logf("journal error: %v (skipped)", err)
|
||||||
// We won't apply sequence number as it might be corrupted.
|
// We won't apply sequence number as it might be corrupted.
|
||||||
|
@ -581,7 +581,7 @@ func (db *DB) recoverJournal() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save sequence number.
|
// Save sequence number.
|
||||||
db.seq = batch.seq + uint64(batch.Len())
|
db.seq = batchSeq + uint64(batchLen)
|
||||||
|
|
||||||
// Flush it if large enough.
|
// Flush it if large enough.
|
||||||
if mdb.Size() >= writeBuffer {
|
if mdb.Size() >= writeBuffer {
|
||||||
|
@ -624,7 +624,7 @@ func (db *DB) recoverJournal() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove the last obsolete journal file.
|
// Remove the last obsolete journal file.
|
||||||
if !ofd.Nil() {
|
if !ofd.Zero() {
|
||||||
db.s.stor.Remove(ofd)
|
db.s.stor.Remove(ofd)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -663,7 +663,8 @@ func (db *DB) recoverJournalRO() error {
|
||||||
var (
|
var (
|
||||||
jr *journal.Reader
|
jr *journal.Reader
|
||||||
buf = &util.Buffer{}
|
buf = &util.Buffer{}
|
||||||
batch = &Batch{}
|
batchSeq uint64
|
||||||
|
batchLen int
|
||||||
)
|
)
|
||||||
|
|
||||||
for _, fd := range fds {
|
for _, fd := range fds {
|
||||||
|
@ -703,7 +704,8 @@ func (db *DB) recoverJournalRO() error {
|
||||||
fr.Close()
|
fr.Close()
|
||||||
return errors.SetFd(err, fd)
|
return errors.SetFd(err, fd)
|
||||||
}
|
}
|
||||||
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
|
batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
|
||||||
|
if err != nil {
|
||||||
if !strict && errors.IsCorrupted(err) {
|
if !strict && errors.IsCorrupted(err) {
|
||||||
db.s.logf("journal error: %v (skipped)", err)
|
db.s.logf("journal error: %v (skipped)", err)
|
||||||
// We won't apply sequence number as it might be corrupted.
|
// We won't apply sequence number as it might be corrupted.
|
||||||
|
@ -715,7 +717,7 @@ func (db *DB) recoverJournalRO() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save sequence number.
|
// Save sequence number.
|
||||||
db.seq = batch.seq + uint64(batch.Len())
|
db.seq = batchSeq + uint64(batchLen)
|
||||||
}
|
}
|
||||||
|
|
||||||
fr.Close()
|
fr.Close()
|
||||||
|
@ -856,7 +858,7 @@ func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
|
||||||
|
|
||||||
// NewIterator returns an iterator for the latest snapshot of the
|
// NewIterator returns an iterator for the latest snapshot of the
|
||||||
// underlying DB.
|
// underlying DB.
|
||||||
// The returned iterator is not goroutine-safe, but it is safe to use
|
// The returned iterator is not safe for concurrent use, but it is safe to use
|
||||||
// multiple iterators concurrently, with each in a dedicated goroutine.
|
// multiple iterators concurrently, with each in a dedicated goroutine.
|
||||||
// It is also safe to use an iterator concurrently with modifying its
|
// It is also safe to use an iterator concurrently with modifying its
|
||||||
// underlying DB. The resultant key/value pairs are guaranteed to be
|
// underlying DB. The resultant key/value pairs are guaranteed to be
|
||||||
|
@ -1062,6 +1064,8 @@ func (db *DB) Close() error {
|
||||||
if db.journal != nil {
|
if db.journal != nil {
|
||||||
db.journal.Close()
|
db.journal.Close()
|
||||||
db.journalWriter.Close()
|
db.journalWriter.Close()
|
||||||
|
db.journal = nil
|
||||||
|
db.journalWriter = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if db.writeDelayN > 0 {
|
if db.writeDelayN > 0 {
|
||||||
|
@ -1077,15 +1081,11 @@ func (db *DB) Close() error {
|
||||||
if err1 := db.closer.Close(); err == nil {
|
if err1 := db.closer.Close(); err == nil {
|
||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
|
db.closer = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NIL'ing pointers.
|
// Clear memdbs.
|
||||||
db.s = nil
|
db.clearMems()
|
||||||
db.mem = nil
|
|
||||||
db.frozenMem = nil
|
|
||||||
db.journal = nil
|
|
||||||
db.journalWriter = nil
|
|
||||||
db.closer = nil
|
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
32
vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
generated
vendored
32
vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
generated
vendored
|
@ -96,7 +96,7 @@ noerr:
|
||||||
default:
|
default:
|
||||||
goto haserr
|
goto haserr
|
||||||
}
|
}
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -113,7 +113,7 @@ haserr:
|
||||||
goto hasperr
|
goto hasperr
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -126,7 +126,7 @@ hasperr:
|
||||||
case db.writeLockC <- struct{}{}:
|
case db.writeLockC <- struct{}{}:
|
||||||
// Hold write lock, so that write won't pass-through.
|
// Hold write lock, so that write won't pass-through.
|
||||||
db.compWriteLocking = true
|
db.compWriteLocking = true
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
if db.compWriteLocking {
|
if db.compWriteLocking {
|
||||||
// We should release the lock or Close will hang.
|
// We should release the lock or Close will hang.
|
||||||
<-db.writeLockC
|
<-db.writeLockC
|
||||||
|
@ -195,7 +195,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
|
||||||
db.logf("%s exiting (persistent error %q)", name, perr)
|
db.logf("%s exiting (persistent error %q)", name, perr)
|
||||||
db.compactionExitTransact()
|
db.compactionExitTransact()
|
||||||
}
|
}
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
db.logf("%s exiting", name)
|
db.logf("%s exiting", name)
|
||||||
db.compactionExitTransact()
|
db.compactionExitTransact()
|
||||||
}
|
}
|
||||||
|
@ -224,7 +224,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
|
||||||
}
|
}
|
||||||
select {
|
select {
|
||||||
case <-backoffT.C:
|
case <-backoffT.C:
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
db.logf("%s exiting", name)
|
db.logf("%s exiting", name)
|
||||||
db.compactionExitTransact()
|
db.compactionExitTransact()
|
||||||
}
|
}
|
||||||
|
@ -288,7 +288,7 @@ func (db *DB) memCompaction() {
|
||||||
case <-db.compPerErrC:
|
case <-db.compPerErrC:
|
||||||
close(resumeC)
|
close(resumeC)
|
||||||
resumeC = nil
|
resumeC = nil
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -337,7 +337,7 @@ func (db *DB) memCompaction() {
|
||||||
select {
|
select {
|
||||||
case <-resumeC:
|
case <-resumeC:
|
||||||
close(resumeC)
|
close(resumeC)
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -378,7 +378,7 @@ func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
|
||||||
select {
|
select {
|
||||||
case ch := <-b.db.tcompPauseC:
|
case ch := <-b.db.tcompPauseC:
|
||||||
b.db.pauseCompaction(ch)
|
b.db.pauseCompaction(ch)
|
||||||
case _, _ = <-b.db.closeC:
|
case <-b.db.closeC:
|
||||||
b.db.compactionExitTransact()
|
b.db.compactionExitTransact()
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
@ -643,7 +643,7 @@ func (db *DB) tableNeedCompaction() bool {
|
||||||
func (db *DB) pauseCompaction(ch chan<- struct{}) {
|
func (db *DB) pauseCompaction(ch chan<- struct{}) {
|
||||||
select {
|
select {
|
||||||
case ch <- struct{}{}:
|
case ch <- struct{}{}:
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
db.compactionExitTransact()
|
db.compactionExitTransact()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -697,14 +697,14 @@ func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
|
||||||
case compC <- cAuto{ch}:
|
case compC <- cAuto{ch}:
|
||||||
case err = <-db.compErrC:
|
case err = <-db.compErrC:
|
||||||
return
|
return
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return ErrClosed
|
return ErrClosed
|
||||||
}
|
}
|
||||||
// Wait cmd.
|
// Wait cmd.
|
||||||
select {
|
select {
|
||||||
case err = <-ch:
|
case err = <-ch:
|
||||||
case err = <-db.compErrC:
|
case err = <-db.compErrC:
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return ErrClosed
|
return ErrClosed
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
@ -719,14 +719,14 @@ func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (e
|
||||||
case compC <- cRange{level, min, max, ch}:
|
case compC <- cRange{level, min, max, ch}:
|
||||||
case err := <-db.compErrC:
|
case err := <-db.compErrC:
|
||||||
return err
|
return err
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return ErrClosed
|
return ErrClosed
|
||||||
}
|
}
|
||||||
// Wait cmd.
|
// Wait cmd.
|
||||||
select {
|
select {
|
||||||
case err = <-ch:
|
case err = <-ch:
|
||||||
case err = <-db.compErrC:
|
case err = <-db.compErrC:
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return ErrClosed
|
return ErrClosed
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
@ -758,7 +758,7 @@ func (db *DB) mCompaction() {
|
||||||
default:
|
default:
|
||||||
panic("leveldb: unknown command")
|
panic("leveldb: unknown command")
|
||||||
}
|
}
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -791,7 +791,7 @@ func (db *DB) tCompaction() {
|
||||||
case ch := <-db.tcompPauseC:
|
case ch := <-db.tcompPauseC:
|
||||||
db.pauseCompaction(ch)
|
db.pauseCompaction(ch)
|
||||||
continue
|
continue
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
@ -806,7 +806,7 @@ func (db *DB) tCompaction() {
|
||||||
case ch := <-db.tcompPauseC:
|
case ch := <-db.tcompPauseC:
|
||||||
db.pauseCompaction(ch)
|
db.pauseCompaction(ch)
|
||||||
continue
|
continue
|
||||||
case _, _ = <-db.closeC:
|
case <-db.closeC:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
4
vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
generated
vendored
4
vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
generated
vendored
|
@ -59,7 +59,7 @@ func (db *DB) releaseSnapshot(se *snapshotElement) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Gets minimum sequence that not being snapshoted.
|
// Gets minimum sequence that not being snapshotted.
|
||||||
func (db *DB) minSeq() uint64 {
|
func (db *DB) minSeq() uint64 {
|
||||||
db.snapsMu.Lock()
|
db.snapsMu.Lock()
|
||||||
defer db.snapsMu.Unlock()
|
defer db.snapsMu.Unlock()
|
||||||
|
@ -131,7 +131,7 @@ func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewIterator returns an iterator for the snapshot of the underlying DB.
|
// NewIterator returns an iterator for the snapshot of the underlying DB.
|
||||||
// The returned iterator is not goroutine-safe, but it is safe to use
|
// The returned iterator is not safe for concurrent use, but it is safe to use
|
||||||
// multiple iterators concurrently, with each in a dedicated goroutine.
|
// multiple iterators concurrently, with each in a dedicated goroutine.
|
||||||
// It is also safe to use an iterator concurrently with modifying its
|
// It is also safe to use an iterator concurrently with modifying its
|
||||||
// underlying DB. The resultant key/value pairs are guaranteed to be
|
// underlying DB. The resultant key/value pairs are guaranteed to be
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue