Update vendoring.

beorn7 2015-04-08 18:24:23 +02:00
parent 7991b4be51
commit 191c728152
90 changed files with 2726 additions and 1901 deletions

Godeps/Godeps.json generated

@ -1,6 +1,6 @@
{
"ImportPath": "github.com/prometheus/prometheus",
"GoVersion": "go1.4",
"GoVersion": "go1.4.2",
"Deps": [
{
"ImportPath": "bitbucket.org/ww/goautoneg",
@ -17,35 +17,35 @@
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "5677a0e3d5e89854c9974e1256839ee23f8233ca"
"Rev": "655cdfa588ea190e901bc5590e65d5621688847c"
},
{
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/ext",
"Rev": "ba7d65ac66e9da93a714ca18f6d1bc7a0c09100c"
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
},
{
"ImportPath": "github.com/miekg/dns",
"Rev": "b65f52f3f0dd1afa25cbbf63f8e7eb15fb5c0641"
"Rev": "e6898c8f30b5d002db962043a62db90552e90bf7"
},
{
"ImportPath": "github.com/prometheus/client_golang/extraction",
"Comment": "0.3.2",
"Rev": "1cf6d4b964951c63779ba7513c57fe389b609014"
"Comment": "0.4.0-1-g692492e",
"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
},
{
"ImportPath": "github.com/prometheus/client_golang/model",
"Comment": "0.3.2",
"Rev": "1cf6d4b964951c63779ba7513c57fe389b609014"
"Comment": "0.4.0-1-g692492e",
"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
},
{
"ImportPath": "github.com/prometheus/client_golang/prometheus",
"Comment": "0.3.2",
"Rev": "1cf6d4b964951c63779ba7513c57fe389b609014"
"Comment": "0.4.0-1-g692492e",
"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
},
{
"ImportPath": "github.com/prometheus/client_golang/text",
"Comment": "0.3.2",
"Rev": "1cf6d4b964951c63779ba7513c57fe389b609014"
"Comment": "0.4.0-1-g692492e",
"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
},
{
"ImportPath": "github.com/prometheus/client_model/go",
@ -54,15 +54,15 @@
},
{
"ImportPath": "github.com/prometheus/procfs",
"Rev": "92faa308558161acab0ada1db048e9996ecec160"
"Rev": "490cc6eb5fa45bf8a8b7b73c8bc82a8160e8531d"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "e9e2c8f6d3b9c313fb4acaac5ab06285bcf30b04"
"Rev": "4875955338b0a434238a31165cb87255ab6e9e4a"
},
{
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
}
]
}
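For orientation: each entry in the Deps list above pins one Go import path to an exact git revision, and godep restores the vendored tree from those pins. A minimal sketch of the structures such a manifest decodes into (field names follow the JSON shown above; the real godep tool may carry more fields):

// Sketch only: mirrors the JSON fields shown above, not godep's actual source.
package main

import (
    "encoding/json"
    "fmt"
    "os"
)

type Dependency struct {
    ImportPath string
    Comment    string `json:",omitempty"` // nearest tag, e.g. "0.4.0-1-g692492e"
    Rev        string // exact git commit to vendor
}

type Godeps struct {
    ImportPath string
    GoVersion  string
    Deps       []Dependency
}

func main() {
    f, err := os.Open("Godeps/Godeps.json")
    if err != nil {
        panic(err)
    }
    defer f.Close()
    var g Godeps
    if err := json.NewDecoder(f).Decode(&g); err != nil {
        panic(err)
    }
    for _, d := range g.Deps {
        fmt.Printf("%-60s %s\n", d.ImportPath, d.Rev)
    }
}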


@ -39,5 +39,5 @@ test: install generate-test-pbs
generate-test-pbs:
make install
make -C testdata
make -C proto3_proto
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
make


@ -44,8 +44,8 @@ import (
"testing"
"time"
. "./testdata"
. "github.com/golang/protobuf/proto"
. "github.com/golang/protobuf/proto/testdata"
)
var globalO *Buffer
@ -1252,7 +1252,8 @@ func TestProto1RepeatedGroup(t *testing.T) {
}
o := old()
if err := o.Marshal(pb); err != ErrRepeatedHasNil {
err := o.Marshal(pb)
if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
t.Fatalf("unexpected or no error when marshaling: %v", err)
}
}
@ -1441,6 +1442,17 @@ func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {
}
}
func TestSetDefaultWithRepeatedNonMessage(t *testing.T) {
m := &MyMessage{
Pet: []string{"turtle", "wombat"},
}
expected := Clone(m)
SetDefaults(m)
if !Equal(m, expected) {
t.Errorf("\n got %v\nwant %v", m, expected)
}
}
func TestMaximumTagNumber(t *testing.T) {
m := &MaxTag{
LastField: String("natural goat essence"),


@ -36,7 +36,7 @@ import (
"github.com/golang/protobuf/proto"
pb "./testdata"
pb "github.com/golang/protobuf/proto/testdata"
)
var cloneTestMessage = &pb.MyMessage{


@ -60,9 +60,9 @@ func (e *RequiredNotSetError) Error() string {
}
var (
// ErrRepeatedHasNil is the error returned if Marshal is called with
// errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element.
ErrRepeatedHasNil = errors.New("proto: repeated field has nil element")
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
// ErrNil is the error returned if Marshal is called with nil.
ErrNil = errors.New("proto: Marshal called with nil")
@ -939,7 +939,7 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
for i := 0; i < l; i++ {
structp := s.Index(i)
if structPointer_IsNil(structp) {
return ErrRepeatedHasNil
return errRepeatedHasNil
}
// Can the object marshal itself?
@ -958,7 +958,7 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
err := o.enc_len_struct(p.sprop, structp, &state)
if err != nil && !state.shouldContinue(err, nil) {
if err == ErrNil {
return ErrRepeatedHasNil
return errRepeatedHasNil
}
return err
}
@ -1001,7 +1001,7 @@ func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error
for i := 0; i < l; i++ {
b := s.Index(i)
if structPointer_IsNil(b) {
return ErrRepeatedHasNil
return errRepeatedHasNil
}
o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
@ -1010,7 +1010,7 @@ func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error
if err != nil && !state.shouldContinue(err, nil) {
if err == ErrNil {
return ErrRepeatedHasNil
return errRepeatedHasNil
}
return err
}
@ -1128,10 +1128,12 @@ func size_new_map(p *Properties, base structPointer) int {
keycopy.Set(key)
valcopy.Set(val)
// Tag codes are two bytes per map entry.
n += 2
n += p.mkeyprop.size(p.mkeyprop, keybase)
n += p.mvalprop.size(p.mvalprop, valbase)
// Tag codes for key and val are the responsibility of the sub-sizer.
keysize := p.mkeyprop.size(p.mkeyprop, keybase)
valsize := p.mvalprop.size(p.mvalprop, valbase)
entry := keysize + valsize
// Add on tag code and length of map entry itself.
n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
}
return n
}
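The new accounting above reflects how a map entry is actually encoded: as a nested message, i.e. the field's tag code, a varint length prefix, and then the already-tagged key and value fields, rather than a flat two bytes of tag overhead per entry. A rough sketch of that arithmetic, with sizeVarint standing in for the varint-length helper referenced above:

// Rough sketch of the per-entry size arithmetic described above.
package main

import "fmt"

// sizeVarint returns the number of bytes the varint encoding of x occupies,
// matching the role of the length-prefix helper used above.
func sizeVarint(x uint64) int {
    n := 1
    for x >= 1<<7 {
        x >>= 7
        n++
    }
    return n
}

// mapEntrySize mirrors the new accounting: tagLen is len(p.tagcode), and
// keySize/valSize already include the key and value tag bytes.
func mapEntrySize(tagLen, keySize, valSize int) int {
    entry := keySize + valSize
    return tagLen + sizeVarint(uint64(entry)) + entry
}

func main() {
    // Small entry: one length byte, same total as the old "n += 2" accounting.
    fmt.Println(mapEntrySize(1, 2, 10))
    // Entry over 127 bytes: the length prefix itself needs two bytes, which
    // the old flat two-byte charge missed.
    fmt.Println(mapEntrySize(1, 2, 127))
}

With a 125-byte string value, as in the "map field with big entry" size test added later in this commit, the entry crosses that 127-byte boundary, which appears to be exactly the case the old code undercounted.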
@ -1184,6 +1186,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
if p.Required && state.err == nil {
state.err = &RequiredNotSetError{p.Name}
}
} else if err == errRepeatedHasNil {
// Give more context to nil values in repeated fields.
return errors.New("repeated field " + p.OrigName + " has nil element")
} else if !state.shouldContinue(err, p) {
return err
}


@ -34,8 +34,8 @@ package proto_test
import (
"testing"
pb "./testdata"
. "github.com/golang/protobuf/proto"
pb "github.com/golang/protobuf/proto/testdata"
)
// Four identical base messages.


@ -37,6 +37,7 @@ package proto
import (
"errors"
"fmt"
"reflect"
"strconv"
"sync"
@ -321,6 +322,14 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
return nil
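The reflect-based nil check above is needed because a typed nil pointer stored in an interface{} does not compare equal to nil; only reflection (or a type assertion) can see that the concrete pointer inside is nil. A standalone illustration of that Go detail, with a hypothetical stand-in type:

// Standalone illustration; Ext here is a hypothetical stand-in for a
// generated extension message type.
package main

import (
    "fmt"
    "reflect"
)

type Ext struct{}

func main() {
    var p *Ext                // typed nil pointer
    var value interface{} = p // the interface now holds (type *Ext, value nil)

    fmt.Println(value == nil)                   // false: the interface itself is non-nil
    fmt.Println(reflect.ValueOf(value).IsNil()) // true: the pointer it wraps is nil
}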


@ -34,8 +34,8 @@ package proto_test
import (
"testing"
pb "./testdata"
"github.com/golang/protobuf/proto"
pb "github.com/golang/protobuf/proto/testdata"
)
func TestGetExtensionsWithMissingExtensions(t *testing.T) {
@ -135,3 +135,19 @@ func TestExtensionsRoundTrip(t *testing.T) {
t.Error("expected some sort of type mismatch error, got nil")
}
}
func TestNilExtension(t *testing.T) {
msg := &pb.MyMessage{
Count: proto.Int32(1),
}
if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
t.Fatal(err)
}
if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
t.Error("expected SetExtension to fail due to a nil extension")
} else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
t.Errorf("expected error %v, got %v", want, err)
}
// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
}


@ -607,13 +607,15 @@ func setDefaults(v reflect.Value, recur, zeros bool) {
for _, ni := range dm.nested {
f := v.Field(ni)
if f.IsNil() {
continue
}
// f is *T or []*T
if f.Kind() == reflect.Ptr {
// f is *T or []*T or map[T]*T
switch f.Kind() {
case reflect.Ptr:
if f.IsNil() {
continue
}
setDefaults(f, recur, zeros)
} else {
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
@ -621,6 +623,15 @@ func setDefaults(v reflect.Value, recur, zeros bool) {
}
setDefaults(e, recur, zeros)
}
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
@ -646,10 +657,6 @@ type scalarField struct {
value interface{} // the proto-declared default value, or nil
}
func ptrToStruct(t reflect.Type) bool {
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
@ -661,9 +668,33 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
}
ft := t.Field(fi).Type
// nested messages
if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) {
dm.nested = append(dm.nested, fi)
var canHaveDefault, nestedMessage bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
dm.nested = append(dm.nested, fi)
}
continue
}
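The switch above classifies each generated struct field by its reflect kind: a pointer to a struct is a nested message, any other pointer is a proto2 scalar that may carry a default, a slice holds either repeated messages ([]*T) or bytes ([]uint8), and a map with pointer values holds nested messages. A small, self-contained illustration of how those kinds look through reflect (the example struct is hypothetical):

// Hypothetical illustration of the reflect kinds handled by the switch above.
package main

import (
    "fmt"
    "reflect"
)

type nested struct{ N int32 }

type example struct {
    Scalar *int32             // proto2 optional scalar: Ptr to non-struct
    Msg    *nested            // nested message: Ptr to struct
    Msgs   []*nested          // repeated message: Slice of Ptr
    Raw    []byte             // bytes field: Slice of Uint8
    ByName map[string]*nested // map with message values: Map with Ptr elements
}

func main() {
    t := reflect.TypeOf(example{})
    for i := 0; i < t.NumField(); i++ {
        f := t.Field(i)
        switch f.Type.Kind() {
        case reflect.Ptr, reflect.Slice, reflect.Map:
            fmt.Printf("%-6s kind=%-5v elem=%v\n", f.Name, f.Type.Kind(), f.Type.Elem().Kind())
        default:
            fmt.Printf("%-6s kind=%v\n", f.Name, f.Type.Kind())
        }
    }
}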


@ -440,7 +440,12 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
p.enc = (*Buffer).enc_slice_byte
p.dec = (*Buffer).dec_slice_byte
p.size = size_slice_byte
if p.proto3 {
// This is a []byte, which is either a bytes field,
// or the value of a map field. In the latter case,
// we always encode an empty []byte, so we should not
// use the proto3 enc/size funcs.
// f == nil iff this is the key/value of a map field.
if p.proto3 && f != nil {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
}
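The f != nil guard above keeps the proto3 omit-if-empty encoder away from []byte values that sit inside map entries: a map entry must always write its value field, even when the byte slice is empty, or the key would not round-trip. A sketch of the behavior being protected, using the proto3 MessageWithMap type this commit adds (import paths follow the diff):

// Sketch of the behavior the guard protects, using the proto3 test type
// added by this commit.
package main

import (
    "fmt"
    "log"

    "github.com/golang/protobuf/proto"
    proto3pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
    m := &proto3pb.MessageWithMap{
        ByteMapping: map[bool][]byte{false: []byte{}},
    }
    b, err := proto.Marshal(m)
    if err != nil {
        log.Fatal(err)
    }
    // The entry for key false must still be written even though its value is
    // an empty []byte; the proto3 omit-if-empty path would drop it, which is
    // what the f != nil guard above prevents for map values.
    fmt.Printf("marshaled %d bytes: % x\n", len(b), b)
}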
@ -595,7 +600,7 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
}
var (
mutex sync.Mutex
propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
@ -605,13 +610,26 @@ func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct")
}
mutex.Lock()
sprop := getPropertiesLocked(t)
mutex.Unlock()
// Most calls to GetProperties in a long-running program will be
// retrieving details for types we have seen before.
propertiesMu.RLock()
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
propertiesMu.Lock()
sprop = getPropertiesLocked(t)
propertiesMu.Unlock()
return sprop
}
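The rewrite above is a standard read-mostly cache: take the read lock for the common hit path and fall back to the write lock only on a miss, re-checking the map once the exclusive lock is held (getPropertiesLocked does that re-check). A generic sketch of the same pattern, independent of the proto package:

// Generic sketch of the RLock fast path / Lock slow path used above.
package main

import (
    "fmt"
    "sync"
)

type cache struct {
    mu sync.RWMutex
    m  map[string]int
}

func (c *cache) get(key string, compute func() int) int {
    c.mu.RLock()
    v, ok := c.m[key]
    c.mu.RUnlock()
    if ok {
        return v // hit: no exclusive lock taken
    }

    c.mu.Lock()
    defer c.mu.Unlock()
    // Re-check under the write lock: another goroutine may have filled the
    // entry while we were waiting (getPropertiesLocked does the same).
    if v, ok := c.m[key]; ok {
        return v
    }
    v = compute()
    c.m[key] = v
    return v
}

func main() {
    c := &cache{m: make(map[string]int)}
    fmt.Println(c.get("answer", func() int { return 42 })) // miss: computes
    fmt.Println(c.get("answer", func() int { return -1 })) // hit: cached 42
}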
// getPropertiesLocked requires that mutex is held.
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {


@ -1,44 +0,0 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2014 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include ../../Make.protobuf
all: regenerate
regenerate:
rm -f proto3.pb.go
make proto3.pb.go
# The following rules are just aids to development. Not needed for typical testing.
diff: regenerate
git diff proto3.pb.go


@ -31,6 +31,8 @@
syntax = "proto3";
import "testdata/test.proto";
package proto3_proto;
message Message {
@ -51,8 +53,16 @@ message Message {
repeated uint64 key = 5;
Nested nested = 6;
map<string, Nested> terrain = 10;
testdata.SubDefaults proto2_field = 11;
map<string, testdata.SubDefaults> proto2_value = 13;
}
message Nested {
string bunny = 1;
}
message MessageWithMap {
map<bool, bytes> byte_mapping = 1;
}


@ -34,8 +34,9 @@ package proto_test
import (
"testing"
pb "./proto3_proto"
"github.com/golang/protobuf/proto"
pb "github.com/golang/protobuf/proto/proto3_proto"
tpb "github.com/golang/protobuf/proto/testdata"
)
func TestProto3ZeroValues(t *testing.T) {
@ -91,3 +92,34 @@ func TestRoundTripProto3(t *testing.T) {
t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
}
}
func TestProto3SetDefaults(t *testing.T) {
in := &pb.Message{
Terrain: map[string]*pb.Nested{
"meadow": new(pb.Nested),
},
Proto2Field: new(tpb.SubDefaults),
Proto2Value: map[string]*tpb.SubDefaults{
"badlands": new(tpb.SubDefaults),
},
}
got := proto.Clone(in).(*pb.Message)
proto.SetDefaults(got)
// There are no defaults in proto3. Everything should be the zero value, but
// we need to remember to set defaults for nested proto2 messages.
want := &pb.Message{
Terrain: map[string]*pb.Nested{
"meadow": new(pb.Nested),
},
Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
Proto2Value: map[string]*tpb.SubDefaults{
"badlands": &tpb.SubDefaults{N: proto.Int64(7)},
},
}
if !proto.Equal(got, want) {
t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
}
}


@ -33,11 +33,12 @@ package proto_test
import (
"log"
"strings"
"testing"
proto3pb "./proto3_proto"
pb "./testdata"
. "github.com/golang/protobuf/proto"
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
pb "github.com/golang/protobuf/proto/testdata"
)
var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
@ -113,10 +114,16 @@ var SizeTests = []struct {
{"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
{"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
{"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
{"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
{"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
{"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
{"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
{"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
{"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
{"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
{"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
}
func TestSize(t *testing.T) {


@ -1890,6 +1890,7 @@ type MessageWithMap struct {
NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
XXX_unrecognized []byte `json:"-"`
}
@ -1918,6 +1919,13 @@ func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
return nil
}
func (m *MessageWithMap) GetStrToStr() map[string]string {
if m != nil {
return m.StrToStr
}
return nil
}
var E_Greeting = &proto.ExtensionDesc{
ExtendedType: (*MyMessage)(nil),
ExtensionType: ([]string)(nil),


@ -431,4 +431,5 @@ message MessageWithMap {
map<int32, string> name_mapping = 1;
map<sint64, FloatingPoint> msg_mapping = 2;
map<bool, bytes> byte_mapping = 3;
map<string, string> str_to_str = 4;
}


@ -36,9 +36,9 @@ import (
"reflect"
"testing"
proto3pb "./proto3_proto"
. "./testdata"
. "github.com/golang/protobuf/proto"
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
. "github.com/golang/protobuf/proto/testdata"
)
type UnmarshalTextTest struct {


@ -41,8 +41,8 @@ import (
"github.com/golang/protobuf/proto"
proto3pb "./proto3_proto"
pb "./testdata"
proto3pb "github.com/golang/protobuf/proto/proto3_proto"
pb "github.com/golang/protobuf/proto/testdata"
)
// textMessage implements the methods that allow it to marshal and unmarshal


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package ext
package pbutil
import (
"bytes"
@ -21,6 +21,8 @@ import (
"testing"
"testing/quick"
"github.com/matttproud/golang_protobuf_extensions/pbtest"
. "github.com/golang/protobuf/proto"
. "github.com/golang/protobuf/proto/testdata"
)
@ -138,10 +140,10 @@ I expect it may. Let's hope you enjoy testing as much as we do.`),
func TestEndToEndValid(t *testing.T) {
for _, test := range [][]Message{
[]Message{&Empty{}},
[]Message{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},
[]Message{&GoEnum{Foo: FOO_FOO1.Enum()}},
[]Message{&Strings{
{&Empty{}},
{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},
{&GoEnum{Foo: FOO_FOO1.Enum()}},
{&Strings{
StringField: String(`This is my gigantic, unhappy string. It exceeds
the encoding size of a single byte varint. We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic.
@ -176,45 +178,6 @@ I expect it may. Let's hope you enjoy testing as much as we do.`),
}
}
// visitMessage empties the private state fields of the quick.Value()-generated
// Protocol Buffer messages, for they cause an inordinate amount of problems.
// This is because we are using an automated fuzz generator on a type with
// private fields.
func visitMessage(m Message) {
t := reflect.TypeOf(m)
if t.Kind() != reflect.Ptr {
return
}
derefed := t.Elem()
if derefed.Kind() != reflect.Struct {
return
}
v := reflect.ValueOf(m)
elem := v.Elem()
for i := 0; i < elem.NumField(); i++ {
field := elem.FieldByIndex([]int{i})
fieldType := field.Type()
if fieldType.Implements(reflect.TypeOf((*Message)(nil)).Elem()) {
visitMessage(field.Interface().(Message))
}
if field.Kind() == reflect.Slice {
for i := 0; i < field.Len(); i++ {
elem := field.Index(i)
elemType := elem.Type()
if elemType.Implements(reflect.TypeOf((*Message)(nil)).Elem()) {
visitMessage(elem.Interface().(Message))
}
}
}
}
if field := elem.FieldByName("XXX_unrecognized"); field.IsValid() {
field.Set(reflect.ValueOf([]byte{}))
}
if field := elem.FieldByName("XXX_extensions"); field.IsValid() {
field.Set(reflect.ValueOf(nil))
}
}
// rndMessage generates a random valid Protocol Buffer message.
func rndMessage(r *rand.Rand) Message {
var t reflect.Type
@ -307,7 +270,9 @@ func rndMessage(r *rand.Rand) Message {
if !ok {
panic("attempt to generate illegal item; consult item 11")
}
visitMessage(v.Interface().(Message))
if err := pbtest.SanitizeGenerated(v.Interface().(Message)); err != nil {
panic(err)
}
return v.Interface().(Message)
}


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package ext
package pbutil
import (
"encoding/binary"


@ -12,5 +12,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ext enables record length-delimited Protocol Buffer streaming.
package ext
// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package ext
package pbutil
import (
"encoding/binary"


@ -27,7 +27,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ext
package pbutil
import (
. "github.com/golang/protobuf/proto"


@ -1,21 +1,6 @@
language: go
go:
- 1.2
- 1.3
env:
# "gvm update" resets GOOS and GOARCH environment variable, workaround it by setting
# BUILD_GOOS and BUILD_GOARCH and overriding GOARCH and GOOS in the build script
global:
- BUILD_GOARCH=amd64
matrix:
- BUILD_GOOS=linux
- BUILD_GOOS=darwin
- BUILD_GOOS=windows
- 1.4
script:
- gvm cross $BUILD_GOOS $BUILD_GOARCH
- GOARCH=$BUILD_GOARCH GOOS=$BUILD_GOOS go build
# only test on linux
# also specify -short; the crypto tests fail in weird ways *sometimes*
# See issue #151
- if [ $BUILD_GOOS == "linux" ]; then GOARCH=$BUILD_GOARCH GOOS=$BUILD_GOOS go test -short -bench=.; fi
- go test -short -bench=.


@ -35,6 +35,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/DevelopersPL/godnsagent
* https://github.com/duedil-ltd/discodns
* https://github.com/StalkR/dns-reverse-proxy
* https://github.com/tianon/rawdns
Send pull request if you want to be listed here.
@ -68,7 +69,7 @@ correctly, the following should work:
## Examples
A short "how to use the API" is at the beginning of dns.go (this also will show
A short "how to use the API" is at the beginning of doc.go (this also will show
when you call `godoc github.com/miekg/dns`).
Example programs can be found in the `github.com/miekg/exdns` repository.


@ -55,6 +55,13 @@ func Exchange(m *Msg, a string) (r *Msg, err error) {
defer co.Close()
co.SetReadDeadline(time.Now().Add(dnsTimeout))
co.SetWriteDeadline(time.Now().Add(dnsTimeout))
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= MinMsgSize {
co.UDPSize = opt.UDPSize()
}
if err = co.WriteMsg(m); err != nil {
return nil, err
}
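With the change above, the package-level Exchange helper honours the EDNS0 UDP buffer size advertised in the outgoing query, so responses larger than the 512-byte minimum are no longer truncated on the client side. A minimal sketch of a query that benefits from it (the resolver address and query name are placeholders):

// Minimal sketch; the resolver address and query name are placeholders.
package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    m := new(dns.Msg)
    m.SetQuestion(dns.Fqdn("example.org"), dns.TypeDNSKEY)
    // Advertise a 4096-byte UDP buffer (and set the DO bit); Exchange now
    // reads this size from the OPT record instead of assuming MinMsgSize.
    m.SetEdns0(4096, true)

    in, err := dns.Exchange(m, "192.0.2.1:53")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(in.Rcode, len(in.Answer))
}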
@ -290,7 +297,7 @@ func Dial(network, address string) (conn *Conn, err error) {
return conn, nil
}
// Dialtimeout acts like Dial but takes a timeout.
// DialTimeout acts like Dial but takes a timeout.
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
conn = new(Conn)
conn.Conn, err = net.DialTimeout(network, address, timeout)


@ -1,6 +1,7 @@
package dns
import (
"strconv"
"testing"
"time"
)
@ -11,7 +12,7 @@ func TestClientSync(t *testing.T) {
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
t.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
@ -19,26 +20,20 @@ func TestClientSync(t *testing.T) {
m.SetQuestion("miek.nl.", TypeSOA)
c := new(Client)
r, _, e := c.Exchange(m, addrstr)
if e != nil {
t.Logf("failed to exchange: %s", e.Error())
t.Fail()
r, _, err := c.Exchange(m, addrstr)
if err != nil {
t.Errorf("failed to exchange: %v", err)
}
if r != nil && r.Rcode != RcodeSuccess {
t.Log("failed to get an valid answer")
t.Fail()
t.Logf("%v\n", r)
t.Errorf("failed to get an valid answer\n%v", r)
}
// And now with plain Exchange().
r, e = Exchange(m, addrstr)
if e != nil {
t.Logf("failed to exchange: %s", e.Error())
t.Fail()
r, err = Exchange(m, addrstr)
if err != nil {
t.Errorf("failed to exchange: %v", err)
}
if r != nil && r.Rcode != RcodeSuccess {
t.Log("failed to get an valid answer")
t.Fail()
t.Logf("%v\n", r)
t.Errorf("failed to get an valid answer\n%v", r)
}
}
@ -48,7 +43,7 @@ func TestClientEDNS0(t *testing.T) {
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
t.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
@ -57,6 +52,58 @@ func TestClientEDNS0(t *testing.T) {
m.SetEdns0(2048, true)
c := new(Client)
r, _, err := c.Exchange(m, addrstr)
if err != nil {
t.Errorf("failed to exchange: %v", err)
}
if r != nil && r.Rcode != RcodeSuccess {
t.Errorf("failed to get an valid answer\n%v", r)
}
}
// Validates the transmission and parsing of local EDNS0 options.
func TestClientEDNS0Local(t *testing.T) {
optStr1 := "1979:0x0707"
optStr2 := strconv.Itoa(EDNS0LOCALSTART) + ":0x0601"
handler := func(w ResponseWriter, req *Msg) {
m := new(Msg)
m.SetReply(req)
m.Extra = make([]RR, 1, 2)
m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello local edns"}}
// If the local options are what we expect, then reflect them back.
ec1 := req.Extra[0].(*OPT).Option[0].(*EDNS0_LOCAL).String()
ec2 := req.Extra[0].(*OPT).Option[1].(*EDNS0_LOCAL).String()
if ec1 == optStr1 && ec2 == optStr2 {
m.Extra = append(m.Extra, req.Extra[0])
}
w.WriteMsg(m)
}
HandleFunc("miek.nl.", handler)
defer HandleRemove("miek.nl.")
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
}
defer s.Shutdown()
m := new(Msg)
m.SetQuestion("miek.nl.", TypeTXT)
// Add two local edns options to the query.
ec1 := &EDNS0_LOCAL{Code: 1979, Data: []byte{7, 7}}
ec2 := &EDNS0_LOCAL{Code: EDNS0LOCALSTART, Data: []byte{6, 1}}
o := &OPT{Hdr: RR_Header{Name: ".", Rrtype: TypeOPT}, Option: []EDNS0{ec1, ec2}}
m.Extra = append(m.Extra, o)
c := new(Client)
r, _, e := c.Exchange(m, addrstr)
if e != nil {
@ -65,7 +112,28 @@ func TestClientEDNS0(t *testing.T) {
}
if r != nil && r.Rcode != RcodeSuccess {
t.Log("failed to get an valid answer")
t.Log("failed to get a valid answer")
t.Fail()
t.Logf("%v\n", r)
}
txt := r.Extra[0].(*TXT).Txt[0]
if txt != "Hello local edns" {
t.Log("Unexpected result for miek.nl", txt, "!= Hello local edns")
t.Fail()
}
// Validate the local options in the reply.
got := r.Extra[1].(*OPT).Option[0].(*EDNS0_LOCAL).String()
if got != optStr1 {
t.Log("failed to get local edns0 answer; got %s, expected %s", got, optStr1)
t.Fail()
t.Logf("%v\n", r)
}
got = r.Extra[1].(*OPT).Option[1].(*EDNS0_LOCAL).String()
if got != optStr2 {
t.Log("failed to get local edns0 answer; got %s, expected %s", got, optStr2)
t.Fail()
t.Logf("%v\n", r)
}
@ -77,7 +145,7 @@ func TestSingleSingleInflight(t *testing.T) {
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
t.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
@ -106,8 +174,7 @@ Loop:
first = rtt
} else {
if first != rtt {
t.Log("all rtts should be equal")
t.Fail()
t.Errorf("all rtts should be equal. got %d want %d", rtt, first)
}
}
i++
@ -127,14 +194,14 @@ func ExampleUpdateLeaseTSIG(t *testing.T) {
rrs[0] = rr
m.Insert(rrs)
lease_rr := new(OPT)
lease_rr.Hdr.Name = "."
lease_rr.Hdr.Rrtype = TypeOPT
leaseRr := new(OPT)
leaseRr.Hdr.Name = "."
leaseRr.Hdr.Rrtype = TypeOPT
e := new(EDNS0_UL)
e.Code = EDNS0UL
e.Lease = 120
lease_rr.Option = append(lease_rr.Option, e)
m.Extra = append(m.Extra, lease_rr)
leaseRr.Option = append(leaseRr.Option, e)
m.Extra = append(m.Extra, leaseRr)
c := new(Client)
m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix())
@ -142,7 +209,6 @@ func ExampleUpdateLeaseTSIG(t *testing.T) {
_, _, err := c.Exchange(m, "127.0.0.1:53")
if err != nil {
t.Log(err.Error())
t.Fail()
t.Error(err)
}
}


@ -7,7 +7,7 @@ import (
"strings"
)
// Wraps the contents of the /etc/resolv.conf.
// ClientConfig wraps the contents of the /etc/resolv.conf file.
type ClientConfig struct {
Servers []string // servers to use
Search []string // suffixes to append to local name


@ -32,7 +32,7 @@ func testConfig(t *testing.T, data string) {
}
cc, err := ClientConfigFromFile(path)
if err != nil {
t.Errorf("error parsing resolv.conf: %s", err)
t.Errorf("error parsing resolv.conf: %v", err)
}
if l := len(cc.Servers); l != 2 {
t.Errorf("incorrect number of nameservers detected: %d", l)


@ -24,7 +24,9 @@ func (dns *Msg) SetReply(request *Msg) *Msg {
return dns
}
// SetQuestion creates a question message.
// SetQuestion creates a question message, it sets the Question
// section, generates an Id and sets the RecursionDesired (RD)
// bit to true.
func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
dns.Id = Id()
dns.RecursionDesired = true
@ -33,7 +35,9 @@ func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
return dns
}
// SetNotify creates a notify message.
// SetNotify creates a notify message, it sets the Question
// section, generates an Id and sets the Authoritative (AA)
// bit to true.
func (dns *Msg) SetNotify(z string) *Msg {
dns.Opcode = OpcodeNotify
dns.Authoritative = true
@ -184,7 +188,7 @@ func IsFqdn(s string) bool {
return s[l-1] == '.'
}
// Fqdns return the fully qualified domain name from s.
// Fqdn return the fully qualified domain name from s.
// If s is already fully qualified, it behaves as the identity function.
func Fqdn(s string) string {
if IsFqdn(s) {


@ -1,106 +1,16 @@
// Package dns implements a full featured interface to the Domain Name System.
// Server- and client-side programming is supported.
// The package allows complete control over what is send out to the DNS. The package
// API follows the less-is-more principle, by presenting a small, clean interface.
//
// The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
// TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
// Note that domain names MUST be fully qualified, before sending them, unqualified
// names in a message will result in a packing failure.
//
// Resource records are native types. They are not stored in wire format.
// Basic usage pattern for creating a new resource record:
//
// r := new(dns.MX)
// r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
// r.Preference = 10
// r.Mx = "mx.miek.nl."
//
// Or directly from a string:
//
// mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
//
// Or when the default TTL (3600) and class (IN) suit you:
//
// mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
//
// Or even:
//
// mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
//
// In the DNS messages are exchanged, these messages contain resource
// records (sets). Use pattern for creating a message:
//
// m := new(dns.Msg)
// m.SetQuestion("miek.nl.", dns.TypeMX)
//
// Or when not certain if the domain name is fully qualified:
//
// m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
//
// The message m is now a message with the question section set to ask
// the MX records for the miek.nl. zone.
//
// The following is slightly more verbose, but more flexible:
//
// m1 := new(dns.Msg)
// m1.Id = dns.Id()
// m1.RecursionDesired = true
// m1.Question = make([]dns.Question, 1)
// m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
//
// After creating a message it can be send.
// Basic use pattern for synchronous querying the DNS at a
// server configured on 127.0.0.1 and port 53:
//
// c := new(dns.Client)
// in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
//
// Suppressing
// multiple outstanding queries (with the same question, type and class) is as easy as setting:
//
// c.SingleInflight = true
//
// If these "advanced" features are not needed, a simple UDP query can be send,
// with:
//
// in, err := dns.Exchange(m1, "127.0.0.1:53")
//
// When this functions returns you will get dns message. A dns message consists
// out of four sections.
// The question section: in.Question, the answer section: in.Answer,
// the authority section: in.Ns and the additional section: in.Extra.
//
// Each of these sections (except the Question section) contain a []RR. Basic
// use pattern for accessing the rdata of a TXT RR as the first RR in
// the Answer section:
//
// if t, ok := in.Answer[0].(*dns.TXT); ok {
// // do something with t.Txt
// }
//
// Domain Name and TXT Character String Representations
//
// Both domain names and TXT character strings are converted to presentation
// form both when unpacked and when converted to strings.
//
// For TXT character strings, tabs, carriage returns and line feeds will be
// converted to \t, \r and \n respectively. Back slashes and quotations marks
// will be escaped. Bytes below 32 and above 127 will be converted to \DDD
// form.
//
// For domain names, in addition to the above rules brackets, periods,
// spaces, semicolons and the at symbol are escaped.
package dns
import "strconv"
const (
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
DefaultMsgSize = 4096 // Standard default for larger than 512 bytes.
MinMsgSize = 512 // Minimal size of a DNS packet.
MaxMsgSize = 65536 // Largest possible DNS packet.
defaultTtl = 3600 // Default TTL.
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
// DefaultMsgSize is the standard default for messages larger than 512 bytes.
DefaultMsgSize = 4096
// MinMsgSize is the minimal size of a DNS packet.
MinMsgSize = 512
// MaxMsgSize is the largest possible DNS packet.
MaxMsgSize = 65535
defaultTtl = 3600 // Default internal TTL.
)
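The MaxMsgSize correction from 65536 to 65535 follows from the wire format: the TCP length prefix and the EDNS0 UDP payload size field are both 16 bits wide, so the largest representable message is 1<<16 - 1 bytes. In code form:

package main

import "fmt"

func main() {
    // DNS carries message length in 16-bit fields (the TCP length prefix and
    // the EDNS0 UDP payload size), so the largest representable message is:
    const maxDNSMsgSize = 1<<16 - 1
    fmt.Println(maxDNSMsgSize) // 65535, matching the corrected MaxMsgSize
}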
// Error represents a DNS error
@ -137,9 +47,10 @@ type RR_Header struct {
Rdlength uint16 // length of data after header
}
// Header returns itself. This is here to make RR_Header implement the RR interface.
func (h *RR_Header) Header() *RR_Header { return h }
// Just to imlement the RR interface
// Just to implement the RR interface.
func (h *RR_Header) copy() RR { return nil }
func (h *RR_Header) copyHeader() *RR_Header {


@ -17,13 +17,11 @@ func TestPackUnpack(t *testing.T) {
out.Answer[0] = key
msg, err := out.Pack()
if err != nil {
t.Log("failed to pack msg with DNSKEY")
t.Fail()
t.Error("failed to pack msg with DNSKEY")
}
in := new(Msg)
if in.Unpack(msg) != nil {
t.Log("failed to unpack msg with DNSKEY")
t.Fail()
t.Error("failed to unpack msg with DNSKEY")
}
sig := new(RRSIG)
@ -35,13 +33,11 @@ func TestPackUnpack(t *testing.T) {
out.Answer[0] = sig
msg, err = out.Pack()
if err != nil {
t.Log("failed to pack msg with RRSIG")
t.Fail()
t.Error("failed to pack msg with RRSIG")
}
if in.Unpack(msg) != nil {
t.Log("failed to unpack msg with RRSIG")
t.Fail()
t.Error("failed to unpack msg with RRSIG")
}
}
@ -62,8 +58,7 @@ func TestPackUnpack2(t *testing.T) {
m.Answer[0] = rr
_, err := m.Pack()
if err != nil {
t.Log("Packing failed: " + err.Error())
t.Fail()
t.Error("Packing failed: ", err)
return
}
}
@ -90,16 +85,14 @@ func TestPackUnpack3(t *testing.T) {
m.Answer[0] = rr
b, err := m.Pack()
if err != nil {
t.Log("packing failed: " + err.Error())
t.Fail()
t.Error("packing failed: ", err)
return
}
var unpackMsg Msg
err = unpackMsg.Unpack(b)
if err != nil {
t.Log("unpacking failed")
t.Fail()
t.Error("unpacking failed")
return
}
}
@ -111,10 +104,9 @@ func TestBailiwick(t *testing.T) {
}
for parent, child := range yes {
if !IsSubDomain(parent, child) {
t.Logf("%s should be child of %s\n", child, parent)
t.Logf("comparelabels %d", CompareDomainName(parent, child))
t.Logf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
t.Fail()
t.Errorf("%s should be child of %s", child, parent)
t.Errorf("comparelabels %d", CompareDomainName(parent, child))
t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
}
}
no := map[string]string{
@ -126,10 +118,9 @@ func TestBailiwick(t *testing.T) {
}
for parent, child := range no {
if IsSubDomain(parent, child) {
t.Logf("%s should not be child of %s\n", child, parent)
t.Logf("comparelabels %d", CompareDomainName(parent, child))
t.Logf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
t.Fail()
t.Errorf("%s should not be child of %s", child, parent)
t.Errorf("comparelabels %d", CompareDomainName(parent, child))
t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
}
}
}
@ -142,13 +133,11 @@ func TestPack(t *testing.T) {
for _, r := range rr {
m.Answer[0], err = NewRR(r)
if err != nil {
t.Logf("failed to create RR: %s\n", err.Error())
t.Fail()
t.Errorf("failed to create RR: %v", err)
continue
}
if _, err := m.Pack(); err != nil {
t.Logf("packing failed: %s\n", err.Error())
t.Fail()
t.Errorf("packing failed: %v", err)
}
}
x := new(Msg)
@ -160,20 +149,17 @@ func TestPack(t *testing.T) {
// This crashes due to the fact the a.ntpns.org isn't a FQDN
// How to recover() from a remove panic()?
if _, err := x.Pack(); err == nil {
t.Log("packing should fail")
t.Fail()
t.Error("packing should fail")
}
x.Answer = make([]RR, 1)
x.Answer[0], err = NewRR(rr[0])
if _, err := x.Pack(); err == nil {
t.Log("packing should fail")
t.Fail()
t.Error("packing should fail")
}
x.Question = make([]Question, 1)
x.Question[0] = Question{";sd#edddds鍛↙赏‘℅∥↙xzztsestxssweewwsssstx@s@Z嵌e@cn.pool.ntp.org.", TypeA, ClassINET}
if _, err := x.Pack(); err == nil {
t.Log("packing should fail")
t.Fail()
t.Error("packing should fail")
}
}
@ -186,11 +172,10 @@ func TestPackNAPTR(t *testing.T) {
rr, _ := NewRR(n)
msg := make([]byte, rr.len())
if off, err := PackRR(rr, msg, 0, nil, false); err != nil {
t.Logf("packing failed: %s", err.Error())
t.Logf("length %d, need more than %d\n", rr.len(), off)
t.Fail()
t.Errorf("packing failed: %v", err)
t.Errorf("length %d, need more than %d", rr.len(), off)
} else {
t.Logf("buf size needed: %d\n", off)
t.Logf("buf size needed: %d", off)
}
}
}
@ -229,12 +214,10 @@ func TestMsgCompressLength(t *testing.T) {
buf, err := msg.Pack()
if err != nil {
t.Error(err)
t.Fail()
}
if predicted < len(buf) {
t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d\n",
t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d",
msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
t.Fail()
}
}
}
@ -261,12 +244,10 @@ func TestMsgLength(t *testing.T) {
buf, err := msg.Pack()
if err != nil {
t.Error(err)
t.Fail()
}
if predicted < len(buf) {
t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d\n",
t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d",
msg.Question[0].Name, predicted, len(buf))
t.Fail()
}
}
}
@ -400,10 +381,10 @@ func BenchmarkMsgUnpack(b *testing.B) {
name1 := "12345678901234567890123456789012345.12345678.123."
rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
msg_buf, _ := msg.Pack()
msgBuf, _ := msg.Pack()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = msg.Unpack(msg_buf)
_ = msg.Unpack(msgBuf)
}
}
@ -441,7 +422,7 @@ func TestToRFC3597(t *testing.T) {
x := new(RFC3597)
x.ToRFC3597(a)
if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` {
t.Fail()
t.Error("string mismatch")
}
}
@ -453,10 +434,9 @@ func TestNoRdataPack(t *testing.T) {
}
r := fn()
*r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 3600}
_, e := PackRR(r, data, 0, nil, false)
if e != nil {
t.Logf("failed to pack RR with zero rdata: %s: %s\n", TypeToString[typ], e.Error())
t.Fail()
_, err := PackRR(r, data, 0, nil, false)
if err != nil {
t.Errorf("failed to pack RR with zero rdata: %s: %v", TypeToString[typ], err)
}
}
}
@ -472,17 +452,17 @@ func TestNoRdataUnpack(t *testing.T) {
}
r := fn()
*r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 3600}
off, e := PackRR(r, data, 0, nil, false)
if e != nil {
// Should always works, TestNoDataPack should have catched this
off, err := PackRR(r, data, 0, nil, false)
if err != nil {
// Should always work, TestNoDataPack should have caught this
t.Errorf("failed to pack RR: %v", err)
continue
}
rr, _, e := UnpackRR(data[:off], 0)
if e != nil {
t.Logf("failed to unpack RR with zero rdata: %s: %s\n", TypeToString[typ], e.Error())
t.Fail()
rr, _, err := UnpackRR(data[:off], 0)
if err != nil {
t.Errorf("failed to unpack RR with zero rdata: %s: %v", TypeToString[typ], err)
}
t.Logf("%s\n", rr)
t.Log(rr)
}
}
@ -563,18 +543,39 @@ func TestPackIPSECKEY(t *testing.T) {
buf := make([]byte, 1024)
for _, t1 := range tests {
rr, _ := NewRR(t1)
off, e := PackRR(rr, buf, 0, nil, false)
if e != nil {
t.Logf("failed to pack IPSECKEY %s: %s\n", e, t1)
t.Fail()
off, err := PackRR(rr, buf, 0, nil, false)
if err != nil {
t.Errorf("failed to pack IPSECKEY %v: %s", err, t1)
continue
}
rr, _, e = UnpackRR(buf[:off], 0)
if e != nil {
t.Logf("failed to unpack IPSECKEY %s: %s\n", e, t1)
t.Fail()
rr, _, err = UnpackRR(buf[:off], 0)
if err != nil {
t.Errorf("failed to unpack IPSECKEY %v: %s", err, t1)
}
t.Logf("%s\n", rr)
t.Log(rr)
}
}
func TestMsgPackBuffer(t *testing.T) {
var testMessages = []string{
// news.ycombinator.com.in.escapemg.com. IN A, response
"586285830001000000010000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001c0210006000100000e10002c036e7332c02103646e730b67726f6f7665736861726bc02d77ed50e600002a3000000e1000093a8000000e10",
// news.ycombinator.com.in.escapemg.com. IN A, question
"586201000001000000000000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001",
"398781020001000000000000046e6577730b79636f6d62696e61746f7203636f6d0000010001",
}
for i, hexData := range testMessages {
// we won't fail the decoding of the hex
input, _ := hex.DecodeString(hexData)
m := new(Msg)
if err := m.Unpack(input); err != nil {
t.Errorf("packet %d failed to unpack", i)
continue
}
t.Logf("packet %d %s", i, m.String())
}
}


@ -1,16 +1,3 @@
// DNSSEC
//
// DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
// uses public key cryptography to sign resource records. The
// public keys are stored in DNSKEY records and the signatures in RRSIG records.
//
// Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
// to an request.
//
// m := new(dns.Msg)
// m.SetEdns0(4096, true)
//
// Signature generation, signature verification and key generation are all supported.
package dns
import (
@ -422,8 +409,8 @@ func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
}
// Return the signatures base64 encodedig sigdata as a byte slice.
func (s *RRSIG) sigBuf() []byte {
sigbuf, err := fromBase64([]byte(s.Signature))
func (rr *RRSIG) sigBuf() []byte {
sigbuf, err := fromBase64([]byte(rr.Signature))
if err != nil {
return nil
}
@ -588,7 +575,10 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
wires[i] = wire
}
sort.Sort(wires)
for _, wire := range wires {
for i, wire := range wires {
if i > 0 && bytes.Equal(wire, wires[i-1]) {
continue
}
buf = append(buf, wire...)
}
return buf, nil
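Because the canonical wire forms were just sorted, any duplicate RRs in the set are adjacent, so comparing each element with its predecessor is enough to include every distinct record exactly once in the data being signed. The same sort-then-skip-adjacent-duplicates idiom in isolation:

// Standalone sketch; wireSlice is a stand-in for the sortable slice of
// canonical wire forms used above.
package main

import (
    "bytes"
    "fmt"
    "sort"
)

type wireSlice [][]byte

func (p wireSlice) Len() int           { return len(p) }
func (p wireSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p wireSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) < 0 }

func main() {
    wires := wireSlice{[]byte("bbb"), []byte("aaa"), []byte("aaa"), []byte("ccc")}
    sort.Sort(wires)

    var buf []byte
    for i, w := range wires {
        // After sorting, duplicates sit next to each other; skipping anything
        // equal to its predecessor appends each distinct record exactly once.
        if i > 0 && bytes.Equal(w, wires[i-1]) {
            continue
        }
        buf = append(buf, w...)
    }
    fmt.Printf("%s\n", buf) // aaabbbccc
}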


@ -15,8 +15,8 @@ import (
// what kind of DNSKEY will be generated.
// The ECDSA algorithms imply a fixed keysize, in that case
// bits should be set to the size of the algorithm.
func (r *DNSKEY) Generate(bits int) (PrivateKey, error) {
switch r.Algorithm {
func (k *DNSKEY) Generate(bits int) (PrivateKey, error) {
switch k.Algorithm {
case DSA, DSANSEC3SHA1:
if bits != 1024 {
return nil, ErrKeySize
@ -39,7 +39,7 @@ func (r *DNSKEY) Generate(bits int) (PrivateKey, error) {
}
}
switch r.Algorithm {
switch k.Algorithm {
case DSA, DSANSEC3SHA1:
params := new(dsa.Parameters)
if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
@ -51,18 +51,18 @@ func (r *DNSKEY) Generate(bits int) (PrivateKey, error) {
if err != nil {
return nil, err
}
r.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
return (*DSAPrivateKey)(priv), nil
case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
priv, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return nil, err
}
r.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
return (*RSAPrivateKey)(priv), nil
case ECDSAP256SHA256, ECDSAP384SHA384:
var c elliptic.Curve
switch r.Algorithm {
switch k.Algorithm {
case ECDSAP256SHA256:
c = elliptic.P256()
case ECDSAP384SHA384:
@ -72,7 +72,7 @@ func (r *DNSKEY) Generate(bits int) (PrivateKey, error) {
if err != nil {
return nil, err
}
r.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
return (*ECDSAPrivateKey)(priv), nil
default:
return nil, ErrAlg


@ -9,6 +9,8 @@ import (
"strings"
)
// NewPrivateKey returns a PrivateKey by parsing the string s.
// s should be in the same form of the BIND private key files.
func (k *DNSKEY) NewPrivateKey(s string) (PrivateKey, error) {
if s[len(s)-1] != '\n' { // We need a closing newline
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
@ -170,9 +172,9 @@ func parseKey(r io.Reader, file string) (map[string]string, error) {
for l := range c {
// It should alternate
switch l.value {
case _KEY:
case zKey:
k = l.token
case _VALUE:
case zValue:
if k == "" {
return nil, &ParseError{file, "no private key seen", l}
}
@ -202,14 +204,14 @@ func klexer(s *scan, c chan lex) {
}
l.token = str
if key {
l.value = _KEY
l.value = zKey
c <- l
// Next token is a space, eat it
s.tokenText()
key = false
str = ""
} else {
l.value = _VALUE
l.value = zValue
}
case ';':
commt = true
@ -218,7 +220,7 @@ func klexer(s *scan, c chan lex) {
// Reset a comment
commt = false
}
l.value = _VALUE
l.value = zValue
l.token = str
c <- l
str = ""
@ -235,7 +237,7 @@ func klexer(s *scan, c chan lex) {
if len(str) > 0 {
// Send remainder
l.token = str
l.value = _VALUE
l.value = zValue
c <- l
}
}


@ -10,8 +10,9 @@ import (
"strconv"
)
const _FORMAT = "Private-key-format: v1.3\n"
const format = "Private-key-format: v1.3\n"
// PrivateKey ... TODO(miek)
type PrivateKey interface {
Sign([]byte, uint8) ([]byte, error)
String(uint8) string
@ -53,17 +54,17 @@ func (p *RSAPrivateKey) String(alg uint8) string {
// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
// and from: http://code.google.com/p/go/issues/detail?id=987
one := big.NewInt(1)
p_1 := big.NewInt(0).Sub(p.Primes[0], one)
q_1 := big.NewInt(0).Sub(p.Primes[1], one)
exp1 := big.NewInt(0).Mod(p.D, p_1)
exp2 := big.NewInt(0).Mod(p.D, q_1)
p1 := big.NewInt(0).Sub(p.Primes[0], one)
q1 := big.NewInt(0).Sub(p.Primes[1], one)
exp1 := big.NewInt(0).Mod(p.D, p1)
exp2 := big.NewInt(0).Mod(p.D, q1)
coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
exponent1 := toBase64(exp1.Bytes())
exponent2 := toBase64(exp2.Bytes())
coefficient := toBase64(coeff.Bytes())
return _FORMAT +
return format +
"Algorithm: " + algorithm + "\n" +
"Modulus: " + modulus + "\n" +
"PublicExponent: " + publicExponent + "\n" +
@ -106,7 +107,7 @@ func (p *ECDSAPrivateKey) String(alg uint8) string {
intlen = 48
}
private := toBase64(intToBytes(p.D, intlen))
return _FORMAT +
return format +
"Algorithm: " + algorithm + "\n" +
"PrivateKey: " + private + "\n"
}
@ -133,7 +134,7 @@ func (p *DSAPrivateKey) String(alg uint8) string {
base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
priv := toBase64(intToBytes(p.X, 20))
pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
return _FORMAT +
return format +
"Algorithm: " + algorithm + "\n" +
"Prime(p): " + prime + "\n" +
"Subprime(q): " + subprime + "\n" +


@ -45,8 +45,8 @@ func TestGenerateEC(t *testing.T) {
key.Protocol = 3
key.Algorithm = ECDSAP256SHA256
privkey, _ := key.Generate(256)
t.Logf("%s\n", key.String())
t.Logf("%s\n", key.PrivateKeyString(privkey))
t.Log(key.String())
t.Log(key.PrivateKeyString(privkey))
}
func TestGenerateDSA(t *testing.T) {
@ -62,8 +62,8 @@ func TestGenerateDSA(t *testing.T) {
key.Protocol = 3
key.Algorithm = DSA
privkey, _ := key.Generate(1024)
t.Logf("%s\n", key.String())
t.Logf("%s\n", key.PrivateKeyString(privkey))
t.Log(key.String())
t.Log(key.PrivateKeyString(privkey))
}
func TestGenerateRSA(t *testing.T) {
@ -79,8 +79,8 @@ func TestGenerateRSA(t *testing.T) {
key.Protocol = 3
key.Algorithm = RSASHA256
privkey, _ := key.Generate(1024)
t.Logf("%s\n", key.String())
t.Logf("%s\n", key.PrivateKeyString(privkey))
t.Log(key.String())
t.Log(key.PrivateKeyString(privkey))
}
func TestSecure(t *testing.T) {
@ -107,10 +107,9 @@ func TestSecure(t *testing.T) {
key.Algorithm = RSASHA256
key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz"
// It should validate. Period is checked seperately, so this will keep on working
// It should validate. Period is checked separately, so this will keep on working
if sig.Verify(key, []RR{soa}) != nil {
t.Log("failure to validate")
t.Fail()
t.Error("failure to validate")
}
}
@ -131,15 +130,13 @@ func TestSignature(t *testing.T) {
// Should not be valid
if sig.ValidityPeriod(time.Now()) {
t.Log("should not be valid")
t.Fail()
t.Error("should not be valid")
}
sig.Inception = 315565800 //Tue Jan 1 10:10:00 CET 1980
sig.Expiration = 4102477800 //Fri Jan 1 10:10:00 CET 2100
if !sig.ValidityPeriod(time.Now()) {
t.Log("should be valid")
t.Fail()
t.Error("should be valid")
}
}
@ -196,16 +193,14 @@ func TestSignVerify(t *testing.T) {
for _, r := range []RR{soa, soa1, srv} {
if sig.Sign(privkey, []RR{r}) != nil {
t.Log("failure to sign the record")
t.Fail()
t.Error("failure to sign the record")
continue
}
if sig.Verify(key, []RR{r}) != nil {
t.Log("failure to validate")
t.Fail()
t.Error("failure to validate")
continue
}
t.Logf("validated: %s\n", r.Header().Name)
t.Logf("validated: %s", r.Header().Name)
}
}
@ -234,16 +229,14 @@ func Test65534(t *testing.T) {
sig.SignerName = key.Hdr.Name
sig.Algorithm = RSASHA256
if err := sig.Sign(privkey, []RR{t6}); err != nil {
t.Log(err)
t.Log("failure to sign the TYPE65534 record")
t.Fail()
t.Error(err)
t.Error("failure to sign the TYPE65534 record")
}
if err := sig.Verify(key, []RR{t6}); err != nil {
t.Log(err)
t.Log("failure to validate")
t.Fail()
t.Error(err)
t.Error("failure to validate")
} else {
t.Logf("validated: %s\n", t6.Header().Name)
t.Logf("validated: %s", t6.Header().Name)
}
}
@ -271,13 +264,11 @@ Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1
t.Fatal(err)
}
if pubkey.(*DNSKEY).PublicKey != "AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL" {
t.Log("pubkey is not what we've read")
t.Fail()
t.Error("pubkey is not what we've read")
}
if pubkey.(*DNSKEY).PrivateKeyString(privkey) != privStr {
t.Log("privkey is not what we've read")
t.Logf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey))
t.Fail()
t.Error("privkey is not what we've read")
t.Errorf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey))
}
}
@ -294,8 +285,7 @@ func TestTag(t *testing.T) {
tag := key.KeyTag()
if tag != 12051 {
t.Logf("wrong key tag: %d for key %v\n", tag, key)
t.Fail()
t.Errorf("wrong key tag: %d for key %v", tag, key)
}
}
@ -335,13 +325,11 @@ func TestKeyRSA(t *testing.T) {
sig.SignerName = key.Hdr.Name
if err := sig.Sign(priv, []RR{soa}); err != nil {
t.Logf("failed to sign")
t.Fail()
t.Error("failed to sign")
return
}
if err := sig.Verify(key, []RR{soa}); err != nil {
t.Logf("failed to verify")
t.Fail()
t.Error("failed to verify")
}
}
@ -358,8 +346,7 @@ func TestKeyToDS(t *testing.T) {
ds := key.ToDS(SHA1)
if strings.ToUpper(ds.Digest) != "B5121BDB5B8D86D0CC5FFAFBAAABE26C3E20BAC1" {
t.Logf("wrong DS digest for SHA1\n%v\n", ds)
t.Fail()
t.Errorf("wrong DS digest for SHA1\n%v", ds)
}
}
@ -384,23 +371,18 @@ Activate: 20110302104537`
k := xk.(*DNSKEY)
p, err := k.NewPrivateKey(priv)
if err != nil {
t.Logf("%v\n", err)
t.Fail()
t.Error(err)
}
switch priv := p.(type) {
case *RSAPrivateKey:
if 65537 != priv.PublicKey.E {
t.Log("exponenent should be 65537")
t.Fail()
t.Error("exponenent should be 65537")
}
default:
t.Logf("we should have read an RSA key: %v", priv)
t.Fail()
t.Errorf("we should have read an RSA key: %v", priv)
}
if k.KeyTag() != 37350 {
t.Logf("%d %v\n", k.KeyTag(), k)
t.Log("keytag should be 37350")
t.Fail()
t.Errorf("keytag should be 37350, got %d %v", k.KeyTag(), k)
}
soa := new(SOA)
@ -423,9 +405,7 @@ Activate: 20110302104537`
sig.Sign(p, []RR{soa})
if sig.Signature != "D5zsobpQcmMmYsUMLxCVEtgAdCvTu8V/IEeP4EyLBjqPJmjt96bwM9kqihsccofA5LIJ7DN91qkCORjWSTwNhzCv7bMyr2o5vBZElrlpnRzlvsFIoAZCD9xg6ZY7ZyzUJmU6IcTwG4v3xEYajcpbJJiyaw/RqR90MuRdKPiBzSo=" {
t.Log("signature is not correct")
t.Logf("%v\n", sig)
t.Fail()
t.Errorf("signature is not correct: %v", sig)
}
}
@ -440,13 +420,13 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
eckey, err := NewRR(pub)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
// TODO: Create seperate test for this
// TODO: Create separate test for this
ds := eckey.(*DNSKEY).ToDS(SHA384)
if ds.KeyTag != 10771 {
t.Fatal("wrong keytag on DS")
@ -467,22 +447,21 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
t.Fatal("failure to sign the record")
}
if e := sig.Verify(eckey.(*DNSKEY), []RR{a}); e != nil {
t.Logf("\n%s\n%s\n%s\n\n%s\n\n",
if err := sig.Verify(eckey.(*DNSKEY), []RR{a}); err != nil {
t.Fatalf("Failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v",
eckey.(*DNSKEY).String(),
a.String(),
sig.String(),
eckey.(*DNSKEY).PrivateKeyString(privkey),
err,
)
t.Fatalf("failure to validate: %s", e.Error())
}
}
func TestSignVerifyECDSA2(t *testing.T) {
srv1, err := NewRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.")
if err != nil {
t.Fatalf(err.Error())
t.Fatal(err)
}
srv := srv1.(*SRV)
@ -518,14 +497,13 @@ func TestSignVerifyECDSA2(t *testing.T) {
err = sig.Verify(key, []RR{srv})
if err != nil {
t.Logf("\n%s\n%s\n%s\n\n%s\n\n",
t.Logf("Failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v",
key.String(),
srv.String(),
sig.String(),
key.PrivateKeyString(privkey),
err,
)
t.Fatal("Failure to validate:", err)
}
}
@ -540,11 +518,11 @@ Algorithm: 13 (ECDSAP256SHA256)
PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
rrDNSKEY, err := NewRR(exDNSKEY)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
exDS := `example.net. 3600 IN DS 55648 13 2 (
@ -552,11 +530,11 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
e2770ce6d6e37df61d17 )`
rrDS, err := NewRR(exDS)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256)
if !reflect.DeepEqual(ourDS, rrDS.(*DS)) {
t.Errorf("DS record differs:\n%v\n%v\n", ourDS, rrDS.(*DS))
t.Errorf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS))
}
exA := `www.example.net. 3600 IN A 192.0.2.1`
@ -566,11 +544,11 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )`
rrA, err := NewRR(exA)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
rrRRSIG, err := NewRR(exRRSIG)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
t.Errorf("Failure to validate the spec RRSIG: %v", err)
@ -588,7 +566,7 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
ourRRSIG.Inception, _ = StringToTime("20100812100439")
err = ourRRSIG.Sign(priv, []RR{rrA})
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
@ -599,7 +577,7 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
rrRRSIG.(*RRSIG).Signature = ""
ourRRSIG.Signature = ""
if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) {
t.Fatalf("RRSIG record differs:\n%v\n%v\n", ourRRSIG, rrRRSIG.(*RRSIG))
t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG))
}
}
@ -614,11 +592,11 @@ Algorithm: 14 (ECDSAP384SHA384)
PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
rrDNSKEY, err := NewRR(exDNSKEY)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
exDS := `example.net. 3600 IN DS 10771 14 4 (
@ -627,11 +605,11 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
6df983d6 )`
rrDS, err := NewRR(exDS)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA384)
if !reflect.DeepEqual(ourDS, rrDS.(*DS)) {
t.Fatalf("DS record differs:\n%v\n%v\n", ourDS, rrDS.(*DS))
t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS))
}
exA := `www.example.net. 3600 IN A 192.0.2.1`
@ -642,11 +620,11 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
WTSSPdz7wnqXL5bdcJzusdnI0RSMROxxwGipWcJm )`
rrA, err := NewRR(exA)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
rrRRSIG, err := NewRR(exRRSIG)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
t.Errorf("Failure to validate the spec RRSIG: %v", err)
@ -664,7 +642,7 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
ourRRSIG.Inception, _ = StringToTime("20100812102025")
err = ourRRSIG.Sign(priv, []RR{rrA})
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
@ -675,6 +653,6 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
rrRRSIG.(*RRSIG).Signature = ""
ourRRSIG.Signature = ""
if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) {
t.Fatalf("RRSIG record differs:\n%v\n%v\n", ourRRSIG, rrRRSIG.(*RRSIG))
t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG))
}
}

247
Godeps/_workspace/src/github.com/miekg/dns/doc.go generated vendored Normal file
View file

@ -0,0 +1,247 @@
/*
Package dns implements a full featured interface to the Domain Name System.
Server- and client-side programming is supported.
The package allows complete control over what is sent out to the DNS. The package
API follows the less-is-more principle, by presenting a small, clean interface.
The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
Note that domain names MUST be fully qualified before sending them; unqualified
names in a message will result in a packing failure.
Resource records are native types. They are not stored in wire format.
Basic usage pattern for creating a new resource record:
r := new(dns.MX)
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
r.Preference = 10
r.Mx = "mx.miek.nl."
Or directly from a string:
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
Or when the default TTL (3600) and class (IN) suit you:
mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
Or even:
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
In the DNS, messages are exchanged; these messages contain resource
records (sets). Basic use pattern for creating a message:
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
Or when not certain if the domain name is fully qualified:
m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
The message m is now a message with the question section set to ask
the MX records for the miek.nl. zone.
The following is slightly more verbose, but more flexible:
m1 := new(dns.Msg)
m1.Id = dns.Id()
m1.RecursionDesired = true
m1.Question = make([]dns.Question, 1)
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
After creating a message it can be sent.
Basic use pattern for synchronously querying the DNS at a
server configured on 127.0.0.1 and port 53:
c := new(dns.Client)
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
Suppressing
multiple outstanding queries (with the same question, type and class) is as easy as setting:
c.SingleInflight = true
If these "advanced" features are not needed, a simple UDP query can be send,
with:
in, err := dns.Exchange(m1, "127.0.0.1:53")
When this function returns you will get a DNS message. A DNS message consists
of four sections.
The question section: in.Question, the answer section: in.Answer,
the authority section: in.Ns and the additional section: in.Extra.
Each of these sections (except the Question section) contains a []RR. Basic
use pattern for accessing the rdata of a TXT RR as the first RR in
the Answer section:
if t, ok := in.Answer[0].(*dns.TXT); ok {
// do something with t.Txt
}
Domain Name and TXT Character String Representations
Both domain names and TXT character strings are converted to presentation
form both when unpacked and when converted to strings.
For TXT character strings, tabs, carriage returns and line feeds will be
converted to \t, \r and \n respectively. Backslashes and quotation marks
will be escaped. Bytes below 32 and above 127 will be converted to \DDD
form.
For domain names, in addition to the above rules, brackets, periods,
spaces, semicolons and the at symbol are escaped.
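A small sketch of what this means in practice (the owner name and rdata are made up; this mirrors the escaping tests elsewhere in this package):
	rr, _ := dns.NewRR(`aa\.bb.nl. 3600 IN TXT "example"`)
	// rr.String() still shows the first label as aa\.bb.nl. - the dot inside
	// the label stays escaped in presentation form.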
DNSSEC
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
uses public key cryptography to sign resource records. The
public keys are stored in DNSKEY records and the signatures in RRSIG records.
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
to a request.
m := new(dns.Msg)
m.SetEdns0(4096, true)
Signature generation, signature verification and key generation are all supported.
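A minimal sketch of generating a key and then signing and verifying an RRset, pieced together from this package's dnssec tests; the names, the 1024 bit key size, the flag value 256 and the timestamps are illustrative assumptions, and Sign is assumed to fill in the remaining RRSIG fields from the RRset:
	key := new(dns.DNSKEY)
	key.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
	key.Flags = 256 // assumed: a zone signing key
	key.Protocol = 3
	key.Algorithm = dns.RSASHA256
	privkey, err := key.Generate(1024)
	if err != nil {
		// key generation failed
	}
	a, _ := dns.NewRR("www.miek.nl. 3600 IN A 192.0.2.1")
	sig := new(dns.RRSIG)
	sig.KeyTag = key.KeyTag()
	sig.SignerName = key.Hdr.Name
	sig.Algorithm = key.Algorithm
	sig.Inception, _ = dns.StringToTime("20150101000000")
	sig.Expiration, _ = dns.StringToTime("20160101000000")
	if err := sig.Sign(privkey, []dns.RR{a}); err != nil {
		// signing failed
	}
	if err := sig.Verify(key, []dns.RR{a}); err != nil {
		// the signature does not validate
	}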
DYNAMIC UPDATES
Dynamic updates reuse the DNS message format, but rename three of
the sections. Question is Zone, Answer is Prerequisite, Authority is
Update; only the Additional section is not renamed. See RFC 2136 for the gory details.
You can set a rather complex set of rules for the existence or absence of
certain resource records or names in a zone to specify whether resource records
should be added or removed. The table from RFC 2136, supplemented with the Go
DNS functions, shows which functions exist to specify the prerequisites.
3.2.4 - Table Of Metavalues Used In Prerequisite Section
CLASS TYPE RDATA Meaning Function
--------------------------------------------------------------
ANY ANY empty Name is in use dns.NameUsed
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
NONE ANY empty Name is not in use dns.NameNotUsed
NONE rrset empty RRset does not exist dns.RRsetNotUsed
zone rrset rr RRset exists (value dep) dns.Used
The prerequisite section can also be left empty.
If you have decided on the prerequisites you can tell what RRs should
be added or deleted. The next table shows the options you have and
what functions to call; a short sketch follows the table.
3.4.2.6 - Table Of Metavalues Used In Update Section
CLASS TYPE RDATA Meaning Function
---------------------------------------------------------------
ANY ANY empty Delete all RRsets from name dns.RemoveName
ANY rrset empty Delete an RRset dns.RemoveRRset
NONE rrset rr Delete an RR from RRset dns.Remove
zone rrset rr Add to an RRset dns.Insert
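A hedged sketch of building such an update message; it assumes the helpers named in the tables are methods on *Msg in this version of the package and that SetUpdate fills the Zone section, and the names are made up:
	rr, _ := dns.NewRR("name.miek.nl. 3600 IN A 192.0.2.1")
	m := new(dns.Msg)
	m.SetUpdate("miek.nl.")     // the zone we want to update
	m.NameNotUsed([]dns.RR{rr}) // prerequisite: the name must not exist yet
	m.Insert([]dns.RR{rr})      // add the RR to its RRset
	c := new(dns.Client)
	in, _, err := c.Exchange(m, "127.0.0.1:53")
	// inspect err and in.Rcode to see whether the update was accepted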
TRANSACTION SIGNATURE
A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
must be fully qualified - as they are domain names) and the base64 secret
"so6ZGir4GPAqINNh9U5c3A==":
c := new(dns.Client)
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
...
// When the message is sent, the TSIG RR is calculated and filled in
When requesting a zone transfer (almost all TSIG usage is when requesting zone transfers) with
TSIG, this is the basic use pattern. In this example we request an AXFR for
miek.nl. with a TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
and using the server 176.58.119.54:
t := new(dns.Transfer)
m := new(dns.Msg)
t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m.SetAxfr("miek.nl.")
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
c, err := t.In(m, "176.58.119.54:53")
for r := range c { ... }
You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
If something is not correct, an error is returned.
Basic use pattern for validating and replying to a message that has TSIG set:
server := &dns.Server{Addr: ":53", Net: "udp"}
server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
go server.ListenAndServe()
dns.HandleFunc(".", handleRequest)
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
m := new(dns.Msg)
m.SetReply(r)
if r.IsTsig() {
if w.TsigStatus() == nil {
// *Msg r has a TSIG record and it was validated
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
} else {
// *Msg r has a TSIG record and it was not validated
}
}
w.WriteMsg(m)
}
PRIVATE RRS
RFC 6895 sets aside a range of type codes for private use. This range
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records, these
can be used before requesting an official type code from IANA.
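A hedged sketch of registering such a private type, modelled on the ISBN example in this package's own privaterr tests; the PrivateRdata method set shown here is an assumption about this version of the API and the ISBN type is a toy:
	const TypeISBN = 0x0F01 // somewhere inside the private range

	// ISBN is a toy rdata type that carries a single text field.
	type ISBN struct {
		x string
	}

	func NewISBN() dns.PrivateRdata { return &ISBN{""} }

	func (rd *ISBN) Len() int       { return len([]byte(rd.x)) }
	func (rd *ISBN) String() string { return rd.x }

	func (rd *ISBN) Parse(txt []string) error {
		rd.x = strings.TrimSpace(strings.Join(txt, " "))
		return nil
	}

	func (rd *ISBN) Pack(buf []byte) (int, error) {
		b := []byte(rd.x)
		n := copy(buf, b)
		if n != len(b) {
			return n, dns.ErrBuf
		}
		return n, nil
	}

	func (rd *ISBN) Unpack(buf []byte) (int, error) {
		rd.x = string(buf)
		return len(buf), nil
	}

	func (rd *ISBN) Copy(dest dns.PrivateRdata) error {
		isbn, ok := dest.(*ISBN)
		if !ok {
			return dns.ErrRdata
		}
		isbn.x = rd.x
		return nil
	}

	// Registration is typically done once, e.g. in an init function, after
	// which the new type parses and prints like any other RR:
	dns.PrivateHandle("ISBN", TypeISBN, NewISBN)
	rr, err := dns.NewRR("example.org. IN ISBN 90-1234-567-8")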
EDNS0
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
abused.
Basic use pattern for creating an (empty) OPT RR:
o := new(dns.OPT)
o.Hdr.Name = "." // MUST be the root zone, per definition.
o.Hdr.Rrtype = dns.TypeOPT
The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891)
interfaces. Currently only a few have been standardized: EDNS0_NSID
(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
that these options may be combined in an OPT RR.
Basic use pattern for a server to check if (and which) options are set:
// o is a dns.OPT
for _, s := range o.Option {
switch e := s.(type) {
case *dns.EDNS0_NSID:
// do stuff with e.Nsid
case *dns.EDNS0_SUBNET:
// access e.Family, e.Address, etc.
}
}
SIG(0)
From RFC 2931:
SIG(0) provides protection for DNS transactions and requests ....
... protection for glue records, DNS requests, protection for message headers
on requests and responses, and protection of the overall integrity of a response.
It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
secret approach in TSIG.
Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
RSASHA512.
Signing subsequent messages in multi-message sessions is not implemented.
*/
package dns

View file

@ -1,29 +1,3 @@
// EDNS0
//
// EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
// by RFC 6891. It defines an new RR type, the OPT RR, which is then completely
// abused.
// Basic use pattern for creating an (empty) OPT RR:
//
// o := new(dns.OPT)
// o.Hdr.Name = "." // MUST be the root zone, per definition.
// o.Hdr.Rrtype = dns.TypeOPT
//
// The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891)
// interfaces. Currently only a few have been standardized: EDNS0_NSID
// (RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
// that these options may be combined in an OPT RR.
// Basic use pattern for a server to check if (and which) options are set:
//
// // o is a dns.OPT
// for _, s := range o.Option {
// switch e := s.(type) {
// case *dns.EDNS0_NSID:
// // do stuff with e.Nsid
// case *dns.EDNS0_SUBNET:
// // access e.Family, e.Address, etc.
// }
// }
package dns
import (
@ -44,9 +18,13 @@ const (
EDNS0SUBNET = 0x8 // client-subnet (RFC6891)
EDNS0EXPIRE = 0x9 // EDNS0 expire
EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
_DO = 1 << 15 // dnssec ok
)
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
// See RFC 6891.
type OPT struct {
Hdr RR_Header
Option []EDNS0 `dns:"opt"`
@ -92,6 +70,8 @@ func (rr *OPT) String() string {
s += "\n; DS HASH UNDERSTOOD: " + o.String()
case *EDNS0_N3U:
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
case *EDNS0_LOCAL:
s += "\n; LOCAL OPT: " + o.String()
}
}
return s
@ -100,8 +80,9 @@ func (rr *OPT) String() string {
func (rr *OPT) len() int {
l := rr.Hdr.len()
for i := 0; i < len(rr.Option); i++ {
l += 4 // Account for 2-byte option code and 2-byte option length.
lo, _ := rr.Option[i].pack()
l += 2 + len(lo)
l += len(lo)
}
return l
}
@ -499,3 +480,44 @@ func (e *EDNS0_EXPIRE) unpack(b []byte) error {
e.Expire = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
return nil
}
// The local EDNS0 option is used for local/experimental purposes. The option
// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
// (RFC6891), although any unassigned code can actually be used. The content of
// the option is made available in Data, unaltered.
// Basic use pattern for creating a local option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_LOCAL)
// e.Code = dns.EDNS0LOCALSTART
// e.Data = []byte{72, 82, 74}
// o.Option = append(o.Option, e)
type EDNS0_LOCAL struct {
Code uint16
Data []byte
}
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
func (e *EDNS0_LOCAL) String() string {
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
}
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
b := make([]byte, len(e.Data))
copied := copy(b, e.Data)
if copied != len(e.Data) {
return nil, ErrBuf
}
return b, nil
}
func (e *EDNS0_LOCAL) unpack(b []byte) error {
e.Data = make([]byte, len(b))
copied := copy(e.Data, b)
if copied != len(b) {
return ErrBuf
}
return nil
}

View file

@ -216,8 +216,9 @@ func decode(b []byte) []byte {
}
out := make([]rune, 0, len(b))
b = b[len(_PREFIX):]
for pos, x := range b {
if x == _DELIMITER {
for pos := len(b) - 1; pos >= 0; pos-- {
// only the last delimiter is of interest
if b[pos] == _DELIMITER {
out = append(out, bytes.Runes(b[:pos])...)
b = b[pos+1:] // trim source string
break

View file

@ -9,10 +9,12 @@ var testcases = [][2]string{
{"", ""},
{"a", "a"},
{"A-B", "a-b"},
{"A-B-C", "a-b-c"},
{"AbC", "abc"},
{"я", "xn--41a"},
{"zя", "xn--z-0ub"},
{"ЯZ", "xn--z-zub"},
{"а-я", "xn----7sb8g"},
{"إختبار", "xn--kgbechtv"},
{"آزمایشی", "xn--hgbk6aj7f53bba"},
{"测试", "xn--0zwm56d"},
@ -24,6 +26,7 @@ var testcases = [][2]string{
{"טעסט", "xn--deba0ad"},
{"テスト", "xn--zckzah"},
{"பரிட்சை", "xn--hlcj6aya9esc7a"},
{"mamão-com-açúcar", "xn--mamo-com-acar-yeb1e6q"},
}
func TestEncodeDecodePunycode(t *testing.T) {

View file

@ -13,34 +13,27 @@ func TestCompareDomainName(t *testing.T) {
s6 := "miek.nl"
if CompareDomainName(s1, s2) != 2 {
t.Logf("%s with %s should be %d", s1, s2, 2)
t.Fail()
t.Errorf("%s with %s should be %d", s1, s2, 2)
}
if CompareDomainName(s1, s3) != 1 {
t.Logf("%s with %s should be %d", s1, s3, 1)
t.Fail()
t.Errorf("%s with %s should be %d", s1, s3, 1)
}
if CompareDomainName(s3, s4) != 0 {
t.Logf("%s with %s should be %d", s3, s4, 0)
t.Fail()
t.Errorf("%s with %s should be %d", s3, s4, 0)
}
// Non qualified tests
if CompareDomainName(s1, s5) != 1 {
t.Logf("%s with %s should be %d", s1, s5, 1)
t.Fail()
t.Errorf("%s with %s should be %d", s1, s5, 1)
}
if CompareDomainName(s1, s6) != 2 {
t.Logf("%s with %s should be %d", s1, s5, 2)
t.Fail()
t.Errorf("%s with %s should be %d", s1, s5, 2)
}
if CompareDomainName(s1, ".") != 0 {
t.Logf("%s with %s should be %d", s1, s5, 0)
t.Fail()
t.Errorf("%s with %s should be %d", s1, s5, 0)
}
if CompareDomainName(".", ".") != 0 {
t.Logf("%s with %s should be %d", ".", ".", 0)
t.Fail()
t.Errorf("%s with %s should be %d", ".", ".", 0)
}
}
@ -59,10 +52,9 @@ func TestSplit(t *testing.T) {
}
for s, i := range splitter {
if x := len(Split(s)); x != i {
t.Logf("labels should be %d, got %d: %s %v\n", i, x, s, Split(s))
t.Fail()
t.Errorf("labels should be %d, got %d: %s %v", i, x, s, Split(s))
} else {
t.Logf("%s %v\n", s, Split(s))
t.Logf("%s %v", s, Split(s))
}
}
}
@ -78,13 +70,11 @@ func TestSplit2(t *testing.T) {
switch len(i) {
case 1:
if x[0] != i[0] {
t.Logf("labels should be %v, got %v: %s\n", i, x, s)
t.Fail()
t.Errorf("labels should be %v, got %v: %s", i, x, s)
}
default:
if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] {
t.Logf("labels should be %v, got %v: %s\n", i, x, s)
t.Fail()
t.Errorf("labels should be %v, got %v: %s", i, x, s)
}
}
}
@ -113,8 +103,7 @@ func TestPrevLabel(t *testing.T) {
for s, i := range prever {
x, ok := PrevLabel(s.string, s.int)
if i != x {
t.Logf("label should be %d, got %d, %t: preving %d, %s\n", i, x, ok, s.int, s.string)
t.Fail()
t.Errorf("label should be %d, got %d, %t: preving %d, %s", i, x, ok, s.int, s.string)
}
}
}
@ -129,8 +118,7 @@ func TestCountLabel(t *testing.T) {
for s, i := range splitter {
x := CountLabel(s)
if x != i {
t.Logf("CountLabel should have %d, got %d\n", i, x)
t.Fail()
t.Errorf("CountLabel should have %d, got %d", i, x)
}
}
}
@ -149,14 +137,12 @@ domainLoop:
for domain, splits := range labels {
parts := SplitDomainName(domain)
if len(parts) != len(splits) {
t.Logf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
t.Fail()
t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
continue domainLoop
}
for i := range parts {
if parts[i] != splits[i] {
t.Logf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
t.Fail()
t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
continue domainLoop
}
}
@ -180,9 +166,8 @@ func TestIsDomainName(t *testing.T) {
for d, ok := range names {
l, k := IsDomainName(d)
if ok.ok != k || ok.lab != l {
t.Logf(" got %v %d for %s ", k, l, d)
t.Logf("have %v %d for %s ", ok.ok, ok.lab, d)
t.Fail()
t.Errorf(" got %v %d for %s ", k, l, d)
t.Errorf("have %v %d for %s ", ok.ok, ok.lab, d)
}
}
}

View file

@ -23,29 +23,40 @@ import (
const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
var (
ErrAlg error = &Error{err: "bad algorithm"}
ErrAuth error = &Error{err: "bad authentication"}
ErrBuf error = &Error{err: "buffer size too small"}
ErrConnEmpty error = &Error{err: "conn has no connection"}
ErrConn error = &Error{err: "conn holds both UDP and TCP connection"}
// ErrAlg indicates an error with the (DNSSEC) algorithm.
ErrAlg error = &Error{err: "bad algorithm"}
// ErrAuth indicates an error in the TSIG authentication.
ErrAuth error = &Error{err: "bad authentication"}
// ErrBuf indicates that the buffer used is too small for the message.
ErrBuf error = &Error{err: "buffer size too small"}
// ErrConn indicates that a connection has both a TCP and UDP socket.
ErrConn error = &Error{err: "conn holds both UDP and TCP connection"}
// ErrConnEmpty indicates a connection is being used before it is initialized.
ErrConnEmpty error = &Error{err: "conn has no connection"}
// ErrExtendedRcode ...
ErrExtendedRcode error = &Error{err: "bad extended rcode"}
ErrFqdn error = &Error{err: "domain must be fully qualified"}
ErrId error = &Error{err: "id mismatch"}
ErrKeyAlg error = &Error{err: "bad key algorithm"}
ErrKey error = &Error{err: "bad key"}
ErrKeySize error = &Error{err: "bad key size"}
ErrNoSig error = &Error{err: "no signature found"}
ErrPrivKey error = &Error{err: "bad private key"}
ErrRcode error = &Error{err: "bad rcode"}
ErrRdata error = &Error{err: "bad rdata"}
ErrRRset error = &Error{err: "bad rrset"}
ErrSecret error = &Error{err: "no secrets defined"}
ErrServ error = &Error{err: "no servers could be reached"}
ErrShortRead error = &Error{err: "short read"}
ErrSig error = &Error{err: "bad signature"}
ErrSigGen error = &Error{err: "bad signature generation"}
ErrSoa error = &Error{err: "no SOA"}
ErrTime error = &Error{err: "bad time"}
// ErrFqdn indicates that a domain name does not have a closing dot.
ErrFqdn error = &Error{err: "domain must be fully qualified"}
// ErrId indicates there is a mismatch with the message's ID.
ErrId error = &Error{err: "id mismatch"}
ErrKeyAlg error = &Error{err: "bad key algorithm"}
ErrKey error = &Error{err: "bad key"}
ErrKeySize error = &Error{err: "bad key size"}
ErrNoSig error = &Error{err: "no signature found"}
ErrPrivKey error = &Error{err: "bad private key"}
ErrRcode error = &Error{err: "bad rcode"}
ErrRdata error = &Error{err: "bad rdata"}
ErrRRset error = &Error{err: "bad rrset"}
ErrSecret error = &Error{err: "no secrets defined"}
ErrShortRead error = &Error{err: "short read"}
// ErrSig indicates that a signature can not be cryptographically validated.
ErrSig error = &Error{err: "bad signature"}
// ErrSigGen indicates a failure to generate a signature.
ErrSigGen error = &Error{err: "bad signature generation"}
// ErrSoa indicates that no SOA RR was seen when doing zone transfers.
ErrSoa error = &Error{err: "no SOA"}
// ErrTime indicates a timing error in TSIG authentication.
ErrTime error = &Error{err: "bad time"}
)
// Id, by default, returns a 16-bit random number to be used as a
@ -56,8 +67,7 @@ var (
// dns.Id = func() uint16 { return 3 }
var Id func() uint16 = id
// A manually-unpacked version of (id, bits).
// This is in its own struct for easy printing.
// MsgHdr is a manually-unpacked version of (id, bits).
type MsgHdr struct {
Id uint16
Response bool
@ -72,7 +82,7 @@ type MsgHdr struct {
Rcode int
}
// The layout of a DNS message.
// Msg contains the layout of a DNS message.
type Msg struct {
MsgHdr
Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. This not part of the official DNS packet format.
@ -82,7 +92,7 @@ type Msg struct {
Extra []RR // Holds the RR(s) of the additional section.
}
// Map of strings for each RR wire type.
// TypeToString is a map of strings for each RR wire type.
var TypeToString = map[uint16]string{
TypeA: "A",
TypeAAAA: "AAAA",
@ -161,8 +171,10 @@ var TypeToString = map[uint16]string{
TypeX25: "X25",
}
// Reverse, needed for string parsing.
// StringToType is the reverse of TypeToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
// Map of opcodes strings.
@ -171,7 +183,7 @@ var StringToOpcode = reverseInt(OpcodeToString)
// Map of rcodes strings.
var StringToRcode = reverseInt(RcodeToString)
// Map of strings for each CLASS wire type.
// ClassToString maps Classes to strings for each CLASS wire type.
var ClassToString = map[uint16]string{
ClassINET: "IN",
ClassCSNET: "CS",
@ -181,7 +193,7 @@ var ClassToString = map[uint16]string{
ClassANY: "ANY",
}
// Map of strings for opcodes.
// OpcodeToString maps Opcodes to strings.
var OpcodeToString = map[int]string{
OpcodeQuery: "QUERY",
OpcodeIQuery: "IQUERY",
@ -190,7 +202,7 @@ var OpcodeToString = map[int]string{
OpcodeUpdate: "UPDATE",
}
// Map of strings for rcodes.
// RcodeToString maps Rcodes to strings.
var RcodeToString = map[int]string{
RcodeSuccess: "NOERROR",
RcodeFormatError: "FORMERR",
@ -264,7 +276,7 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
// Emit sequence of counted strings, chopping at dots.
begin := 0
bs := []byte(s)
ro_bs, bs_fresh, escaped_dot := s, true, false
roBs, bsFresh, escapedDot := s, true, false
for i := 0; i < ls; i++ {
if bs[i] == '\\' {
for j := i; j < ls-1; j++ {
@ -288,13 +300,13 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
} else if bs[i] == 'n' {
bs[i] = '\n'
}
escaped_dot = bs[i] == '.'
bs_fresh = false
escapedDot = bs[i] == '.'
bsFresh = false
continue
}
if bs[i] == '.' {
if i > 0 && bs[i-1] == '.' && !escaped_dot {
if i > 0 && bs[i-1] == '.' && !escapedDot {
// two dots back to back is not legal
return lenmsg, labels, ErrRdata
}
@ -320,16 +332,16 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
}
off++
}
if compress && !bs_fresh {
ro_bs = string(bs)
bs_fresh = true
if compress && !bsFresh {
roBs = string(bs)
bsFresh = true
}
// Dont try to compress '.'
if compress && ro_bs[begin:] != "." {
if p, ok := compression[ro_bs[begin:]]; !ok {
if compress && roBs[begin:] != "." {
if p, ok := compression[roBs[begin:]]; !ok {
// Only offsets smaller than this can be used.
if offset < maxCompressionOffset {
compression[ro_bs[begin:]] = offset
compression[roBs[begin:]] = offset
}
} else {
// The first hit is the longest matching dname
@ -348,7 +360,7 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
labels++
begin = i + 1
}
escaped_dot = false
escapedDot = false
}
// Root label is special
if len(bs) == 1 && bs[0] == '.' {
@ -945,7 +957,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
return lenmsg, &Error{"bad tag unpacking slice: " + val.Type().Field(i).Tag.Get("dns")}
case `dns:"domain-name"`:
// HIP record slice of name (or none)
servers := make([]string, 0)
var servers []string
var s string
for off < lenrd {
s, off, err = UnpackDomainName(msg, off)
@ -971,7 +983,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
// We can safely return here.
break
}
edns := make([]EDNS0, 0)
var edns []EDNS0
Option:
code := uint16(0)
if off+2 > lenmsg {
@ -1036,7 +1048,12 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
edns = append(edns, e)
off = off1 + int(optlen)
default:
// do nothing?
e := new(EDNS0_LOCAL)
e.Code = code
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
return lenmsg, err
}
edns = append(edns, e)
off = off1 + int(optlen)
}
if off < lenrd {
@ -1077,7 +1094,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
off += net.IPv6len
case `dns:"wks"`:
// Rest of the record is the bitmap
serv := make([]uint16, 0)
var serv []uint16
j := 0
for off < lenrd {
if off+1 > lenmsg {
@ -1121,7 +1138,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
if off+2 > lenrd || off+2 > lenmsg {
return lenmsg, &Error{err: "overflow unpacking nsecx"}
}
nsec := make([]uint16, 0)
var nsec []uint16
length := 0
window := 0
for off+2 < lenrd {
@ -1903,7 +1920,11 @@ func Copy(r RR) RR {
// Copy returns a new *Msg which is a deep-copy of dns.
func (dns *Msg) Copy() *Msg {
r1 := new(Msg)
return dns.CopyTo(new(Msg))
}
// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
func (dns *Msg) CopyTo(r1 *Msg) *Msg {
r1.MsgHdr = dns.MsgHdr
r1.Compress = dns.Compress

View file

@ -50,6 +50,8 @@ func HashName(label string, ha uint8, iter uint16, salt string) string {
return toBase32(nsec3)
}
// Denialer is an interface that should be implemented by types that are used to deny
// answers in DNSSEC.
type Denialer interface {
// Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
Cover(name string) bool

View file

@ -7,14 +7,12 @@ import (
func TestPackNsec3(t *testing.T) {
nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD")
if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" {
t.Logf("%v\n", nsec3)
t.Fail()
t.Error(nsec3)
}
nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD")
if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" {
t.Logf("%v\n", nsec3)
t.Fail()
t.Error(nsec3)
}
}
@ -22,12 +20,10 @@ func TestNsec3(t *testing.T) {
// examples taken from .nl
nsec3, _ := NewRR("39p91242oslggest5e6a7cci4iaeqvnk.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6 NS DS RRSIG")
if !nsec3.(*NSEC3).Cover("snasajsksasasa.nl.") { // 39p94jrinub66hnpem8qdpstrec86pg3
t.Logf("39p94jrinub66hnpem8qdpstrec86pg3. should be covered by 39p91242oslggest5e6a7cci4iaeqvnk.nl. - 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6")
t.Fail()
t.Error("39p94jrinub66hnpem8qdpstrec86pg3. should be covered by 39p91242oslggest5e6a7cci4iaeqvnk.nl. - 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6")
}
nsec3, _ = NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM")
if !nsec3.(*NSEC3).Match("nl.") { // sk4e8fj94u78smusb40o1n0oltbblu2r.nl.
t.Logf("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.")
t.Fail()
t.Error("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.")
}
}

View file

@ -20,19 +20,16 @@ func TestDotInName(t *testing.T) {
PackDomainName("aa\\.bb.nl.", buf, 0, nil, false)
// index 3 must be a real dot
if buf[3] != '.' {
t.Log("dot should be a real dot")
t.Fail()
t.Error("dot should be a real dot")
}
if buf[6] != 2 {
t.Log("this must have the value 2")
t.Fail()
t.Error("this must have the value 2")
}
dom, _, _ := UnpackDomainName(buf, 0)
// printing it should yield the backspace again
if dom != "aa\\.bb.nl." {
t.Log("dot should have been escaped: " + dom)
t.Fail()
t.Error("dot should have been escaped: ", dom)
}
}
@ -41,7 +38,7 @@ func TestDotLastInLabel(t *testing.T) {
buf := make([]byte, 20)
_, err := PackDomainName(sample, buf, 0, nil, false)
if err != nil {
t.Fatalf("unexpected error packing domain: %s", err)
t.Fatalf("unexpected error packing domain: %v", err)
}
dom, _, _ := UnpackDomainName(buf, 0)
if dom != sample {
@ -52,19 +49,17 @@ func TestDotLastInLabel(t *testing.T) {
func TestTooLongDomainName(t *testing.T) {
l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt."
dom := l + l + l + l + l + l + l
_, e := NewRR(dom + " IN A 127.0.0.1")
if e == nil {
t.Log("should be too long")
t.Fail()
_, err := NewRR(dom + " IN A 127.0.0.1")
if err == nil {
t.Error("should be too long")
} else {
t.Logf("error is %s", e.Error())
t.Logf("error is %v", err)
}
_, e = NewRR("..com. IN A 127.0.0.1")
if e == nil {
t.Log("should fail")
t.Fail()
_, err = NewRR("..com. IN A 127.0.0.1")
if err == nil {
t.Error("should fail")
} else {
t.Logf("error is %s", e.Error())
t.Logf("error is %v", err)
}
}
@ -76,19 +71,16 @@ func TestDomainName(t *testing.T) {
for _, ts := range tests {
if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil {
t.Log("not a valid domain name")
t.Fail()
t.Error("not a valid domain name")
continue
}
n, _, err := UnpackDomainName(dbuff, 0)
if err != nil {
t.Log("failed to unpack packed domain name")
t.Fail()
t.Error("failed to unpack packed domain name")
continue
}
if ts != n {
t.Logf("must be equal: in: %s, out: %s\n", ts, n)
t.Fail()
t.Errorf("must be equal: in: %s, out: %s", ts, n)
}
}
}
@ -110,26 +102,23 @@ func TestDomainNameAndTXTEscapes(t *testing.T) {
s := rr1.String()
rr2, err := NewRR(s)
if err != nil {
t.Logf("Error parsing unpacked RR's string: %v", err)
t.Logf(" Bytes: %v\n", rrbytes)
t.Logf("String: %v\n", s)
t.Fail()
t.Errorf("Error parsing unpacked RR's string: %v", err)
t.Errorf(" Bytes: %v", rrbytes)
t.Errorf("String: %v", s)
}
repacked := make([]byte, len(rrbytes))
if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil {
t.Logf("error packing parsed RR: %v", err)
t.Logf(" original Bytes: %v\n", rrbytes)
t.Logf("unpacked Struct: %V\n", rr1)
t.Logf(" parsed Struct: %V\n", rr2)
t.Fail()
t.Errorf("error packing parsed RR: %v", err)
t.Errorf(" original Bytes: %v", rrbytes)
t.Errorf("unpacked Struct: %v", rr1)
t.Errorf(" parsed Struct: %v", rr2)
}
if !bytes.Equal(repacked, rrbytes) {
t.Log("packed bytes don't match original bytes")
t.Logf(" original bytes: %v", rrbytes)
t.Logf(" packed bytes: %v", repacked)
t.Logf("unpacked struct: %V", rr1)
t.Logf(" parsed struct: %V", rr2)
t.Fail()
t.Error("packed bytes don't match original bytes")
t.Errorf(" original bytes: %v", rrbytes)
t.Errorf(" packed bytes: %v", repacked)
t.Errorf("unpacked struct: %v", rr1)
t.Errorf(" parsed struct: %v", rr2)
}
}
}
@ -206,16 +195,16 @@ func TestDomainQuick(t *testing.T) {
buf := make([]byte, 255)
off, err := PackDomainName(ds, buf, 0, nil, false)
if err != nil {
t.Logf("error packing domain: %s", err.Error())
t.Logf(" bytes: %v\n", db)
t.Logf("string: %v\n", ds)
t.Errorf("error packing domain: %v", err)
t.Errorf(" bytes: %v", db)
t.Errorf("string: %v", ds)
return false
}
if !bytes.Equal(db, buf[:off]) {
t.Logf("repacked domain doesn't match original:")
t.Logf("src bytes: %v", db)
t.Logf(" string: %v", ds)
t.Logf("out bytes: %v", buf[:off])
t.Errorf("repacked domain doesn't match original:")
t.Errorf("src bytes: %v", db)
t.Errorf(" string: %v", ds)
t.Errorf("out bytes: %v", buf[:off])
return false
}
return true
@ -248,7 +237,13 @@ func GenerateTXT(r *rand.Rand, size int) []byte {
return rd
}
func TestTXTRRQuick(t *testing.T) {
// Ok, 2 things. 1) this test breaks with the new functionality of splitting up larger txt
// chunks into 255 byte pieces. 2) I don't like the random nature of this thing, because I can't
// place the quotes where they need to be.
// So either add some code that places the quotes in just the right spots, make this non-random,
// or do something else.
// Disabled for now. (miek)
func testTXTRRQuick(t *testing.T) {
s := rand.NewSource(0)
r := rand.New(s)
typeAndClass := []byte{
@ -272,15 +267,15 @@ func TestTXTRRQuick(t *testing.T) {
buf := make([]byte, len(rrbytes)*3)
off, err := PackRR(rr, buf, 0, nil, false)
if err != nil {
t.Logf("pack Error: %s\nRR: %V", err.Error(), rr)
t.Errorf("pack Error: %v\nRR: %v", err, rr)
return false
}
buf = buf[:off]
if !bytes.Equal(buf, rrbytes) {
t.Logf("packed bytes don't match original bytes")
t.Logf("src bytes: %v", rrbytes)
t.Logf(" struct: %V", rr)
t.Logf("oUt bytes: %v", buf)
t.Errorf("packed bytes don't match original bytes")
t.Errorf("src bytes: %v", rrbytes)
t.Errorf(" struct: %v", rr)
t.Errorf("out bytes: %v", buf)
return false
}
if len(rdata) == 0 {
@ -290,35 +285,35 @@ func TestTXTRRQuick(t *testing.T) {
rrString := rr.String()
rr2, err := NewRR(rrString)
if err != nil {
t.Logf("error parsing own output: %s", err.Error())
t.Logf("struct: %V", rr)
t.Logf("string: %v", rrString)
t.Errorf("error parsing own output: %v", err)
t.Errorf("struct: %v", rr)
t.Errorf("string: %v", rrString)
return false
}
if rr2.String() != rrString {
t.Logf("parsed rr.String() doesn't match original string")
t.Logf("original: %v", rrString)
t.Logf(" parsed: %v", rr2.String())
t.Errorf("parsed rr.String() doesn't match original string")
t.Errorf("original: %v", rrString)
t.Errorf(" parsed: %v", rr2.String())
return false
}
buf = make([]byte, len(rrbytes)*3)
off, err = PackRR(rr2, buf, 0, nil, false)
if err != nil {
t.Logf("error packing parsed rr: %s", err.Error())
t.Logf("unpacked Struct: %V", rr)
t.Logf(" string: %v", rrString)
t.Logf(" parsed Struct: %V", rr2)
t.Errorf("error packing parsed rr: %v", err)
t.Errorf("unpacked Struct: %v", rr)
t.Errorf(" string: %v", rrString)
t.Errorf(" parsed Struct: %v", rr2)
return false
}
buf = buf[:off]
if !bytes.Equal(buf, rrbytes) {
t.Logf("parsed packed bytes don't match original bytes")
t.Logf(" source bytes: %v", rrbytes)
t.Logf("unpacked struct: %V", rr)
t.Logf(" string: %v", rrString)
t.Logf(" parsed struct: %V", rr2)
t.Logf(" repacked bytes: %v", buf)
t.Errorf("parsed packed bytes don't match original bytes")
t.Errorf(" source bytes: %v", rrbytes)
t.Errorf("unpacked struct: %v", rr)
t.Errorf(" string: %v", rrString)
t.Errorf(" parsed struct: %v", rr2)
t.Errorf(" repacked bytes: %v", buf)
return false
}
return true
@ -345,15 +340,13 @@ func TestParseDirectiveMisc(t *testing.T) {
"ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1",
}
for i, o := range tests {
rr, e := NewRR(i)
if e != nil {
t.Log("failed to parse RR: " + e.Error())
t.Fail()
rr, err := NewRR(i)
if err != nil {
t.Error("failed to parse RR: ", err)
continue
}
if rr.String() != o {
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
t.Fail()
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
@ -369,15 +362,13 @@ func TestNSEC(t *testing.T) {
"localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534",
}
for i, o := range nsectests {
rr, e := NewRR(i)
if e != nil {
t.Log("failed to parse RR: " + e.Error())
t.Fail()
rr, err := NewRR(i)
if err != nil {
t.Error("failed to parse RR: ", err)
continue
}
if rr.String() != o {
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
t.Fail()
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
@ -390,15 +381,13 @@ func TestParseLOC(t *testing.T) {
"SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m",
}
for i, o := range lt {
rr, e := NewRR(i)
if e != nil {
t.Log("failed to parse RR: " + e.Error())
t.Fail()
rr, err := NewRR(i)
if err != nil {
t.Error("failed to parse RR: ", err)
continue
}
if rr.String() != o {
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
t.Fail()
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
@ -410,15 +399,13 @@ func TestParseDS(t *testing.T) {
"example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F",
}
for i, o := range dt {
rr, e := NewRR(i)
if e != nil {
t.Log("failed to parse RR: " + e.Error())
t.Fail()
rr, err := NewRR(i)
if err != nil {
t.Error("failed to parse RR: ", err)
continue
}
if rr.String() != o {
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
t.Fail()
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
@ -446,15 +433,13 @@ func TestQuotes(t *testing.T) {
"cid.urn.arpa. NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .",
}
for i, o := range tests {
rr, e := NewRR(i)
if e != nil {
t.Log("failed to parse RR: " + e.Error())
t.Fail()
rr, err := NewRR(i)
if err != nil {
t.Error("failed to parse RR: ", err)
continue
}
if rr.String() != o {
t.Logf("`%s' should be equal to\n`%s', but is\n`%s'\n", i, o, rr.String())
t.Fail()
t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
@ -471,15 +456,13 @@ func TestParseClass(t *testing.T) {
"t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1",
}
for i, o := range tests {
rr, e := NewRR(i)
if e != nil {
t.Log("failed to parse RR: " + e.Error())
t.Fail()
rr, err := NewRR(i)
if err != nil {
t.Error("failed to parse RR: ", err)
continue
}
if rr.String() != o {
t.Logf("`%s' should be equal to\n`%s', but is\n`%s'\n", i, o, rr.String())
t.Fail()
t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
@ -524,15 +507,13 @@ func TestBrace(t *testing.T) {
)`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600",
}
for i, o := range tests {
rr, e := NewRR(i)
if e != nil {
t.Log("failed to parse RR: " + e.Error() + "\n\t" + i)
t.Fail()
rr, err := NewRR(i)
if err != nil {
t.Errorf("failed to parse RR: %v\n\t%s", err, i)
continue
}
if rr.String() != o {
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
t.Fail()
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
@ -555,8 +536,7 @@ func TestParseFailure(t *testing.T) {
for _, s := range tests {
_, err := NewRR(s)
if err == nil {
t.Logf("should have triggered an error: \"%s\"", s)
t.Fail()
t.Errorf("should have triggered an error: \"%s\"", s)
}
}
}
@ -577,11 +557,10 @@ test IN CNAME test.a.example.com.
for x := range to {
i++
if x.Error != nil {
t.Logf("%s\n", x.Error)
t.Fail()
t.Error(x.Error)
continue
}
t.Logf("%s\n", x.RR)
t.Log(x.RR)
}
delta := time.Now().UnixNano() - start
t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9))
@ -625,7 +604,7 @@ moutamassey NS ns01.yahoodomains.jp.
`
to := ParseZone(strings.NewReader(zone), "", "testzone")
for x := range to {
fmt.Printf("%s\n", x.RR)
fmt.Println(x.RR)
}
// Output:
// name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300
@ -658,7 +637,7 @@ func ExampleHIP() {
b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
rvs.example.com. )`
if hip, err := NewRR(h); err == nil {
fmt.Printf("%s\n", hip.String())
fmt.Println(hip.String())
}
// Output:
// www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com.
@ -673,30 +652,30 @@ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
rvs2.example.com. )`
rr, err := NewRR(h)
if err != nil {
t.Fatalf("failed to parse RR: %s", err)
t.Fatalf("failed to parse RR: %v", err)
}
t.Logf("RR: %s", rr)
msg := new(Msg)
msg.Answer = []RR{rr, rr}
bytes, err := msg.Pack()
if err != nil {
t.Fatalf("failed to pack msg: %s", err)
t.Fatalf("failed to pack msg: %v", err)
}
if err := msg.Unpack(bytes); err != nil {
t.Fatalf("failed to unpack msg: %s", err)
t.Fatalf("failed to unpack msg: %v", err)
}
if len(msg.Answer) != 2 {
t.Fatalf("2 answers expected: %V", msg)
t.Fatalf("2 answers expected: %v", msg)
}
for i, rr := range msg.Answer {
rr := rr.(*HIP)
t.Logf("RR: %s", rr)
if l := len(rr.RendezvousServers); l != 2 {
t.Fatalf("2 servers expected, only %d in record %d:\n%V", l, i, msg)
t.Fatalf("2 servers expected, only %d in record %d:\n%v", l, i, msg)
}
for j, s := range []string{"rvs1.example.com.", "rvs2.example.com."} {
if rr.RendezvousServers[j] != s {
t.Fatalf("expected server %d of record %d to be %s:\n%V", j, i, s, msg)
t.Fatalf("expected server %d of record %d to be %s:\n%v", j, i, s, msg)
}
}
}
@ -705,7 +684,7 @@ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
func ExampleSOA() {
s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100"
if soa, err := NewRR(s); err == nil {
fmt.Printf("%s\n", soa.String())
fmt.Println(soa.String())
}
// Output:
// example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100
@ -715,8 +694,7 @@ func TestLineNumberError(t *testing.T) {
s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100"
if _, err := NewRR(s); err != nil {
if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" {
t.Logf("not expecting this error: " + err.Error())
t.Fail()
t.Error("not expecting this error: ", err)
}
}
}
@ -733,15 +711,13 @@ func TestLineNumberError2(t *testing.T) {
`example.com 1000 IN TALINK ( a.example.com. b...example.com.
)`: "dns: bad TALINK NextName: \"b...example.com.\" at line: 2:1"}
for in, err := range tests {
_, e := NewRR(in)
if e == nil {
t.Fail()
for in, errStr := range tests {
_, err := NewRR(in)
if err == nil {
t.Error("err is nil")
} else {
if e.Error() != err {
t.Logf("%s\n", in)
t.Logf("error should be %s is %s\n", err, e.Error())
t.Fail()
if err.Error() != errStr {
t.Errorf("%s: error should be %s is %v", in, errStr, err)
}
}
}
@ -756,8 +732,7 @@ func TestRfc1982(t *testing.T) {
strtests := []string{"20120525134203", "19700101000000", "20380119031408"}
for _, v := range strtests {
if x, _ := StringToTime(v); v != TimeToString(x) {
t.Logf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x)
t.Fail()
t.Errorf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x)
}
}
@ -767,8 +742,7 @@ func TestRfc1982(t *testing.T) {
}
for i, v := range inttests {
if TimeToString(i) != v {
t.Logf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i))
t.Fail()
t.Errorf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i))
}
}
@ -785,16 +759,14 @@ func TestRfc1982(t *testing.T) {
x, _ := StringToTime(from)
y := TimeToString(x)
if y != to {
t.Logf("1982 arithmetic future failure %s:%s (%s)", from, to, y)
t.Fail()
t.Errorf("1982 arithmetic future failure %s:%s (%s)", from, to, y)
}
}
}
func TestEmpty(t *testing.T) {
for _ = range ParseZone(strings.NewReader(""), "", "") {
t.Logf("should be empty")
t.Fail()
t.Errorf("should be empty")
}
}
@ -819,7 +791,7 @@ func TestLowercaseTokens(t *testing.T) {
for _, testrr := range testrecords {
_, err := NewRR(testrr)
if err != nil {
t.Errorf("failed to parse %#v, got %s", testrr, err.Error())
t.Errorf("failed to parse %#v, got %v", testrr, err)
}
}
}
@ -830,7 +802,7 @@ func ExampleGenerate() {
to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "")
for x := range to {
if x.Error == nil {
fmt.Printf("%s\n", x.RR.String())
fmt.Println(x.RR.String())
}
}
// Output:
@ -881,25 +853,25 @@ func TestSRVPacking(t *testing.T) {
_, err := msg.Pack()
if err != nil {
t.Fatalf("couldn't pack %v\n", msg)
t.Fatalf("couldn't pack %v: %v", msg, err)
}
}
func TestParseBackslash(t *testing.T) {
if r, e := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); e != nil {
t.Fatalf("could not create RR with \\000 in it")
if r, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil {
t.Errorf("could not create RR with \\000 in it")
} else {
t.Logf("parsed %s\n", r.String())
t.Logf("parsed %s", r.String())
}
if r, e := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); e != nil {
t.Fatalf("could not create RR with \\000 in it")
if r, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil {
t.Errorf("could not create RR with \\000 in it")
} else {
t.Logf("parsed %s\n", r.String())
t.Logf("parsed %s", r.String())
}
if r, e := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); e != nil {
t.Fatalf("could not create RR with \\ and \\@ in it")
if r, err := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); err != nil {
t.Errorf("could not create RR with \\ and \\@ in it")
} else {
t.Logf("parsed %s\n", r.String())
t.Logf("parsed %s", r.String())
}
}
@ -919,9 +891,9 @@ func TestILNP(t *testing.T) {
"host1.example.com.\t3600\tIN\tLP\t20 l32-subnet1.example.com.",
}
for _, t1 := range tests {
r, e := NewRR(t1)
if e != nil {
t.Fatalf("an error occured: %s\n", e.Error())
r, err := NewRR(t1)
if err != nil {
t.Fatalf("an error occurred: %v", err)
} else {
if t1 != r.String() {
t.Fatalf("strings should be equal %s %s", t1, r.String())
@ -941,15 +913,13 @@ func TestNsapGposEidNimloc(t *testing.T) {
"VAXA. IN EID 3141592653589793": "VAXA.\t3600\tIN\tEID\t3141592653589793",
}
for i, o := range dt {
rr, e := NewRR(i)
if e != nil {
t.Log("failed to parse RR: " + e.Error())
t.Fail()
rr, err := NewRR(i)
if err != nil {
t.Error("failed to parse RR: ", err)
continue
}
if rr.String() != o {
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
t.Fail()
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
@ -962,15 +932,13 @@ func TestPX(t *testing.T) {
"ab.net2.it. IN PX 10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.": "ab.net2.it.\t3600\tIN\tPX\t10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.",
}
for i, o := range dt {
rr, e := NewRR(i)
if e != nil {
t.Log("failed to parse RR: " + e.Error())
t.Fail()
rr, err := NewRR(i)
if err != nil {
t.Error("failed to parse RR: ", err)
continue
}
if rr.String() != o {
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
t.Fail()
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
} else {
t.Logf("RR is OK: `%s'", rr.String())
}
@ -1004,8 +972,7 @@ foo. IN TXT "THIS IS TEXT MAN"; this is comment 8
if x.Error == nil {
if x.Comment != "" {
if _, ok := comments[x.Comment]; !ok {
t.Logf("wrong comment %s", x.Comment)
t.Fail()
t.Errorf("wrong comment %s", x.Comment)
}
}
}
@ -1018,14 +985,12 @@ func TestEUIxx(t *testing.T) {
"host.example. IN EUI64 00-00-5e-ef-00-00-00-2a": "host.example.\t3600\tIN\tEUI64\t00-00-5e-ef-00-00-00-2a",
}
for i, o := range tests {
r, e := NewRR(i)
if e != nil {
t.Logf("failed to parse %s: %s\n", i, e.Error())
t.Fail()
r, err := NewRR(i)
if err != nil {
t.Errorf("failed to parse %s: %v", i, err)
}
if r.String() != o {
t.Logf("want %s, got %s\n", o, r.String())
t.Fail()
t.Errorf("want %s, got %s", o, r.String())
}
}
}
@ -1037,14 +1002,12 @@ func TestUserRR(t *testing.T) {
"host.example. IN UINFO \"Miek Gieben\"": "host.example.\t3600\tIN\tUINFO\t\"Miek Gieben\"",
}
for i, o := range tests {
r, e := NewRR(i)
if e != nil {
t.Logf("failed to parse %s: %s\n", i, e.Error())
t.Fail()
r, err := NewRR(i)
if err != nil {
t.Errorf("failed to parse %s: %v", i, err)
}
if r.String() != o {
t.Logf("want %s, got %s\n", o, r.String())
t.Fail()
t.Errorf("want %s, got %s", o, r.String())
}
}
}
@ -1109,30 +1072,42 @@ func TestTXT(t *testing.T) {
t.Error("bad size of serialized record:", rr.len())
}
}
// Test TXT record with a chunk larger than 255 bytes; it should be split up by the parser
s := ""
for i := 0; i < 255; i++ {
s += "a"
}
s += "b"
rr, err = NewRR(`test.local. 60 IN TXT "` + s + `"`)
if err != nil {
t.Error("failed to parse empty-string TXT record", err)
}
if rr.(*TXT).Txt[1] != "b" {
t.Errorf("Txt should have two chunk, last one my be 'b', but is %s", rr.(*TXT).Txt[1])
}
t.Log(rr.String())
}
func TestTypeXXXX(t *testing.T) {
_, err := NewRR("example.com IN TYPE1234 \\# 4 aabbccdd")
if err != nil {
t.Logf("failed to parse TYPE1234 RR: %s", err.Error())
t.Fail()
t.Errorf("failed to parse TYPE1234 RR: %v", err)
}
_, err = NewRR("example.com IN TYPE655341 \\# 8 aabbccddaabbccdd")
if err == nil {
t.Logf("this should not work, for TYPE655341")
t.Fail()
t.Errorf("this should not work, for TYPE655341")
}
_, err = NewRR("example.com IN TYPE1 \\# 4 0a000001")
if err == nil {
t.Logf("this should not work")
t.Fail()
t.Errorf("this should not work")
}
}
func TestPTR(t *testing.T) {
_, err := NewRR("144.2.0.192.in-addr.arpa. 900 IN PTR ilouse03146p0\\(.example.com.")
if err != nil {
t.Error("failed to parse ", err.Error())
t.Error("failed to parse ", err)
}
}
@ -1147,13 +1122,13 @@ func TestDigit(t *testing.T) {
"miek\\004.nl. 100 IN TXT \"A\"": 4,
}
for s, i := range tests {
r, e := NewRR(s)
r, err := NewRR(s)
buf := make([]byte, 40)
if e != nil {
t.Fatalf("failed to parse %s\n", e.Error())
if err != nil {
t.Fatalf("failed to parse %v", err)
}
PackRR(r, buf, 0, nil, false)
t.Logf("%v\n", buf)
t.Log(buf)
if buf[5] != i {
t.Fatalf("5 pos must be %d, is %d", i, buf[5])
}
@ -1169,11 +1144,10 @@ func TestParseRRSIGTimestamp(t *testing.T) {
`miek.nl. IN RRSIG SOA 8 2 43200 20140210031301 20140111031301 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true,
`miek.nl. IN RRSIG SOA 8 2 43200 315565800 4102477800 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true,
}
for r, _ := range tests {
_, e := NewRR(r)
if e != nil {
t.Fail()
t.Logf("%s\n", e.Error())
for r := range tests {
_, err := NewRR(r)
if err != nil {
t.Error(err)
}
}
}
@ -1184,11 +1158,10 @@ func TestTxtEqual(t *testing.T) {
rr1.Txt = []string{"a\"a", "\"", "b"}
rr2, _ := NewRR(rr1.String())
if rr1.String() != rr2.String() {
t.Logf("these two TXT records should match")
t.Logf("\n%s\n%s\n", rr1.String(), rr2.String())
t.Fail() // This is not an error, but keep this test.
// This is not an error, but keep this test.
t.Errorf("these two TXT records should match:\n%s\n%s", rr1.String(), rr2.String())
}
t.Logf("\n%s\n%s\n", rr1.String(), rr2.String())
t.Logf("%s\n%s", rr1.String(), rr2.String())
}
func TestTxtLong(t *testing.T) {
@ -1202,8 +1175,7 @@ func TestTxtLong(t *testing.T) {
}
str := rr1.String()
if len(str) < len(rr1.Txt[0]) {
t.Logf("string conversion should work")
t.Fail()
t.Error("string conversion should work")
}
}
@ -1217,7 +1189,7 @@ func TestMalformedPackets(t *testing.T) {
for _, packet := range packets {
data, _ := hex.DecodeString(packet)
// for _, v := range data {
// t.Logf("%s ", string(v))
// t.Log(v)
// }
var msg Msg
msg.Unpack(data)
@ -1253,15 +1225,14 @@ func TestNewPrivateKey(t *testing.T) {
key.Algorithm = algo.name
privkey, err := key.Generate(algo.bits)
if err != nil {
t.Fatal(err.Error())
t.Fatal(err)
}
newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey))
if err != nil {
t.Log(key.String())
t.Log(key.PrivateKeyString(privkey))
t.Fatal(err.Error())
t.Error(key.String())
t.Error(key.PrivateKeyString(privkey))
t.Fatal(err)
}
switch newPrivKey := newPrivKey.(type) {
@ -1270,7 +1241,7 @@ func TestNewPrivateKey(t *testing.T) {
}
if !reflect.DeepEqual(privkey, newPrivKey) {
t.Errorf("[%v] Private keys differ:\n%#v\n%#v\n", AlgorithmToString[algo.name], privkey, newPrivKey)
t.Errorf("[%v] Private keys differ:\n%#v\n%#v", AlgorithmToString[algo.name], privkey, newPrivKey)
}
}
}
@ -1286,7 +1257,7 @@ func TestNewRRSpecial(t *testing.T) {
rr, err = NewRR("; comment")
expect = ""
if err != nil {
t.Errorf("unexpected err: %s", err)
t.Errorf("unexpected err: %v", err)
}
if rr != nil {
t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@ -1295,7 +1266,7 @@ func TestNewRRSpecial(t *testing.T) {
rr, err = NewRR("")
expect = ""
if err != nil {
t.Errorf("unexpected err: %s", err)
t.Errorf("unexpected err: %v", err)
}
if rr != nil {
t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@ -1304,7 +1275,7 @@ func TestNewRRSpecial(t *testing.T) {
rr, err = NewRR("$ORIGIN foo.")
expect = ""
if err != nil {
t.Errorf("unexpected err: %s", err)
t.Errorf("unexpected err: %v", err)
}
if rr != nil {
t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@ -1313,7 +1284,7 @@ func TestNewRRSpecial(t *testing.T) {
rr, err = NewRR(" ")
expect = ""
if err != nil {
t.Errorf("unexpected err: %s", err)
t.Errorf("unexpected err: %v", err)
}
if rr != nil {
t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@ -1322,7 +1293,7 @@ func TestNewRRSpecial(t *testing.T) {
rr, err = NewRR("\n")
expect = ""
if err != nil {
t.Errorf("unexpected err: %s", err)
t.Errorf("unexpected err: %v", err)
}
if rr != nil {
t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@ -1331,7 +1302,7 @@ func TestNewRRSpecial(t *testing.T) {
rr, err = NewRR("foo. A 1.1.1.1\nbar. A 2.2.2.2")
expect = "foo.\t3600\tIN\tA\t1.1.1.1"
if err != nil {
t.Errorf("unexpected err: %s", err)
t.Errorf("unexpected err: %v", err)
}
if rr == nil || rr.String() != expect {
t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@ -1394,15 +1365,13 @@ func TestParseIPSECKEY(t *testing.T) {
for i := 0; i < len(tests)-1; i++ {
t1 := tests[i]
e1 := tests[i+1]
r, e := NewRR(t1)
if e != nil {
t.Logf("failed to parse IPSECKEY %s", e)
r, err := NewRR(t1)
if err != nil {
t.Errorf("failed to parse IPSECKEY %v", err)
continue
}
if r.String() != e1 {
t.Logf("these two IPSECKEY records should match")
t.Logf("\n%s\n%s\n", r.String(), e1)
t.Fail()
t.Errorf("these two IPSECKEY records should match:\n%s\n%s", r.String(), e1)
}
i++
}

View file

@ -1,10 +1,3 @@
/*
PRIVATE RR
RFC 6895 sets aside a range of type codes for private use. This range
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
can be used, before requesting an official type code from IANA.
*/
package dns
import (
@ -91,9 +84,9 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata)
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l = <-c; l.value {
case _NEWLINE, _EOF:
case zNewline, zEOF:
break FETCH
case _STRING:
case zString:
text = append(text, l.token)
}
}

View file

@ -1,9 +1,10 @@
package dns_test
import (
"github.com/miekg/dns"
"strings"
"testing"
"github.com/miekg/dns"
)
const TypeISBN uint16 = 0x0F01
@ -75,7 +76,7 @@ func TestPrivateByteSlice(t *testing.T) {
buf := make([]byte, 100)
off, err := dns.PackRR(rr, buf, 0, nil, false)
if err != nil {
t.Errorf("got error packing ISBN: %s", err)
t.Errorf("got error packing ISBN: %v", err)
}
custrr := rr.(*dns.PrivateRR)
@ -85,7 +86,7 @@ func TestPrivateByteSlice(t *testing.T) {
rr1, off1, err := dns.UnpackRR(buf[:off], 0)
if err != nil {
t.Errorf("got error unpacking ISBN: %s", err)
t.Errorf("got error unpacking ISBN: %v", err)
}
if off1 != off {

View file

@ -10,6 +10,7 @@ import (
"time"
)
// Handler is implemented by any value that implements ServeDNS.
type Handler interface {
ServeDNS(w ResponseWriter, r *Msg)
}
@ -44,7 +45,7 @@ type response struct {
tsigSecret map[string]string // the tsig secrets
udp *net.UDPConn // i/o connection if UDP was used
tcp *net.TCPConn // i/o connection if TCP was used
udpSession *sessionUDP // oob data to get egress interface right
udpSession *SessionUDP // oob data to get egress interface right
remoteAddr net.Addr // address of the client
}
@ -72,12 +73,12 @@ var DefaultServeMux = NewServeMux()
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Msg)
// ServerDNS calls f(w, r)
// ServeDNS calls f(w, r).
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
f(w, r)
}
// FailedHandler returns a HandlerFunc that returns SERVFAIL for every request it gets.
// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
func HandleFailed(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetRcode(r, RcodeServerFailure)
@ -121,10 +122,9 @@ func (mux *ServeMux) match(q string, t uint16) Handler {
if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
if t != TypeDS {
return h
} else {
// Continue for DS to see if we have a parent too, if so delegeate to the parent
handler = h
}
// Continue for DS to see if we have a parent too, if so delegeate to the parent
handler = h
}
off, end = NextLabel(q, off)
if end {
@ -148,7 +148,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
mux.m.Unlock()
}
// Handle adds a handler to the ServeMux for pattern.
// HandleFunc adds a handler function to the ServeMux for pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
mux.Handle(pattern, HandlerFunc(handler))
}
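
The exported pieces touched above (HandlerFunc, HandleFailed, HandleFunc) combine into the usual pattern of registering per-zone handlers on the default ServeMux and serving them. A minimal sketch, assuming an illustrative zone name, listen port and fixed answer that are not part of this change:

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Answer queries for the example zone with a fixed A record.
	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		if rr, err := dns.NewRR(r.Question[0].Name + " 3600 IN A 127.0.0.1"); err == nil {
			m.Answer = append(m.Answer, rr)
		}
		w.WriteMsg(m)
	})
	// Everything else is answered with SERVFAIL by the package helper.
	dns.HandleFunc(".", dns.HandleFailed)

	srv := &dns.Server{Addr: ":8053", Net: "udp"}
	log.Fatal(srv.ListenAndServe())
}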
@ -372,7 +372,7 @@ func (srv *Server) getReadTimeout() time.Duration {
}
// serveTCP starts a TCP listener for the server.
// Each request is handled in a seperate goroutine.
// Each request is handled in a separate goroutine.
func (srv *Server) serveTCP(l *net.TCPListener) error {
defer l.Close()
@ -407,7 +407,7 @@ func (srv *Server) serveTCP(l *net.TCPListener) error {
}
// serveUDP starts a UDP listener for the server.
// Each request is handled in a seperate goroutine.
// Each request is handled in a separate goroutine.
func (srv *Server) serveUDP(l *net.UDPConn) error {
defer l.Close()
@ -438,7 +438,7 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
}
// Serve a new connection.
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *sessionUDP, t *net.TCPConn) {
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t *net.TCPConn) {
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
q := 0
defer func() {
@ -537,10 +537,10 @@ func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, er
return m, nil
}
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *sessionUDP, error) {
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
conn.SetReadDeadline(time.Now().Add(timeout))
m := make([]byte, srv.UDPSize)
n, s, e := readFromSessionUDP(conn, m)
n, s, e := ReadFromSessionUDP(conn, m)
if e != nil || n == 0 {
if e != nil {
return nil, nil, e
@ -576,7 +576,7 @@ func (w *response) WriteMsg(m *Msg) (err error) {
func (w *response) Write(m []byte) (int, error) {
switch {
case w.udp != nil:
n, err := writeToSessionUDP(w.udp, m, w.udpSession)
n, err := WriteToSessionUDP(w.udp, m, w.udpSession)
return n, err
case w.tcp != nil:
lm := len(m)

View file

@ -95,7 +95,7 @@ func TestServing(t *testing.T) {
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
t.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
@ -104,38 +104,32 @@ func TestServing(t *testing.T) {
m.SetQuestion("miek.nl.", TypeTXT)
r, _, err := c.Exchange(m, addrstr)
if err != nil || len(r.Extra) == 0 {
t.Log("failed to exchange miek.nl", err)
t.Fatal()
t.Fatal("failed to exchange miek.nl", err)
}
txt := r.Extra[0].(*TXT).Txt[0]
if txt != "Hello world" {
t.Log("Unexpected result for miek.nl", txt, "!= Hello world")
t.Fail()
t.Error("Unexpected result for miek.nl", txt, "!= Hello world")
}
m.SetQuestion("example.com.", TypeTXT)
r, _, err = c.Exchange(m, addrstr)
if err != nil {
t.Log("failed to exchange example.com", err)
t.Fatal()
t.Fatal("failed to exchange example.com", err)
}
txt = r.Extra[0].(*TXT).Txt[0]
if txt != "Hello example" {
t.Log("Unexpected result for example.com", txt, "!= Hello example")
t.Fail()
t.Error("Unexpected result for example.com", txt, "!= Hello example")
}
// Test Mixes cased as noticed by Ask.
m.SetQuestion("eXaMplE.cOm.", TypeTXT)
r, _, err = c.Exchange(m, addrstr)
if err != nil {
t.Log("failed to exchange eXaMplE.cOm", err)
t.Fail()
t.Error("failed to exchange eXaMplE.cOm", err)
}
txt = r.Extra[0].(*TXT).Txt[0]
if txt != "Hello example" {
t.Log("Unexpected result for example.com", txt, "!= Hello example")
t.Fail()
t.Error("Unexpected result for example.com", txt, "!= Hello example")
}
}
@ -147,7 +141,7 @@ func BenchmarkServe(b *testing.B) {
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
b.Fatalf("Unable to run test server: %s", err)
b.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
@ -169,7 +163,7 @@ func benchmarkServe6(b *testing.B) {
a := runtime.GOMAXPROCS(4)
s, addrstr, err := RunLocalUDPServer("[::1]:0")
if err != nil {
b.Fatalf("Unable to run test server: %s", err)
b.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
@ -200,7 +194,7 @@ func BenchmarkServeCompress(b *testing.B) {
a := runtime.GOMAXPROCS(4)
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
b.Fatalf("Unable to run test server: %s", err)
b.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
@ -301,7 +295,7 @@ func TestServingLargeResponses(t *testing.T) {
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
t.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
@ -316,8 +310,7 @@ func TestServingLargeResponses(t *testing.T) {
M.Unlock()
_, _, err = c.Exchange(m, addrstr)
if err != nil {
t.Logf("failed to exchange: %s", err.Error())
t.Fail()
t.Errorf("failed to exchange: %v", err)
}
// This must fail
M.Lock()
@ -325,15 +318,13 @@ func TestServingLargeResponses(t *testing.T) {
M.Unlock()
_, _, err = c.Exchange(m, addrstr)
if err == nil {
t.Logf("failed to fail exchange, this should generate packet error")
t.Fail()
t.Error("failed to fail exchange, this should generate packet error")
}
// But this must work again
c.UDPSize = 7000
_, _, err = c.Exchange(m, addrstr)
if err != nil {
t.Logf("failed to exchange: %s", err.Error())
t.Fail()
t.Errorf("failed to exchange: %v", err)
}
}
@ -344,7 +335,7 @@ func TestServingResponse(t *testing.T) {
HandleFunc("miek.nl.", HelloServer)
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
t.Fatalf("Unable to run test server: %v", err)
}
c := new(Client)
@ -353,49 +344,46 @@ func TestServingResponse(t *testing.T) {
m.Response = false
_, _, err = c.Exchange(m, addrstr)
if err != nil {
t.Log("failed to exchange", err)
t.Fatal()
t.Fatal("failed to exchange", err)
}
m.Response = true
_, _, err = c.Exchange(m, addrstr)
if err == nil {
t.Log("exchanged response message")
t.Fatal()
t.Fatal("exchanged response message")
}
s.Shutdown()
s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
t.Fatalf("Unable to run test server: %v", err)
}
defer s.Shutdown()
m.Response = true
_, _, err = c.Exchange(m, addrstr)
if err != nil {
t.Log("could exchanged response message in Unsafe mode")
t.Fatal()
t.Fatal("could exchanged response message in Unsafe mode")
}
}
func TestShutdownTCP(t *testing.T) {
s, _, err := RunLocalTCPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
t.Fatalf("Unable to run test server: %v", err)
}
err = s.Shutdown()
if err != nil {
t.Errorf("Could not shutdown test TCP server, %s", err)
t.Errorf("Could not shutdown test TCP server, %v", err)
}
}
func TestShutdownUDP(t *testing.T) {
s, _, err := RunLocalUDPServer("127.0.0.1:0")
if err != nil {
t.Fatalf("Unable to run test server: %s", err)
t.Fatalf("Unable to run test server: %v", err)
}
err = s.Shutdown()
if err != nil {
t.Errorf("Could not shutdown test UDP server, %s", err)
t.Errorf("Could not shutdown test UDP server, %v", err)
}
}

View file

@ -1,18 +1,3 @@
// SIG(0)
//
// From RFC 2931:
//
// SIG(0) provides protection for DNS transactions and requests ....
// ... protection for glue records, DNS requests, protection for message headers
// on requests and responses, and protection of the overall integrity of a response.
//
// It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
// secret approach in TSIG.
// Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
// RSASHA512.
//
// Signing subsequent messages in multi-message sessions is not implemented.
//
package dns
import (
@ -92,7 +77,7 @@ func (rr *SIG) Sign(k PrivateKey, m *Msg) ([]byte, error) {
buf[rdoff], buf[rdoff+1] = packUint16(rdlen)
// Adjust additional count
adc, _ := unpackUint16(buf, 10)
adc += 1
adc++
buf[10], buf[11] = packUint16(adc)
return buf, nil
}

View file

@ -27,8 +27,7 @@ func TestSIG0(t *testing.T) {
}
pk, err := keyrr.Generate(keysize)
if err != nil {
t.Logf("Failed to generate key for “%s”: %v", algstr, err)
t.Fail()
t.Errorf("Failed to generate key for “%s”: %v", algstr, err)
continue
}
now := uint32(time.Now().Unix())
@ -43,19 +42,16 @@ func TestSIG0(t *testing.T) {
sigrr.SignerName = keyrr.Hdr.Name
mb, err := sigrr.Sign(pk, m)
if err != nil {
t.Logf("Failed to sign message using “%s”: %v", algstr, err)
t.Fail()
t.Errorf("Failed to sign message using “%s”: %v", algstr, err)
continue
}
m := new(Msg)
if err := m.Unpack(mb); err != nil {
t.Logf("Failed to unpack message signed using “%s”: %v", algstr, err)
t.Fail()
t.Errorf("Failed to unpack message signed using “%s”: %v", algstr, err)
continue
}
if len(m.Extra) != 1 {
t.Logf("Missing SIG for message signed using “%s”", algstr)
t.Fail()
t.Errorf("Missing SIG for message signed using “%s”", algstr)
continue
}
var sigrrwire *SIG
@ -63,8 +59,7 @@ func TestSIG0(t *testing.T) {
case *SIG:
sigrrwire = rr
default:
t.Logf("Expected SIG RR, instead: %v", rr)
t.Fail()
t.Errorf("Expected SIG RR, instead: %v", rr)
continue
}
for _, rr := range []*SIG{sigrr, sigrrwire} {
@ -73,23 +68,20 @@ func TestSIG0(t *testing.T) {
id = "sigrrwire"
}
if err := rr.Verify(keyrr, mb); err != nil {
t.Logf("Failed to verify “%s” signed SIG(%s): %v", algstr, id, err)
t.Fail()
t.Errorf("Failed to verify “%s” signed SIG(%s): %v", algstr, id, err)
continue
}
}
mb[13]++
if err := sigrr.Verify(keyrr, mb); err == nil {
t.Logf("Verify succeeded on an altered message using “%s”", algstr)
t.Fail()
t.Errorf("Verify succeeded on an altered message using “%s”", algstr)
continue
}
sigrr.Expiration = 2
sigrr.Inception = 1
mb, _ = sigrr.Sign(pk, m)
if err := sigrr.Verify(keyrr, mb); err == nil {
t.Logf("Verify succeeded on an expired message using “%s”", algstr)
t.Fail()
t.Errorf("Verify succeeded on an expired message using “%s”", algstr)
continue
}
}

View file

@ -25,7 +25,8 @@ func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (st
h := sha256.New()
switch selector {
case 0:
return hex.EncodeToString(cert.Raw), nil
io.WriteString(h, string(cert.Raw))
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
return hex.EncodeToString(h.Sum(nil)), nil
@ -34,7 +35,8 @@ func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (st
h := sha512.New()
switch selector {
case 0:
return hex.EncodeToString(cert.Raw), nil
io.WriteString(h, string(cert.Raw))
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
return hex.EncodeToString(h.Sum(nil)), nil
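
The corrected branches above hash cert.Raw for selector 0 instead of hex-encoding the raw DER directly, so a full-certificate association (selector 0 with matching type 1, i.e. SHA-256) is now a digest like any other. A minimal sketch of computing the same value by hand; the target host is an assumption, and the manual sha256 is what CertificateToDANE(0, 1, cert) is expected to return after this fix:

package main

import (
	"crypto/sha256"
	"crypto/tls"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	conn, err := tls.Dial("tcp", "example.org:443", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]

	// Selector 0 (full certificate), matching type 1 (SHA-256):
	// digest the DER bytes rather than hex-encoding them verbatim.
	sum := sha256.Sum256(cert.Raw)
	fmt.Println(hex.EncodeToString(sum[:]))
}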

View file

@ -1,56 +1,3 @@
// TRANSACTION SIGNATURE
//
// An TSIG or transaction signature adds a HMAC TSIG record to each message sent.
// The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
//
// Basic use pattern when querying with a TSIG name "axfr." (note that these key names
// must be fully qualified - as they are domain names) and the base64 secret
// "so6ZGir4GPAqINNh9U5c3A==":
//
// c := new(dns.Client)
// c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
// m := new(dns.Msg)
// m.SetQuestion("miek.nl.", dns.TypeMX)
// m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
// ...
// // When sending the TSIG RR is calculated and filled in before sending
//
// When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with
// TSIG, this is the basic use pattern. In this example we request an AXFR for
// miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
// and using the server 176.58.119.54:
//
// t := new(dns.Transfer)
// m := new(dns.Msg)
// t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
// m.SetAxfr("miek.nl.")
// m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
// c, err := t.In(m, "176.58.119.54:53")
// for r := range c { /* r.RR */ }
//
// You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
// If something is not correct an error is returned.
//
// Basic use pattern validating and replying to a message that has TSIG set.
//
// server := &dns.Server{Addr: ":53", Net: "udp"}
// server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
// go server.ListenAndServe()
// dns.HandleFunc(".", handleRequest)
//
// func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
// m := new(Msg)
// m.SetReply(r)
// if r.IsTsig() {
// if w.TsigStatus() == nil {
// // *Msg r has an TSIG record and it was validated
// m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
// } else {
// // *Msg r has an TSIG records and it was not valided
// }
// }
// w.WriteMsg(m)
// }
package dns
import (
@ -75,6 +22,8 @@ const (
HmacSHA512 = "hmac-sha512."
)
// TSIG is the RR the holds the transaction signature of a message.
// See RFC 2845 and RFC 4635.
type TSIG struct {
Hdr RR_Header
Algorithm string `dns:"domain-name"`

View file

@ -10,9 +10,12 @@ import (
)
type (
Type uint16 // Type is a DNS type.
Class uint16 // Class is a DNS class.
Name string // Name is a DNS domain name.
// Type is a DNS type.
Type uint16
// Class is a DNS class.
Class uint16
// Name is a DNS domain name.
Name string
)
// Packet formats
@ -20,6 +23,7 @@ type (
// Wire constants and supported types.
const (
// valid RR_Header.Rrtype and Question.qtype
TypeNone uint16 = 0
TypeA uint16 = 1
TypeNS uint16 = 2
@ -91,7 +95,9 @@ const (
TypeTKEY uint16 = 249
TypeTSIG uint16 = 250
// valid Question.Qtype only
TypeIXFR uint16 = 251
TypeAXFR uint16 = 252
TypeMAILB uint16 = 253
@ -105,6 +111,7 @@ const (
TypeReserved uint16 = 65535
// valid Question.Qclass
ClassINET = 1
ClassCSNET = 2
ClassCHAOS = 3
@ -113,6 +120,7 @@ const (
ClassANY = 255
// Msg.rcode
RcodeSuccess = 0
RcodeFormatError = 1
RcodeServerFailure = 2
@ -133,11 +141,11 @@ const (
RcodeBadAlg = 21
RcodeBadTrunc = 22 // TSIG
// Opcode
// Opcode, there is no 3
OpcodeQuery = 0
OpcodeIQuery = 1
OpcodeStatus = 2
// There is no 3
OpcodeNotify = 4
OpcodeUpdate = 5
)
@ -198,7 +206,8 @@ var CertTypeToString = map[uint16]string{
var StringToCertType = reverseInt16(CertTypeToString)
// DNS queries.
// Question holds a DNS question. There can be multiple questions in the
// question section of a message. Usually there is just one.
type Question struct {
Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed)
Qtype uint16
@ -801,7 +810,7 @@ func cmToM(m, e uint8) string {
s := fmt.Sprintf("%d", m)
for e > 2 {
s += "0"
e -= 1
e--
}
return s
}
@ -838,7 +847,7 @@ func (rr *LOC) String() string {
lon = lon % LOC_HOURS
s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew)
var alt float64 = float64(rr.Altitude) / 100
var alt = float64(rr.Altitude) / 100
alt -= LOC_ALTITUDEBASE
if rr.Altitude%100 != 0 {
s += fmt.Sprintf("%.2fm ", alt)

View file

@ -7,12 +7,12 @@ import (
"syscall"
)
type sessionUDP struct {
type SessionUDP struct {
raddr *net.UDPAddr
context []byte
}
func (s *sessionUDP) RemoteAddr() net.Addr { return s.raddr }
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
// setUDPSocketOptions sets the UDP socket options.
// This function is implemented on a per platform basis. See udp_*.go for more details
@ -37,19 +37,19 @@ func setUDPSocketOptions(conn *net.UDPConn) error {
return nil
}
// readFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
func readFromSessionUDP(conn *net.UDPConn, b []byte) (int, *sessionUDP, error) {
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
oob := make([]byte, 40)
n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
if err != nil {
return n, nil, err
}
return n, &sessionUDP{raddr, oob[:oobn]}, err
return n, &SessionUDP{raddr, oob[:oobn]}, err
}
// writeToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *sessionUDP instead of a net.Addr.
func writeToSessionUDP(conn *net.UDPConn, b []byte, session *sessionUDP) (int, error) {
// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
return n, err
}
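
With ReadFromSessionUDP and WriteToSessionUDP exported, a hand-rolled UDP loop can preserve the out-of-band data that keeps replies on the correct egress interface. A minimal sketch, assuming an illustrative listen address and no error handling beyond skipping bad packets:

package main

import (
	"log"
	"net"

	"github.com/miekg/dns"
)

func main() {
	addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:8053")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 65535)
	for {
		// ReadFromSessionUDP works like ReadFrom but also captures the
		// session data needed to answer on the right interface.
		n, session, err := dns.ReadFromSessionUDP(conn, buf)
		if err != nil {
			continue
		}
		req := new(dns.Msg)
		if err := req.Unpack(buf[:n]); err != nil {
			continue
		}
		reply := new(dns.Msg)
		reply.SetReply(req)
		if out, err := reply.Pack(); err == nil {
			dns.WriteToSessionUDP(conn, out, session)
		}
	}
}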

View file

@ -4,28 +4,28 @@ package dns
import "net"
type sessionUDP struct {
type SessionUDP struct {
raddr *net.UDPAddr
}
// readFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
func readFromSessionUDP(conn *net.UDPConn, b []byte) (int, *sessionUDP, error) {
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
n, raddr, err := conn.ReadFrom(b)
if err != nil {
return n, nil, err
}
session := &sessionUDP{raddr.(*net.UDPAddr)}
session := &SessionUDP{raddr.(*net.UDPAddr)}
return n, session, err
}
// writeToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *sessionUDP instead of a net.Addr.
func writeToSessionUDP(conn *net.UDPConn, b []byte, session *sessionUDP) (int, error) {
// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, err := conn.WriteTo(b, session.raddr)
return n, err
}
func (s *sessionUDP) RemoteAddr() net.Addr { return s.raddr }
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
// setUDPSocketOptions sets the UDP socket options.
// This function is implemented on a per platform basis. See udp_*.go for more details

View file

@ -1,38 +1,3 @@
// DYNAMIC UPDATES
//
// Dynamic updates reuses the DNS message format, but renames three of
// the sections. Question is Zone, Answer is Prerequisite, Authority is
// Update, only the Additional is not renamed. See RFC 2136 for the gory details.
//
// You can set a rather complex set of rules for the existence of absence of
// certain resource records or names in a zone to specify if resource records
// should be added or removed. The table from RFC 2136 supplemented with the Go
// DNS function shows which functions exist to specify the prerequisites.
//
// 3.2.4 - Table Of Metavalues Used In Prerequisite Section
//
// CLASS TYPE RDATA Meaning Function
// --------------------------------------------------------------
// ANY ANY empty Name is in use dns.NameUsed
// ANY rrset empty RRset exists (value indep) dns.RRsetUsed
// NONE ANY empty Name is not in use dns.NameNotUsed
// NONE rrset empty RRset does not exist dns.RRsetNotUsed
// zone rrset rr RRset exists (value dep) dns.Used
//
// The prerequisite section can also be left empty.
// If you have decided on the prerequisites you can tell what RRs should
// be added or deleted. The next table shows the options you have and
// what functions to call.
//
// 3.4.2.6 - Table Of Metavalues Used In Update Section
//
// CLASS TYPE RDATA Meaning Function
// ---------------------------------------------------------------
// ANY ANY empty Delete all RRsets from name dns.RemoveName
// ANY rrset empty Delete an RRset dns.RemoveRRset
// NONE rrset rr Delete an RR from RRset dns.Remove
// zone rrset rr Add to an RRset dns.Insert
//
package dns
// NameUsed sets the RRs in the prereq section to
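
The table removed above maps the RFC 2136 metavalues onto the package's helper methods. A minimal sketch of a dynamic update that uses two of them, NameNotUsed as the prerequisite and Insert as the update; the zone, record and server address are assumptions:

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("host1.example.org. 300 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}

	m := new(dns.Msg)
	m.SetUpdate("example.org.")
	// Prerequisite: the name must not be in use (the NONE/ANY row above).
	m.NameNotUsed([]dns.RR{rr})
	// Update: add the record to its RRset (the zone/rrset/rr row above).
	m.Insert([]dns.RR{rr})

	c := new(dns.Client)
	if _, _, err := c.Exchange(m, "192.0.2.53:53"); err != nil {
		log.Fatal(err)
	}
}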

View file

@ -12,10 +12,9 @@ func TestDynamicUpdateParsing(t *testing.T) {
typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" {
continue
}
r, e := NewRR(prefix + typ)
if e != nil {
t.Log("failure to parse: " + prefix + typ)
t.Fail()
r, err := NewRR(prefix + typ)
if err != nil {
t.Errorf("failure to parse: %s %s: %v", prefix, typ, err)
} else {
t.Logf("parsed: %s", r.String())
}
@ -31,8 +30,7 @@ func TestDynamicUpdateUnpack(t *testing.T) {
msg := new(Msg)
err := msg.Unpack(buf)
if err != nil {
t.Log("failed to unpack: " + err.Error() + "\n" + msg.String())
t.Fail()
t.Errorf("failed to unpack: %v\n%s", err, msg.String())
}
}
@ -45,13 +43,11 @@ func TestDynamicUpdateZeroRdataUnpack(t *testing.T) {
rr.Rrtype = n
bytes, err := m.Pack()
if err != nil {
t.Logf("failed to pack %s: %v", s, err)
t.Fail()
t.Errorf("failed to pack %s: %v", s, err)
continue
}
if err := new(Msg).Unpack(bytes); err != nil {
t.Logf("failed to unpack %s: %v", s, err)
t.Fail()
t.Errorf("failed to unpack %s: %v", s, err)
}
}
}
@ -82,8 +78,7 @@ func TestRemoveRRset(t *testing.T) {
if err := tmp.Unpack(actual); err != nil {
t.Fatalf("Error unpacking actual msg: %v", err)
}
t.Logf("Expected msg:\n%s", expectstr)
t.Logf("Actual msg:\n%v", tmp)
t.Fail()
t.Errorf("Expected msg:\n%s", expectstr)
t.Errorf("Actual msg:\n%v", tmp)
}
}

View file

@ -193,6 +193,7 @@ func (t *Transfer) ReadMsg() (*Msg, error) {
}
// Need to work on the original message p, as that was used to calculate the tsig.
err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
t.tsigRequestMAC = ts.MAC
}
return m, err
}
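
The extra assignment above chains each envelope's MAC into the verification of the next one, which is what lets multi-envelope TSIG transfers validate. A minimal AXFR sketch against a hypothetical server, reusing the key name and secret from the package documentation:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.")
	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())

	tr := new(dns.Transfer)
	tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}

	env, err := tr.In(m, "192.0.2.53:53")
	if err != nil {
		log.Fatal(err)
	}
	for e := range env {
		if e.Error != nil {
			// Each envelope is TSIG-checked as it arrives.
			log.Fatal(e.Error)
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
}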

View file

@ -7,8 +7,8 @@ import (
)
func getIP(s string) string {
a, e := net.LookupAddr(s)
if e != nil {
a, err := net.LookupAddr(s)
if err != nil {
return ""
}
return a[0]
@ -28,17 +28,15 @@ func testClientAXFR(t *testing.T) {
tr := new(Transfer)
if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil {
t.Log("failed to setup axfr: " + err.Error())
t.Fatal()
t.Fatal("failed to setup axfr: ", err)
} else {
for ex := range a {
if ex.Error != nil {
t.Logf("error %s\n", ex.Error.Error())
t.Fail()
t.Errorf("error %v", ex.Error)
break
}
for _, rr := range ex.RR {
t.Logf("%s\n", rr.String())
t.Log(rr.String())
}
}
}
@ -56,14 +54,11 @@ func testClientAXFRMultipleEnvelopes(t *testing.T) {
tr := new(Transfer)
if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil {
t.Log("Failed to setup axfr" + err.Error() + "for server: " + server)
t.Fail()
return
t.Fatalf("Failed to setup axfr %v for server: %v", err, server)
} else {
for ex := range a {
if ex.Error != nil {
t.Logf("Error %s\n", ex.Error.Error())
t.Fail()
t.Errorf("Error %v", ex.Error)
break
}
}
@ -82,17 +77,15 @@ func testClientTsigAXFR(t *testing.T) {
tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
if a, err := tr.In(m, "176.58.119.54:53"); err != nil {
t.Log("failed to setup axfr: " + err.Error())
t.Fatal()
t.Fatal("failed to setup axfr: ", err)
} else {
for ex := range a {
if ex.Error != nil {
t.Logf("error %s\n", ex.Error.Error())
t.Fail()
t.Errorf("error %v", ex.Error)
break
}
for _, rr := range ex.RR {
t.Logf("%s\n", rr.String())
t.Log(rr.String())
}
}
}

View file

@ -1,6 +1,7 @@
package dns
import (
"bytes"
"fmt"
"strconv"
"strings"
@ -24,13 +25,13 @@ func generate(l lex, c chan lex, t chan *Token, o string) string {
if i+1 == len(l.token) {
return "bad step in $GENERATE range"
}
if s, e := strconv.Atoi(l.token[i+1:]); e != nil {
return "bad step in $GENERATE range"
} else {
if s, e := strconv.Atoi(l.token[i+1:]); e == nil {
if s < 0 {
return "bad step in $GENERATE range"
}
step = s
} else {
return "bad step in $GENERATE range"
}
l.token = l.token[:i]
}
@ -46,7 +47,7 @@ func generate(l lex, c chan lex, t chan *Token, o string) string {
if err != nil {
return "bad stop in $GENERATE range"
}
if end < 0 || start < 0 || end <= start {
if end < 0 || start < 0 || end < start {
return "bad range in $GENERATE range"
}
@ -55,14 +56,14 @@ func generate(l lex, c chan lex, t chan *Token, o string) string {
s := ""
BuildRR:
l = <-c
if l.value != _NEWLINE && l.value != _EOF {
if l.value != zNewline && l.value != zEOF {
s += l.token
goto BuildRR
}
for i := start; i <= end; i += step {
var (
escape bool
dom string
dom bytes.Buffer
mod string
err string
offset int
@ -72,7 +73,7 @@ BuildRR:
switch s[j] {
case '\\':
if escape {
dom += "\\"
dom.WriteByte('\\')
escape = false
continue
}
@ -81,17 +82,17 @@ BuildRR:
mod = "%d"
offset = 0
if escape {
dom += "$"
dom.WriteByte('$')
escape = false
continue
}
escape = false
if j+1 >= len(s) { // End of the string
dom += fmt.Sprintf(mod, i+offset)
dom.WriteString(fmt.Sprintf(mod, i+offset))
continue
} else {
if s[j+1] == '$' {
dom += "$"
dom.WriteByte('$')
j++
continue
}
@ -108,17 +109,17 @@ BuildRR:
}
j += 2 + sep // Jump to it
}
dom += fmt.Sprintf(mod, i+offset)
dom.WriteString(fmt.Sprintf(mod, i+offset))
default:
if escape { // Pretty useless here
escape = false
continue
}
dom += string(s[j])
dom.WriteByte(s[j])
}
}
// Re-parse the RR and send it on the current channel t
rx, e := NewRR("$ORIGIN " + o + "\n" + dom)
rx, e := NewRR("$ORIGIN " + o + "\n" + dom.String())
if e != nil {
return e.(*ParseError).err
}
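
With the relaxed range check and the buffer-based substitution above, $GENERATE expansion is driven entirely through the zone parser. A minimal sketch that feeds a $GENERATE directive through ParseZone; the names, step and addresses are illustrative assumptions:

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := "$GENERATE 1-4/1 host-$ IN A 10.0.0.$\n"
	// ParseZone expands the range and yields one Token per generated RR.
	for tok := range dns.ParseZone(strings.NewReader(zone), "example.org.", "inline") {
		if tok.Error != nil {
			fmt.Println("parse error:", tok.Error)
			continue
		}
		fmt.Println(tok.RR)
	}
}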

View file

@ -29,41 +29,41 @@ const maxUint16 = 1<<16 - 1
// * Handle braces - anywhere.
const (
// Zonefile
_EOF = iota
_STRING
_BLANK
_QUOTE
_NEWLINE
_RRTYPE
_OWNER
_CLASS
_DIRORIGIN // $ORIGIN
_DIRTTL // $TTL
_DIRINCLUDE // $INCLUDE
_DIRGENERATE // $GENERATE
zEOF = iota
zString
zBlank
zQuote
zNewline
zRrtpe
zOwner
zClass
zDirOrigin // $ORIGIN
zDirTtl // $TTL
zDirInclude // $INCLUDE
zDirGenerate // $GENERATE
// Privatekey file
_VALUE
_KEY
zValue
zKey
_EXPECT_OWNER_DIR // Ownername
_EXPECT_OWNER_BL // Whitespace after the ownername
_EXPECT_ANY // Expect rrtype, ttl or class
_EXPECT_ANY_NOCLASS // Expect rrtype or ttl
_EXPECT_ANY_NOCLASS_BL // The whitespace after _EXPECT_ANY_NOCLASS
_EXPECT_ANY_NOTTL // Expect rrtype or class
_EXPECT_ANY_NOTTL_BL // Whitespace after _EXPECT_ANY_NOTTL
_EXPECT_RRTYPE // Expect rrtype
_EXPECT_RRTYPE_BL // Whitespace BEFORE rrtype
_EXPECT_RDATA // The first element of the rdata
_EXPECT_DIRTTL_BL // Space after directive $TTL
_EXPECT_DIRTTL // Directive $TTL
_EXPECT_DIRORIGIN_BL // Space after directive $ORIGIN
_EXPECT_DIRORIGIN // Directive $ORIGIN
_EXPECT_DIRINCLUDE_BL // Space after directive $INCLUDE
_EXPECT_DIRINCLUDE // Directive $INCLUDE
_EXPECT_DIRGENERATE // Directive $GENERATE
_EXPECT_DIRGENERATE_BL // Space after directive $GENERATE
zExpectOwnerDir // Ownername
zExpectOwnerBl // Whitespace after the ownername
zExpectAny // Expect rrtype, ttl or class
zExpectAnyNoClass // Expect rrtype or ttl
zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS
zExpectAnyNoTtl // Expect rrtype or class
zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL
zExpectRrtype // Expect rrtype
zExpectRrtypeBl // Whitespace BEFORE rrtype
zExpectRdata // The first element of the rdata
zExpectDirTtlBl // Space after directive $TTL
zExpectDirTtl // Directive $TTL
zExpectDirOriginBl // Space after directive $ORIGIN
zExpectDirOrigin // Directive $ORIGIN
zExpectDirIncludeBl // Space after directive $INCLUDE
zExpectDirInclude // Directive $INCLUDE
zExpectDirGenerate // Directive $GENERATE
zExpectDirGenerateBl // Space after directive $GENERATE
)
// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
@ -88,18 +88,21 @@ type lex struct {
tokenUpper string // uppercase text of the token
length int // lenght of the token
err bool // when true, token text has lexer error
value uint8 // value: _STRING, _BLANK, etc.
value uint8 // value: zString, _BLANK, etc.
line int // line in the file
column int // column in the file
torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
comment string // any comment text seen
}
// *Tokens are returned when a zone file is parsed.
// Token holds the token that are returned when a zone file is parsed.
type Token struct {
RR // the scanned resource record when error is not nil
Error *ParseError // when an error occured, this has the error specifics
Comment string // a potential comment positioned after the RR and on the same line
// The scanned resource record when error is not nil.
RR
// When an error occured, this has the error specifics.
Error *ParseError
// A potential comment positioned after the RR and on the same line.
Comment string
}
// NewRR reads the RR contained in the string s. Only the first RR is
@ -168,17 +171,17 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
}
}()
s := scanInit(r)
c := make(chan lex, 1000)
c := make(chan lex)
// Start the lexer
go zlexer(s, c)
// 6 possible beginnings of a line, _ is a space
// 0. _RRTYPE -> all omitted until the rrtype
// 1. _OWNER _ _RRTYPE -> class/ttl omitted
// 2. _OWNER _ _STRING _ _RRTYPE -> class omitted
// 3. _OWNER _ _STRING _ _CLASS _ _RRTYPE -> ttl/class
// 4. _OWNER _ _CLASS _ _RRTYPE -> ttl omitted
// 5. _OWNER _ _CLASS _ _STRING _ _RRTYPE -> class/ttl (reversed)
// After detecting these, we know the _RRTYPE so we can jump to functions
// 0. zRRTYPE -> all omitted until the rrtype
// 1. zOwner _ zRrtype -> class/ttl omitted
// 2. zOwner _ zString _ zRrtype -> class omitted
// 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
// 4. zOwner _ zClass _ zRrtype -> ttl omitted
// 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
// After detecting these, we know the zRrtype so we can jump to functions
// handling the rdata for each of these types.
if origin == "" {
@ -190,7 +193,7 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
return
}
st := _EXPECT_OWNER_DIR // initial state
st := zExpectOwnerDir // initial state
var h RR_Header
var defttl uint32 = defaultTtl
var prevName string
@ -202,19 +205,19 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
}
switch st {
case _EXPECT_OWNER_DIR:
case zExpectOwnerDir:
// We can also expect a directive, like $TTL or $ORIGIN
h.Ttl = defttl
h.Class = ClassINET
switch l.value {
case _NEWLINE: // Empty line
st = _EXPECT_OWNER_DIR
case _OWNER:
case zNewline:
st = zExpectOwnerDir
case zOwner:
h.Name = l.token
if l.token[0] == '@' {
h.Name = origin
prevName = h.Name
st = _EXPECT_OWNER_BL
st = zExpectOwnerBl
break
}
if h.Name[l.length-1] != '.' {
@ -226,58 +229,58 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
return
}
prevName = h.Name
st = _EXPECT_OWNER_BL
case _DIRTTL:
st = _EXPECT_DIRTTL_BL
case _DIRORIGIN:
st = _EXPECT_DIRORIGIN_BL
case _DIRINCLUDE:
st = _EXPECT_DIRINCLUDE_BL
case _DIRGENERATE:
st = _EXPECT_DIRGENERATE_BL
case _RRTYPE: // Everthing has been omitted, this is the first thing on the line
st = zExpectOwnerBl
case zDirTtl:
st = zExpectDirTtlBl
case zDirOrigin:
st = zExpectDirOriginBl
case zDirInclude:
st = zExpectDirIncludeBl
case zDirGenerate:
st = zExpectDirGenerateBl
case zRrtpe:
h.Name = prevName
h.Rrtype = l.torc
st = _EXPECT_RDATA
case _CLASS: // First thing on the line is the class
st = zExpectRdata
case zClass:
h.Name = prevName
h.Class = l.torc
st = _EXPECT_ANY_NOCLASS_BL
case _BLANK:
st = zExpectAnyNoClassBl
case zBlank:
// Discard, can happen when there is nothing on the
// line except the RR type
case _STRING: // First thing on the is the ttl
if ttl, ok := stringToTtl(l.token); !ok {
case zString:
ttl, ok := stringToTtl(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
return
} else {
h.Ttl = ttl
// Don't about the defttl, we should take the $TTL value
// defttl = ttl
}
st = _EXPECT_ANY_NOTTL_BL
h.Ttl = ttl
// Don't about the defttl, we should take the $TTL value
// defttl = ttl
st = zExpectAnyNoTtlBl
default:
t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
return
}
case _EXPECT_DIRINCLUDE_BL:
if l.value != _BLANK {
case zExpectDirIncludeBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
return
}
st = _EXPECT_DIRINCLUDE
case _EXPECT_DIRINCLUDE:
if l.value != _STRING {
st = zExpectDirInclude
case zExpectDirInclude:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
return
}
neworigin := origin // There may be optionally a new origin set after the filename, if not use current one
l := <-c
switch l.value {
case _BLANK:
case zBlank:
l := <-c
if l.value == _STRING {
if l.value == zString {
if _, ok := IsDomainName(l.token); !ok {
t <- &Token{Error: &ParseError{f, "bad origin name", l}}
return
@ -293,7 +296,7 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
neworigin = l.token
}
}
case _NEWLINE, _EOF:
case zNewline, zEOF:
// Ok
default:
t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}}
@ -310,15 +313,15 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
return
}
parseZone(r1, l.token, neworigin, t, include+1)
st = _EXPECT_OWNER_DIR
case _EXPECT_DIRTTL_BL:
if l.value != _BLANK {
st = zExpectOwnerDir
case zExpectDirTtlBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
return
}
st = _EXPECT_DIRTTL
case _EXPECT_DIRTTL:
if l.value != _STRING {
st = zExpectDirTtl
case zExpectDirTtl:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
return
}
@ -326,21 +329,21 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
t <- &Token{Error: e}
return
}
if ttl, ok := stringToTtl(l.token); !ok {
ttl, ok := stringToTtl(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
return
} else {
defttl = ttl
}
st = _EXPECT_OWNER_DIR
case _EXPECT_DIRORIGIN_BL:
if l.value != _BLANK {
defttl = ttl
st = zExpectOwnerDir
case zExpectDirOriginBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
return
}
st = _EXPECT_DIRORIGIN
case _EXPECT_DIRORIGIN:
if l.value != _STRING {
st = zExpectDirOrigin
case zExpectDirOrigin:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
return
}
@ -360,15 +363,15 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
} else {
origin = l.token
}
st = _EXPECT_OWNER_DIR
case _EXPECT_DIRGENERATE_BL:
if l.value != _BLANK {
st = zExpectOwnerDir
case zExpectDirGenerateBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}}
return
}
st = _EXPECT_DIRGENERATE
case _EXPECT_DIRGENERATE:
if l.value != _STRING {
st = zExpectDirGenerate
case zExpectDirGenerate:
if l.value != zString {
t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
return
}
@ -376,90 +379,90 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
t <- &Token{Error: &ParseError{f, e, l}}
return
}
st = _EXPECT_OWNER_DIR
case _EXPECT_OWNER_BL:
if l.value != _BLANK {
st = zExpectOwnerDir
case zExpectOwnerBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank after owner", l}}
return
}
st = _EXPECT_ANY
case _EXPECT_ANY:
st = zExpectAny
case zExpectAny:
switch l.value {
case _RRTYPE:
case zRrtpe:
h.Rrtype = l.torc
st = _EXPECT_RDATA
case _CLASS:
st = zExpectRdata
case zClass:
h.Class = l.torc
st = _EXPECT_ANY_NOCLASS_BL
case _STRING: // TTL is this case
if ttl, ok := stringToTtl(l.token); !ok {
st = zExpectAnyNoClassBl
case zString:
ttl, ok := stringToTtl(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
return
} else {
h.Ttl = ttl
// defttl = ttl // don't set the defttl here
}
st = _EXPECT_ANY_NOTTL_BL
h.Ttl = ttl
// defttl = ttl // don't set the defttl here
st = zExpectAnyNoTtlBl
default:
t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
return
}
case _EXPECT_ANY_NOCLASS_BL:
if l.value != _BLANK {
case zExpectAnyNoClassBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank before class", l}}
return
}
st = _EXPECT_ANY_NOCLASS
case _EXPECT_ANY_NOTTL_BL:
if l.value != _BLANK {
st = zExpectAnyNoClass
case zExpectAnyNoTtlBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
return
}
st = _EXPECT_ANY_NOTTL
case _EXPECT_ANY_NOTTL:
st = zExpectAnyNoTtl
case zExpectAnyNoTtl:
switch l.value {
case _CLASS:
case zClass:
h.Class = l.torc
st = _EXPECT_RRTYPE_BL
case _RRTYPE:
st = zExpectRrtypeBl
case zRrtpe:
h.Rrtype = l.torc
st = _EXPECT_RDATA
st = zExpectRdata
default:
t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
return
}
case _EXPECT_ANY_NOCLASS:
case zExpectAnyNoClass:
switch l.value {
case _STRING: // TTL
if ttl, ok := stringToTtl(l.token); !ok {
case zString:
ttl, ok := stringToTtl(l.token)
if !ok {
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
return
} else {
h.Ttl = ttl
// defttl = ttl // don't set the def ttl anymore
}
st = _EXPECT_RRTYPE_BL
case _RRTYPE:
h.Ttl = ttl
// defttl = ttl // don't set the def ttl anymore
st = zExpectRrtypeBl
case zRrtpe:
h.Rrtype = l.torc
st = _EXPECT_RDATA
st = zExpectRdata
default:
t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
return
}
case _EXPECT_RRTYPE_BL:
if l.value != _BLANK {
case zExpectRrtypeBl:
if l.value != zBlank {
t <- &Token{Error: &ParseError{f, "no blank before RR type", l}}
return
}
st = _EXPECT_RRTYPE
case _EXPECT_RRTYPE:
if l.value != _RRTYPE {
st = zExpectRrtype
case zExpectRrtype:
if l.value != zRrtpe {
t <- &Token{Error: &ParseError{f, "unknown RR type", l}}
return
}
h.Rrtype = l.torc
st = _EXPECT_RDATA
case _EXPECT_RDATA:
st = zExpectRdata
case zExpectRdata:
r, e, c1 := setRR(h, c, origin, f)
if e != nil {
// If e.lex is nil than we have encounter a unknown RR type
@ -471,7 +474,7 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
return
}
t <- &Token{RR: r, Comment: c1}
st = _EXPECT_OWNER_DIR
st = zExpectOwnerDir
}
}
// If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this
@ -535,60 +538,60 @@ func zlexer(s *scan, c chan lex) {
// Space directly in the beginning, handled in the grammar
} else if owner {
// If we have a string and its the first, make it an owner
l.value = _OWNER
l.value = zOwner
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
// escape $... start with a \ not a $, so this will work
switch l.tokenUpper {
case "$TTL":
l.value = _DIRTTL
l.value = zDirTtl
case "$ORIGIN":
l.value = _DIRORIGIN
l.value = zDirOrigin
case "$INCLUDE":
l.value = _DIRINCLUDE
l.value = zDirInclude
case "$GENERATE":
l.value = _DIRGENERATE
l.value = zDirGenerate
}
debug.Printf("[7 %+v]", l.token)
c <- l
} else {
l.value = _STRING
l.value = zString
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
if !rrtype {
if t, ok := StringToType[l.tokenUpper]; ok {
l.value = _RRTYPE
l.value = zRrtpe
l.torc = t
rrtype = true
} else {
if strings.HasPrefix(l.tokenUpper, "TYPE") {
if t, ok := typeToInt(l.token); !ok {
t, ok := typeToInt(l.token)
if !ok {
l.token = "unknown RR type"
l.err = true
c <- l
return
} else {
l.value = _RRTYPE
l.torc = t
}
l.value = zRrtpe
l.torc = t
}
}
if t, ok := StringToClass[l.tokenUpper]; ok {
l.value = _CLASS
l.value = zClass
l.torc = t
} else {
if strings.HasPrefix(l.tokenUpper, "CLASS") {
if t, ok := classToInt(l.token); !ok {
t, ok := classToInt(l.token)
if !ok {
l.token = "unknown class"
l.err = true
c <- l
return
} else {
l.value = _CLASS
l.torc = t
}
l.value = zClass
l.torc = t
}
}
}
@ -598,7 +601,7 @@ func zlexer(s *scan, c chan lex) {
stri = 0
// I reverse space stuff here
if !space && !commt {
l.value = _BLANK
l.value = zBlank
l.token = " "
l.length = 1
debug.Printf("[5 %+v]", l.token)
@ -620,7 +623,7 @@ func zlexer(s *scan, c chan lex) {
break
}
if stri > 0 {
l.value = _STRING
l.value = zString
l.token = string(str[:stri])
l.length = stri
debug.Printf("[4 %+v]", l.token)
@ -656,7 +659,7 @@ func zlexer(s *scan, c chan lex) {
if brace == 0 {
owner = true
owner = true
l.value = _NEWLINE
l.value = zNewline
l.token = "\n"
l.length = 1
l.comment = string(com[:comi])
@ -674,14 +677,14 @@ func zlexer(s *scan, c chan lex) {
if brace == 0 {
// If there is previous text, we should output it here
if stri != 0 {
l.value = _STRING
l.value = zString
l.token = string(str[:stri])
l.tokenUpper = strings.ToUpper(l.token)
l.length = stri
if !rrtype {
if t, ok := StringToType[l.tokenUpper]; ok {
l.value = _RRTYPE
l.value = zRrtpe
l.torc = t
rrtype = true
}
@ -689,7 +692,7 @@ func zlexer(s *scan, c chan lex) {
debug.Printf("[2 %+v]", l.token)
c <- l
}
l.value = _NEWLINE
l.value = zNewline
l.token = "\n"
l.length = 1
debug.Printf("[1 %+v]", l.token)
@ -733,7 +736,7 @@ func zlexer(s *scan, c chan lex) {
space = false
// send previous gathered text and the quote
if stri != 0 {
l.value = _STRING
l.value = zString
l.token = string(str[:stri])
l.length = stri
@ -743,7 +746,7 @@ func zlexer(s *scan, c chan lex) {
}
// send quote itself as separate token
l.value = _QUOTE
l.value = zQuote
l.token = "\""
l.length = 1
c <- l
@ -795,7 +798,7 @@ func zlexer(s *scan, c chan lex) {
// Send remainder
l.token = string(str[:stri])
l.length = stri
l.value = _STRING
l.value = zString
debug.Printf("[%+v]", l.token)
c <- l
}
@ -927,15 +930,15 @@ func slurpRemainder(c chan lex, f string) (*ParseError, string) {
l := <-c
com := ""
switch l.value {
case _BLANK:
case zBlank:
l = <-c
com = l.comment
if l.value != _NEWLINE && l.value != _EOF {
if l.value != zNewline && l.value != zEOF {
return &ParseError{f, "garbage after rdata", l}, ""
}
case _NEWLINE:
case zNewline:
com = l.comment
case _EOF:
case zEOF:
default:
return &ParseError{f, "garbage after rdata", l}, ""
}

File diff suppressed because it is too large

View file

@ -20,7 +20,7 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/matttproud/golang_protobuf_extensions/ext"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/client_golang/model"
)
@ -40,7 +40,7 @@ func (m *metricFamilyProcessor) ProcessSingle(i io.Reader, out Ingester, o *Proc
for {
family.Reset()
if _, err := ext.ReadDelimited(i, family); err != nil {
if _, err := pbutil.ReadDelimited(i, family); err != nil {
if err == io.EOF {
return nil
}
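
The processor above now reads length-delimited MetricFamily messages via pbutil. A minimal round-trip sketch of the delimited encoding it consumes; the metric name is an assumption:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	in := &dto.MetricFamily{
		Name: proto.String("example_total"),
		Type: dto.MetricType_COUNTER.Enum(),
	}

	var buf bytes.Buffer
	// WriteDelimited prefixes the message with its varint-encoded length,
	// which is exactly the framing ReadDelimited expects on the other side.
	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
		log.Fatal(err)
	}

	out := &dto.MetricFamily{}
	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetName())
}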

View file

@ -18,8 +18,10 @@ import (
"fmt"
"math"
"net/http"
"os"
"runtime"
"sort"
"time"
dto "github.com/prometheus/client_model/go"
@ -498,3 +500,19 @@ func ExampleHistogram() {
// >
// >
}
func ExamplePushCollectors() {
hostname, _ := os.Hostname()
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_time",
Help: "The timestamp of the last succesful completion of a DB backup.",
})
completionTime.Set(float64(time.Now().Unix()))
if err := prometheus.PushCollectors(
"db_backup", hostname,
"http://pushgateway:9091",
completionTime,
); err != nil {
fmt.Println("Could not push completion time to Pushgateway:", err)
}
}

View file

@ -13,6 +13,8 @@
package prometheus
import "github.com/prometheus/procfs"
type processCollector struct {
pid int
collectFn func(chan<- Metric)
@ -79,7 +81,7 @@ func NewProcessCollectorPIDFn(
}
// Set up process metric collection if supported by the runtime.
if processCollectSupported() {
if _, err := procfs.NewStat(); err == nil {
c.collectFn = c.processCollect
}
@ -100,3 +102,41 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch)
}
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
// client allows users to configure the error behavior.
func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn()
if err != nil {
return
}
p, err := procfs.NewProc(pid)
if err != nil {
return
}
if stat, err := p.NewStat(); err == nil {
c.cpuTotal.Set(stat.CPUTime())
ch <- c.cpuTotal
c.vsize.Set(float64(stat.VirtualMemory()))
ch <- c.vsize
c.rss.Set(float64(stat.ResidentMemory()))
ch <- c.rss
if startTime, err := stat.StartTime(); err == nil {
c.startTime.Set(startTime)
ch <- c.startTime
}
}
if fds, err := p.FileDescriptorsLen(); err == nil {
c.openFDs.Set(float64(fds))
ch <- c.openFDs
}
if limits, err := p.NewLimits(); err == nil {
c.maxFDs.Set(float64(limits.OpenFiles))
ch <- c.maxFDs
}
}
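
processCollect pulls everything from procfs, and the same calls can be used directly where /proc is available. A minimal sketch built only from the procfs calls shown above; it inspects the current process:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.NewProc(os.Getpid())
	if err != nil {
		log.Fatal(err) // typically means no /proc on this platform
	}
	stat, err := p.NewStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cpu seconds:", stat.CPUTime())
	fmt.Println("virtual memory bytes:", stat.VirtualMemory())
	fmt.Println("resident memory bytes:", stat.ResidentMemory())
	if limits, err := p.NewLimits(); err == nil {
		fmt.Println("max open files:", limits.OpenFiles)
	}
}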

View file

@ -1,63 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,cgo plan9,cgo solaris,cgo
package prometheus
import "github.com/prometheus/procfs"
func processCollectSupported() bool {
if _, err := procfs.NewStat(); err == nil {
return true
}
return false
}
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
// client allows users to configure the error behavior.
func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn()
if err != nil {
return
}
p, err := procfs.NewProc(pid)
if err != nil {
return
}
if stat, err := p.NewStat(); err == nil {
c.cpuTotal.Set(stat.CPUTime())
ch <- c.cpuTotal
c.vsize.Set(float64(stat.VirtualMemory()))
ch <- c.vsize
c.rss.Set(float64(stat.ResidentMemory()))
ch <- c.rss
if startTime, err := stat.StartTime(); err == nil {
c.startTime.Set(startTime)
ch <- c.startTime
}
}
if fds, err := p.FileDescriptorsLen(); err == nil {
c.openFDs.Set(float64(fds))
ch <- c.openFDs
}
if limits, err := p.NewLimits(); err == nil {
c.maxFDs.Set(float64(limits.OpenFiles))
ch <- c.maxFDs
}
}

View file

@ -1,24 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux,!plan9,!solaris !cgo
package prometheus
func processCollectSupported() bool {
return false
}
func (c *processCollector) processCollect(ch chan<- Metric) {
panic("unreachable")
}

View file

@ -0,0 +1,65 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package prometheus
// Push triggers a metric collection by the default registry and pushes all
// collected metrics to the Pushgateway specified by addr. See the Pushgateway
// documentation for detailed implications of the job and instance
// parameter. instance can be left empty. You can use just host:port or ip:port
// as url, in which case 'http://' is added automatically. You can also include
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and instance will
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
// to push to the Pushgateway.)
func Push(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "PUT")
}
// PushAdd works like Push, but only previously pushed metrics with the same
// name (and the same job and instance) will be replaced. (It uses HTTP method
// 'POST' to push to the Pushgateway.)
func PushAdd(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "POST")
}
// PushCollectors works like Push, but it does not collect from the default
// registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "PUT", collectors...)
}
// PushAddCollectors works like PushAdd, but it does not collect from the
// default registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "POST", collectors...)
}
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
r := newRegistry()
for _, collector := range collectors {
if _, err := r.Register(collector); err != nil {
return err
}
}
return r.Push(job, instance, url, method)
}

View file

@ -158,14 +158,19 @@ func Unregister(c Collector) bool {
// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
// are collected. The hook function must be set before metrics collection begins
// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
// MetricFamily protobufs returned by the hook function are added to the
// delivered metrics. Each returned MetricFamily must have a unique name (also
// taking into account the MetricFamilies created in the regular way).
// MetricFamily protobufs returned by the hook function are merged with the
// metrics collected in the usual way.
//
// This is a way to directly inject MetricFamily protobufs managed and owned by
// the caller. The caller has full responsibility. No sanity checks are
// performed on the returned protobufs (besides the name checks described
// above). The function must be callable at any time and concurrently.
// the caller. The caller has full responsibility. As no registration of the
// injected metrics has happened, there is no descriptor to check against, and
// there are no registration-time checks. If collect-time checks are disabled
// (see function EnableCollectChecks), no sanity checks are performed on the
// returned protobufs at all. If collect-checks are enabled, type and uniqueness
// checks are performed, but no further consistency checks (which would require
// knowledge of a metric descriptor).
//
// The function must be callable at any time and concurrently.
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
defRegistry.metricFamilyInjectionHook = hook
}
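
A minimal sketch of installing an injection hook that returns one externally managed family on every scrape; the metric name, value and listen port are assumptions, and prometheus.Handler() is the package's standard HTTP handler:

package main

import (
	"net/http"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// With the merge behaviour described above, the injected family may even
	// share a name with a registered one, as long as types stay consistent.
	prometheus.SetMetricFamilyInjectionHook(func() []*dto.MetricFamily {
		return []*dto.MetricFamily{
			{
				Name: proto.String("external_events_total"),
				Help: proto.String("Events counted outside this process."),
				Type: dto.MetricType_COUNTER.Enum(),
				Metric: []*dto.Metric{
					{Counter: &dto.Counter{Value: proto.Float64(42)}},
				},
			},
		}
	})
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}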
@ -187,30 +192,10 @@ func EnableCollectChecks(b bool) {
defRegistry.collectChecksEnabled = b
}
// Push triggers a metric collection and pushes all collected metrics to the
// Pushgateway specified by addr. See the Pushgateway documentation for detailed
// implications of the job and instance parameter. instance can be left
// empty. The Pushgateway will then use the client's IP number instead. Use just
// host:port or ip:port ass addr. (Don't add 'http://' or any path.)
//
// Note that all previously pushed metrics with the same job and instance will
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
// to push to the Pushgateway.)
func Push(job, instance, addr string) error {
return defRegistry.Push(job, instance, addr, "PUT")
}
// PushAdd works like Push, but only previously pushed metrics with the same
// name (and the same job and instance) will be replaced. (It uses HTTP method
// 'POST' to push to the Pushgateway.)
func PushAdd(job, instance, addr string) error {
return defRegistry.Push(job, instance, addr, "POST")
}
// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
// certain encoding. It returns the number of bytes written and any error
// encountered. Note that ext.WriteDelimited and text.MetricFamilyToText are
// encoders.
// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
// are encoders.
type encoder func(io.Writer, *dto.MetricFamily) (int, error)
type registry struct {
@ -346,10 +331,13 @@ func (r *registry) Unregister(c Collector) bool {
return true
}
func (r *registry) Push(job, instance, addr, method string) error {
u := fmt.Sprintf("http://%s/metrics/jobs/%s", addr, url.QueryEscape(job))
func (r *registry) Push(job, instance, pushURL, method string) error {
if !strings.Contains(pushURL, "://") {
pushURL = "http://" + pushURL
}
pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
if instance != "" {
u += "/instances/" + url.QueryEscape(instance)
pushURL += "/instances/" + url.QueryEscape(instance)
}
buf := r.getBuf()
defer r.giveBuf(buf)
@ -359,7 +347,7 @@ func (r *registry) Push(job, instance, addr, method string) error {
}
return err
}
req, err := http.NewRequest(method, u, buf)
req, err := http.NewRequest(method, pushURL, buf)
if err != nil {
return err
}
@ -370,7 +358,7 @@ func (r *registry) Push(job, instance, addr, method string) error {
}
defer resp.Body.Close()
if resp.StatusCode != 202 {
return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, u)
return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
}
return nil
}
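
A small illustration of the URL shapes this produces; buildPushURL is a hypothetical helper that mirrors the construction above and is not part of the library:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// buildPushURL mirrors registry.Push: add the scheme if missing, then append
// the jobs (and optional instances) path segments.
func buildPushURL(job, instance, pushURL string) string {
	if !strings.Contains(pushURL, "://") {
		pushURL = "http://" + pushURL
	}
	pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
	if instance != "" {
		pushURL += "/instances/" + url.QueryEscape(instance)
	}
	return pushURL
}

func main() {
	fmt.Println(buildPushURL("db_backup", "host1", "pushgateway:9091"))
	// -> http://pushgateway:9091/metrics/jobs/db_backup/instances/host1
	fmt.Println(buildPushURL("db_backup", "", "https://push.example.org"))
	// -> https://push.example.org/metrics/jobs/db_backup
}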
@ -479,10 +467,26 @@ func (r *registry) writePB(w io.Writer, writeEncoded encoder) (int, error) {
if r.metricFamilyInjectionHook != nil {
for _, mf := range r.metricFamilyInjectionHook() {
if _, exists := metricFamiliesByName[mf.GetName()]; exists {
return 0, fmt.Errorf("metric family with duplicate name injected: %s", mf)
existingMF, exists := metricFamiliesByName[mf.GetName()]
if !exists {
metricFamiliesByName[mf.GetName()] = mf
if r.collectChecksEnabled {
for _, m := range mf.Metric {
if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
return 0, err
}
}
}
continue
}
for _, m := range mf.Metric {
if r.collectChecksEnabled {
if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
return 0, err
}
}
existingMF.Metric = append(existingMF.Metric, m)
}
metricFamiliesByName[mf.GetName()] = mf
}
}
@ -523,11 +527,42 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
)
}
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
h := fnv.New64a()
var buf bytes.Buffer
buf.WriteString(metricFamily.GetName())
buf.WriteByte(model.SeparatorByte)
h.Write(buf.Bytes())
for _, lp := range dtoMetric.Label {
buf.Reset()
buf.WriteString(lp.GetValue())
buf.WriteByte(model.SeparatorByte)
h.Write(buf.Bytes())
}
metricHash := h.Sum64()
if _, exists := metricHashes[metricHash]; exists {
return fmt.Errorf(
"collected metric %q was collected before with the same name and label values",
dtoMetric,
)
}
metricHashes[metricHash] = struct{}{}
if desc == nil {
return nil // Nothing left to check if we have no desc.
}
// Desc consistency with metric family.
if metricFamily.GetName() != desc.fqName {
return fmt.Errorf(
"collected metric %q has name %q but should have %q",
dtoMetric, metricFamily.GetName(), desc.fqName,
)
}
if metricFamily.GetHelp() != desc.help {
return fmt.Errorf(
"collected metric %q has help %q but should have %q",
dtoMetric, desc.help, metricFamily.GetHelp(),
dtoMetric, metricFamily.GetHelp(), desc.help,
)
}
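The uniqueness check above hashes the family name plus the label values in order, each followed by model.SeparatorByte. A standalone sketch of the same identity hash (metricIdentityHash is a hypothetical helper, not part of the package):

```go
package main

import (
	"bytes"
	"fmt"
	"hash/fnv"

	"github.com/prometheus/client_golang/model"
)

// metricIdentityHash mirrors the duplicate-detection hash: FNV-64a over the
// metric family name and the label values, each followed by the separator byte.
func metricIdentityHash(name string, labelValues ...string) uint64 {
	h := fnv.New64a()
	var buf bytes.Buffer
	buf.WriteString(name)
	buf.WriteByte(model.SeparatorByte)
	h.Write(buf.Bytes())
	for _, v := range labelValues {
		buf.Reset()
		buf.WriteString(v)
		buf.WriteByte(model.SeparatorByte)
		h.Write(buf.Bytes())
	}
	return h.Sum64()
}

func main() {
	// Two metrics of the same family collide only if all label values match.
	fmt.Println(metricIdentityHash("http_requests_total", "GET", "200"))
	fmt.Println(metricIdentityHash("http_requests_total", "GET", "404"))
}
```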
@ -557,27 +592,6 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
}
}
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
h := fnv.New64a()
var buf bytes.Buffer
buf.WriteString(desc.fqName)
buf.WriteByte(model.SeparatorByte)
h.Write(buf.Bytes())
for _, lp := range dtoMetric.Label {
buf.Reset()
buf.WriteString(lp.GetValue())
buf.WriteByte(model.SeparatorByte)
h.Write(buf.Bytes())
}
metricHash := h.Sum64()
if _, exists := metricHashes[metricHash]; exists {
return fmt.Errorf(
"collected metric %q was collected before with the same name and label values",
dtoMetric,
)
}
metricHashes[metricHash] = struct{}{}
r.mtx.RLock() // Remaining checks need the read lock.
defer r.mtx.RUnlock()
@ -712,6 +726,15 @@ func (s metricSorter) Swap(i, j int) {
}
func (s metricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with it, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
return len(s[i].Label) < len(s[j].Label)
}
for n, lp := range s[i].Label {
vi := lp.GetValue()
vj := s[j].Label[n].GetValue()

View file

@ -61,31 +61,29 @@ func testHandler(t testing.TB) {
varintBuf := make([]byte, binary.MaxVarintLen32)
externalMetricFamily := []*dto.MetricFamily{
{
Name: proto.String("externalname"),
Help: proto.String("externaldocstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("externallabelname"),
Value: proto.String("externalval1"),
},
{
Name: proto.String("externalconstname"),
Value: proto.String("externalconstvalue"),
},
externalMetricFamily := &dto.MetricFamily{
Name: proto.String("externalname"),
Help: proto.String("externaldocstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("externallabelname"),
Value: proto.String("externalval1"),
},
Counter: &dto.Counter{
Value: proto.Float64(1),
{
Name: proto.String("externalconstname"),
Value: proto.String("externalconstvalue"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(1),
},
},
},
}
marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily[0])
marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily)
if err != nil {
t.Fatal(err)
}
@ -216,16 +214,42 @@ metric: <
expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`)
externalMetricFamilyWithSameName := &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("inconsistent help string does not matter here"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("constname"),
Value: proto.String("constvalue"),
},
{
Name: proto.String("labelname"),
Value: proto.String("different_val"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(42),
},
},
},
}
expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`)
type output struct {
headers map[string]string
body []byte
}
var scenarios = []struct {
headers map[string]string
out output
withCounter bool
withExternalMF bool
headers map[string]string
out output
collector Collector
externalMF []*dto.MetricFamily
}{
{ // 0
headers: map[string]string{
@ -281,7 +305,7 @@ metric: <
},
body: expectedMetricFamilyAsText,
},
withCounter: true,
collector: metricVec,
},
{ // 5
headers: map[string]string{
@ -293,7 +317,7 @@ metric: <
},
body: expectedMetricFamilyAsBytes,
},
withCounter: true,
collector: metricVec,
},
{ // 6
headers: map[string]string{
@ -305,7 +329,7 @@ metric: <
},
body: externalMetricFamilyAsText,
},
withExternalMF: true,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 7
headers: map[string]string{
@ -317,7 +341,7 @@ metric: <
},
body: externalMetricFamilyAsBytes,
},
withExternalMF: true,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 8
headers: map[string]string{
@ -335,8 +359,8 @@ metric: <
[]byte{},
),
},
withCounter: true,
withExternalMF: true,
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 9
headers: map[string]string{
@ -359,7 +383,7 @@ metric: <
},
body: expectedMetricFamilyAsText,
},
withCounter: true,
collector: metricVec,
},
{ // 11
headers: map[string]string{
@ -377,8 +401,8 @@ metric: <
[]byte{},
),
},
withCounter: true,
withExternalMF: true,
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 12
headers: map[string]string{
@ -396,8 +420,8 @@ metric: <
[]byte{},
),
},
withCounter: true,
withExternalMF: true,
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 13
headers: map[string]string{
@ -415,8 +439,8 @@ metric: <
[]byte{},
),
},
withCounter: true,
withExternalMF: true,
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 14
headers: map[string]string{
@ -434,20 +458,42 @@ metric: <
[]byte{},
),
},
withCounter: true,
withExternalMF: true,
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 15
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
},
body: bytes.Join(
[][]byte{
externalMetricFamilyAsProtoCompactText,
expectedMetricFamilyMergedWithExternalAsProtoCompactText,
},
[]byte{},
),
},
collector: metricVec,
externalMF: []*dto.MetricFamily{
externalMetricFamily,
externalMetricFamilyWithSameName,
},
},
}
for i, scenario := range scenarios {
registry := newRegistry()
registry.collectChecksEnabled = true
if scenario.withCounter {
registry.Register(metricVec)
if scenario.collector != nil {
registry.Register(scenario.collector)
}
if scenario.withExternalMF {
if scenario.externalMF != nil {
registry.metricFamilyInjectionHook = func() []*dto.MetricFamily {
return externalMetricFamily
return scenario.externalMF
}
}
writer := &fakeResponseWriter{

View file

@ -16,6 +16,7 @@ package prometheus
import (
"fmt"
"hash/fnv"
"math"
"sort"
"sync"
"time"
@ -277,10 +278,8 @@ func (s *summary) Write(out *dto.Metric) error {
s.bufMtx.Lock()
s.mtx.Lock()
if len(s.hotBuf) != 0 {
s.swapBufs(time.Now())
}
// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
s.swapBufs(time.Now())
s.bufMtx.Unlock()
s.flushColdBuf()
@ -288,9 +287,15 @@ func (s *summary) Write(out *dto.Metric) error {
sum.SampleSum = proto.Float64(s.sum)
for _, rank := range s.sortedObjectives {
var q float64
if s.headStream.Count() == 0 {
q = math.NaN()
} else {
q = s.headStream.Query(rank)
}
qs = append(qs, &dto.Quantile{
Quantile: proto.Float64(rank),
Value: proto.Float64(s.headStream.Query(rank)),
Value: proto.Float64(q),
})
}

View file

@ -289,6 +289,11 @@ func TestSummaryVecConcurrency(t *testing.T) {
}
func TestSummaryDecay(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test in short mode.")
// More because it depends on timing than because it is particularly long...
}
sum := NewSummary(SummaryOpts{
Name: "test_summary",
Help: "helpless",
@ -315,6 +320,12 @@ func TestSummaryDecay(t *testing.T) {
}
}
tick.Stop()
// Wait for MaxAge without observations and make sure quantiles are NaN.
time.Sleep(100 * time.Millisecond)
sum.Write(m)
if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
t.Errorf("got %f, want NaN after expiration", got)
}
}
func getBounds(vars []float64, q, ε float64) (min, max float64) {

View file

@ -21,7 +21,7 @@ import (
"testing"
dto "github.com/prometheus/client_model/go"
"github.com/matttproud/golang_protobuf_extensions/ext"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
)
// Benchmarks to show how much penalty text format parsing actually inflicts.
@ -101,7 +101,7 @@ func BenchmarkParseProto(b *testing.B) {
in := bytes.NewReader(data)
for {
family.Reset()
if _, err := ext.ReadDelimited(in, family); err != nil {
if _, err := pbutil.ReadDelimited(in, family); err != nil {
if err == io.EOF {
break
}
@ -129,7 +129,7 @@ func BenchmarkParseProtoGzip(b *testing.B) {
}
for {
family.Reset()
if _, err := ext.ReadDelimited(in, family); err != nil {
if _, err := pbutil.ReadDelimited(in, family); err != nil {
if err == io.EOF {
break
}
@ -156,7 +156,7 @@ func BenchmarkParseProtoMap(b *testing.B) {
in := bytes.NewReader(data)
for {
family := &dto.MetricFamily{}
if _, err := ext.ReadDelimited(in, family); err != nil {
if _, err := pbutil.ReadDelimited(in, family); err != nil {
if err == io.EOF {
break
}

View file

@ -18,7 +18,7 @@ import (
"io"
"github.com/golang/protobuf/proto"
"github.com/matttproud/golang_protobuf_extensions/ext"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go"
)
@ -27,7 +27,7 @@ import (
// protobuf format and returns the number of bytes written and any error
// encountered.
func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) {
return ext.WriteDelimited(w, p)
return pbutil.WriteDelimited(w, p)
}
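For reference, a minimal round trip through the renamed pbutil package (the metric family contents are illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	in := &dto.MetricFamily{
		Name: proto.String("example_total"),
		Type: dto.MetricType_COUNTER.Enum(),
	}

	var buf bytes.Buffer
	// Write the message length-delimited...
	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
		panic(err)
	}

	// ...and read it back.
	out := &dto.MetricFamily{}
	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName()) // example_total
}
```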
// WriteProtoText writes the MetricFamily to the writer in text format and

View file

@ -0,0 +1,5 @@
language: go
go:
- 1.3
- 1.4
- tip

View file

@ -4,8 +4,4 @@ This procfs package provides functions to retrieve system, kernel and process
metrics from the pseudo-filesystem proc.
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Circle CI](https://circleci.com/gh/prometheus/procfs.svg?style=svg)](https://circleci.com/gh/prometheus/procfs)
# Testing
$ go test
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)

View file

@ -22,7 +22,7 @@
// "fmt"
// "log"
//
// "github.com/prometheus/client_golang/procfs"
// "github.com/prometheus/procfs"
// )
//
// func main() {
@ -31,7 +31,7 @@
// log.Fatalf("could not get process: %s", err)
// }
//
// stat, err := p.Stat()
// stat, err := p.NewStat()
// if err != nil {
// log.Fatalf("could not get process stat: %s", err)
// }

View file

@ -7,8 +7,22 @@ import (
"os"
)
// #include <unistd.h>
import "C"
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
// required cgo. However, that caused a lot of problems regarding
// cross-compilation. Alternatives such as running a binary to determine the
// value, or trying to derive it in some other way were all problematic.
// After much research it was determined that USER_HZ is actually hardcoded to
// 100 on all Go-supported platforms as of the time of this writing. This is
// why we decided to hardcode it here as well. It is not impossible that there
// could be systems with exceptions, but they should be very exotic edge cases,
// and in that case, the worst outcome will be two misreported metrics.
//
// See also the following discussions:
//
// - https://github.com/prometheus/node_exporter/issues/52
// - https://github.com/prometheus/procfs/pull/2
// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
const userHZ = 100
// ProcStat provides status information about the process,
// read from /proc/[pid]/stat.
@ -152,14 +166,10 @@ func (s ProcStat) StartTime() (float64, error) {
if err != nil {
return 0, err
}
return float64(stat.BootTime) + (float64(s.Starttime) / ticks()), nil
return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
}
// CPUTime returns the total CPU user and system time in seconds.
func (s ProcStat) CPUTime() float64 {
return float64(s.UTime+s.STime) / ticks()
}
func ticks() float64 {
return float64(C.sysconf(C._SC_CLK_TCK)) // most likely 100
return float64(s.UTime+s.STime) / userHZ
}
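To make the unit conversion concrete, a small self-contained sketch (the tick counts are made-up example values):

```go
package main

import "fmt"

// userHZ is hardcoded to 100, as explained above.
const userHZ = 100

func main() {
	// utime and stime from /proc/[pid]/stat are measured in clock ticks.
	utime, stime := uint64(4200), uint64(180) // illustrative values
	fmt.Printf("total CPU time: %.2fs\n", float64(utime+stime)/userHZ)
	// Output: total CPU time: 43.80s
}
```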

View file

@ -347,12 +347,14 @@ func recoverTable(s *session, o *opt.Options) error {
return err
}
iter := tr.NewIterator(nil, nil)
iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
if errors.IsCorrupted(err) {
s.logf("table@recovery block corruption @%d %q", file.Num(), err)
tcorruptedBlock++
}
})
if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
itererr.SetErrorCallback(func(err error) {
if errors.IsCorrupted(err) {
s.logf("table@recovery block corruption @%d %q", file.Num(), err)
tcorruptedBlock++
}
})
}
// Scan the table.
for iter.Next() {

View file

@ -8,6 +8,7 @@ package leveldb
import (
"errors"
"math/rand"
"runtime"
"sync"
"sync/atomic"
@ -80,6 +81,10 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
return iter
}
func (db *DB) iterSamplingRate() int {
return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
}
type dir int
const (
@ -98,11 +103,21 @@ type dbIter struct {
seq uint64
strict bool
dir dir
key []byte
value []byte
err error
releaser util.Releaser
samplingGap int
dir dir
key []byte
value []byte
err error
releaser util.Releaser
}
func (i *dbIter) sampleSeek() {
ikey := i.iter.Key()
i.samplingGap -= len(ikey) + len(i.iter.Value())
for i.samplingGap < 0 {
i.samplingGap += i.db.iterSamplingRate()
i.db.sampleSeek(ikey)
}
}
func (i *dbIter) setErr(err error) {
@ -175,6 +190,7 @@ func (i *dbIter) Seek(key []byte) bool {
func (i *dbIter) next() bool {
for {
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
switch kt {
case ktDel:
@ -225,6 +241,7 @@ func (i *dbIter) prev() bool {
if i.iter.Valid() {
for {
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
return true
@ -266,6 +283,7 @@ func (i *dbIter) Prev() bool {
case dirForward:
for i.iter.Prev() {
if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if i.icmp.uCompare(ukey, i.key) < 0 {
goto cont
}

View file

@ -48,6 +48,15 @@ func (db *DB) addSeq(delta uint64) {
atomic.AddUint64(&db.seq, delta)
}
func (db *DB) sampleSeek(ikey iKey) {
v := db.s.version()
if v.sampleSeek(ikey) {
// Trigger table compaction.
db.compSendTrigger(db.tcompCmdC)
}
v.release()
}
func (db *DB) mpoolPut(mem *memdb.DB) {
defer func() {
recover()

View file

@ -405,19 +405,21 @@ func (h *dbHarness) compactRange(min, max string) {
t.Log("DB range compaction done")
}
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
t := h.t
db := h.db
s, err := db.SizeOf([]util.Range{
func (h *dbHarness) sizeOf(start, limit string) uint64 {
sz, err := h.db.SizeOf([]util.Range{
{[]byte(start), []byte(limit)},
})
if err != nil {
t.Error("SizeOf: got error: ", err)
h.t.Error("SizeOf: got error: ", err)
}
if s.Sum() < low || s.Sum() > hi {
t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
shorten(start), shorten(limit), low, hi, s.Sum())
return sz.Sum()
}
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
sz := h.sizeOf(start, limit)
if sz < low || sz > hi {
h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
shorten(start), shorten(limit), low, hi, sz)
}
}
@ -2577,3 +2579,87 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
}
v.release()
}
func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
const (
vSize = 200 * opt.KiB
tSize = 100 * opt.MiB
mIter = 100
n = tSize / vSize
)
h := newDbHarnessWopt(t, &opt.Options{
Compression: opt.NoCompression,
DisableBlockCache: true,
})
defer h.close()
key := func(x int) string {
return fmt.Sprintf("v%06d", x)
}
// Fill.
value := strings.Repeat("x", vSize)
for i := 0; i < n; i++ {
h.put(key(i), value)
}
h.compactMem()
// Delete all.
for i := 0; i < n; i++ {
h.delete(key(i))
}
h.compactMem()
var (
limit = n / limitDiv
startKey = key(0)
limitKey = key(limit)
maxKey = key(n)
slice = &util.Range{Limit: []byte(limitKey)}
initialSize0 = h.sizeOf(startKey, limitKey)
initialSize1 = h.sizeOf(limitKey, maxKey)
)
t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
for r := 0; true; r++ {
if r >= mIter {
t.Fatal("taking too long to compact")
}
// Iterates.
iter := h.db.NewIterator(slice, h.ro)
for iter.Next() {
}
if err := iter.Error(); err != nil {
t.Fatalf("Iter err: %v", err)
}
iter.Release()
// Wait compaction.
h.waitCompaction()
// Check size.
size0 := h.sizeOf(startKey, limitKey)
size1 := h.sizeOf(limitKey, maxKey)
t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
if size0 < initialSize0/10 {
break
}
}
if initialSize1 > 0 {
h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
}
}
func TestDB_IterTriggeredCompaction(t *testing.T) {
testDB_IterTriggeredCompaction(t, 1)
}
func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
testDB_IterTriggeredCompaction(t, 2)
}

View file

@ -34,10 +34,11 @@ var (
DefaultCompactionTotalSize = 10 * MiB
DefaultCompactionTotalSizeMultiplier = 10.0
DefaultCompressionType = SnappyCompression
DefaultOpenFilesCacher = LRUCacher
DefaultOpenFilesCacheCapacity = 500
DefaultIteratorSamplingRate = 1 * MiB
DefaultMaxMemCompationLevel = 2
DefaultNumLevel = 7
DefaultOpenFilesCacher = LRUCacher
DefaultOpenFilesCacheCapacity = 500
DefaultWriteBuffer = 4 * MiB
DefaultWriteL0PauseTrigger = 12
DefaultWriteL0SlowdownTrigger = 8
@ -153,7 +154,7 @@ type Options struct {
BlockCacher Cacher
// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
// Use -1 for zero, this has same effect with specifying NoCacher to BlockCacher.
// Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher.
//
// The default value is 8MiB.
BlockCacheCapacity int
@ -288,6 +289,13 @@ type Options struct {
// The default value is nil.
Filter filter.Filter
// IteratorSamplingRate defines the approximate gap (in bytes) between read
// sampling of an iterator. The samples will be used to determine when
// compaction should be triggered.
//
// The default is 1MiB.
IteratorSamplingRate int
// MaxMemCompationLevel defines maximum level a newly compacted 'memdb'
// will be pushed into if it doesn't create overlap. This should be less than
// NumLevel. Use -1 for level-0.
@ -308,7 +316,7 @@ type Options struct {
OpenFilesCacher Cacher
// OpenFilesCacheCapacity defines the capacity of the open files caching.
// Use -1 for zero, this has same effect with specifying NoCacher to OpenFilesCacher.
// Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher.
//
// The default value is 500.
OpenFilesCacheCapacity int
@ -355,9 +363,9 @@ func (o *Options) GetBlockCacher() Cacher {
}
func (o *Options) GetBlockCacheCapacity() int {
if o == nil || o.BlockCacheCapacity <= 0 {
if o == nil || o.BlockCacheCapacity == 0 {
return DefaultBlockCacheCapacity
} else if o.BlockCacheCapacity == -1 {
} else if o.BlockCacheCapacity < 0 {
return 0
}
return o.BlockCacheCapacity
@ -492,12 +500,19 @@ func (o *Options) GetFilter() filter.Filter {
return o.Filter
}
func (o *Options) GetIteratorSamplingRate() int {
if o == nil || o.IteratorSamplingRate <= 0 {
return DefaultIteratorSamplingRate
}
return o.IteratorSamplingRate
}
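As a usage sketch, the new option can be set when opening a database; the path and rate below are illustrative:

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Sample iterator reads roughly every 4 MiB instead of the 1 MiB
	// default, making read-triggered compactions less aggressive.
	db, err := leveldb.OpenFile("/tmp/example-db", &opt.Options{
		IteratorSamplingRate: 4 * opt.MiB,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```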
func (o *Options) GetMaxMemCompationLevel() int {
level := DefaultMaxMemCompationLevel
if o != nil {
if o.MaxMemCompationLevel > 0 {
level = o.MaxMemCompationLevel
} else if o.MaxMemCompationLevel == -1 {
} else if o.MaxMemCompationLevel < 0 {
level = 0
}
}
@ -525,9 +540,9 @@ func (o *Options) GetOpenFilesCacher() Cacher {
}
func (o *Options) GetOpenFilesCacheCapacity() int {
if o == nil || o.OpenFilesCacheCapacity <= 0 {
if o == nil || o.OpenFilesCacheCapacity == 0 {
return DefaultOpenFilesCacheCapacity
} else if o.OpenFilesCacheCapacity == -1 {
} else if o.OpenFilesCacheCapacity < 0 {
return 0
}
return o.OpenFilesCacheCapacity

View file

@ -136,9 +136,8 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
if !tseek {
if tset == nil {
tset = &tSet{level, t}
} else if tset.table.consumeSeek() <= 0 {
} else {
tseek = true
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
}
}
@ -203,6 +202,28 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
return true
})
if tseek && tset.table.consumeSeek() <= 0 {
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
}
return
}
func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
var tset *tSet
v.walkOverlapping(ikey, func(level int, t *tFile) bool {
if tset == nil {
tset = &tSet{level, t}
return true
} else {
if tset.table.consumeSeek() <= 0 {
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
}
return false
}
}, nil)
return
}

View file

@ -7,10 +7,15 @@ package snappy
import (
"encoding/binary"
"errors"
"io"
)
// ErrCorrupt reports that the input is invalid.
var ErrCorrupt = errors.New("snappy: corrupt input")
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
@ -122,3 +127,166 @@ func Decode(dst, src []byte) ([]byte, error) {
}
return dst[:d], nil
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxUncompressedChunkLen),
buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF {
r.err = ErrCorrupt
}
return false
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4]) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
}
// The chunk types are specified at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if !r.readFull(r.decoded[:n]) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)]) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
} else {
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen]) {
return 0, r.err
}
}
}
}

View file

@ -6,6 +6,7 @@ package snappy
import (
"encoding/binary"
"io"
)
// We limit how far copy back-references can go, the same as the C++ code.
@ -172,3 +173,86 @@ func MaxEncodedLen(srcLen int) int {
// This last factor dominates the blowup, so the final estimate is:
return 32 + srcLen + srcLen/6
}
// NewWriter returns a new Writer that compresses to w, using the framing
// format described at
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
}
}
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
enc []byte
buf [checksumSize + chunkHeaderSize]byte
wroteHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
w.wroteHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (n int, errRet error) {
if w.err != nil {
return 0, w.err
}
if !w.wroteHeader {
copy(w.enc, magicChunk)
if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
w.err = err
return n, err
}
w.wroteHeader = true
}
for len(p) > 0 {
var uncompressed []byte
if len(p) > maxUncompressedChunkLen {
uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
chunkType := uint8(chunkTypeCompressedData)
chunkBody, err := Encode(w.enc, uncompressed)
if err != nil {
w.err = err
return n, err
}
if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
}
chunkLen := 4 + len(chunkBody)
w.buf[0] = chunkType
w.buf[1] = uint8(chunkLen >> 0)
w.buf[2] = uint8(chunkLen >> 8)
w.buf[3] = uint8(chunkLen >> 16)
w.buf[4] = uint8(checksum >> 0)
w.buf[5] = uint8(checksum >> 8)
w.buf[6] = uint8(checksum >> 16)
w.buf[7] = uint8(checksum >> 24)
if _, err = w.w.Write(w.buf[:]); err != nil {
w.err = err
return n, err
}
if _, err = w.w.Write(chunkBody); err != nil {
w.err = err
return n, err
}
n += len(uncompressed)
}
return n, nil
}
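A round-trip sketch of the new framing-format Reader and Writer (the payload is arbitrary; the import path matches this vendoring):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/syndtr/gosnappy/snappy"
)

func main() {
	var buf bytes.Buffer

	// Each Write call emits complete framed chunks, so no explicit
	// flush or close is needed here.
	if _, err := snappy.NewWriter(&buf).Write([]byte("hello, snappy framing format")); err != nil {
		panic(err)
	}

	decoded, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded)) // hello, snappy framing format
}
```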

View file

@ -8,6 +8,10 @@
// The C++ snappy implementation is at http://code.google.com/p/snappy/
package snappy
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
@ -36,3 +40,29 @@ const (
tagCopy2 = 0x02
tagCopy4 = 0x03
)
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
maxUncompressedChunkLen = 65536
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
}

View file

@ -18,7 +18,10 @@ import (
"testing"
)
var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
var (
download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
testdata = flag.String("testdata", "testdata", "Directory containing the test data")
)
func roundtrip(b, ebuf, dbuf []byte) error {
e, err := Encode(ebuf, b)
@ -55,11 +58,11 @@ func TestSmallCopy(t *testing.T) {
}
func TestSmallRand(t *testing.T) {
rand.Seed(27354294)
rng := rand.New(rand.NewSource(27354294))
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i, _ := range b {
b[i] = uint8(rand.Uint32())
for i := range b {
b[i] = uint8(rng.Uint32())
}
if err := roundtrip(b, nil, nil); err != nil {
t.Fatal(err)
@ -70,7 +73,7 @@ func TestSmallRand(t *testing.T) {
func TestSmallRegular(t *testing.T) {
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i, _ := range b {
for i := range b {
b[i] = uint8(i%10 + 'a')
}
if err := roundtrip(b, nil, nil); err != nil {
@ -79,6 +82,120 @@ func TestSmallRegular(t *testing.T) {
}
}
func cmp(a, b []byte) error {
if len(a) != len(b) {
return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
}
for i := range a {
if a[i] != b[i] {
return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
}
}
return nil
}
func TestFramingFormat(t *testing.T) {
// src is comprised of alternating 1e5-sized sequences of random
// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
// because it is larger than maxUncompressedChunkLen (64k).
src := make([]byte, 1e6)
rng := rand.New(rand.NewSource(1))
for i := 0; i < 10; i++ {
if i%2 == 0 {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(rng.Intn(256))
}
} else {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(i)
}
}
}
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(src); err != nil {
t.Fatalf("Write: encoding: %v", err)
}
dst, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Fatalf("ReadAll: decoding: %v", err)
}
if err := cmp(dst, src); err != nil {
t.Fatal(err)
}
}
func TestReaderReset(t *testing.T) {
gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(gold); err != nil {
t.Fatalf("Write: %v", err)
}
encoded, invalid, partial := buf.String(), "invalid", "partial"
r := NewReader(nil)
for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
if s == partial {
r.Reset(strings.NewReader(encoded))
if _, err := r.Read(make([]byte, 101)); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
continue
}
r.Reset(strings.NewReader(s))
got, err := ioutil.ReadAll(r)
switch s {
case encoded:
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if err := cmp(got, gold); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
case invalid:
if err == nil {
t.Errorf("#%d: got nil error, want non-nil", i)
continue
}
}
}
}
func TestWriterReset(t *testing.T) {
gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
var gots, wants [][]byte
const n = 20
w, failed := NewWriter(nil), false
for i := 0; i <= n; i++ {
buf := new(bytes.Buffer)
w.Reset(buf)
want := gold[:len(gold)*i/n]
if _, err := w.Write(want); err != nil {
t.Errorf("#%d: Write: %v", i, err)
failed = true
continue
}
got, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Errorf("#%d: ReadAll: %v", i, err)
failed = true
continue
}
gots = append(gots, got)
wants = append(wants, want)
}
if failed {
return
}
for i := range gots {
if err := cmp(gots[i], wants[i]); err != nil {
t.Errorf("#%d: %v", i, err)
}
}
}
func benchDecode(b *testing.B, src []byte) {
encoded, err := Encode(nil, src)
if err != nil {
@ -102,7 +219,7 @@ func benchEncode(b *testing.B, src []byte) {
}
}
func readFile(b *testing.B, filename string) []byte {
func readFile(b testing.TB, filename string) []byte {
src, err := ioutil.ReadFile(filename)
if err != nil {
b.Fatalf("failed reading %s: %s", filename, err)
@ -144,7 +261,7 @@ func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
// testFiles' values are copied directly from
// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
// The label field is unused in snappy-go.
var testFiles = []struct {
label string
@ -152,29 +269,36 @@ var testFiles = []struct {
}{
{"html", "html"},
{"urls", "urls.10K"},
{"jpg", "house.jpg"},
{"pdf", "mapreduce-osdi-1.pdf"},
{"jpg", "fireworks.jpeg"},
{"jpg_200", "fireworks.jpeg"},
{"pdf", "paper-100k.pdf"},
{"html4", "html_x_4"},
{"cp", "cp.html"},
{"c", "fields.c"},
{"lsp", "grammar.lsp"},
{"xls", "kennedy.xls"},
{"txt1", "alice29.txt"},
{"txt2", "asyoulik.txt"},
{"txt3", "lcet10.txt"},
{"txt4", "plrabn12.txt"},
{"bin", "ptt5"},
{"sum", "sum"},
{"man", "xargs.1"},
{"pb", "geo.protodata"},
{"gaviota", "kppkn.gtb"},
}
// The test data files are present at this canonical URL.
const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"
const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
func downloadTestdata(basename string) (errRet error) {
filename := filepath.Join("testdata", basename)
filename := filepath.Join(*testdata, basename)
if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
return nil
}
if !*download {
return fmt.Errorf("test data not found; skipping benchmark without the -download flag")
}
// Download the official snappy C++ implementation reference test data
// files for benchmarking.
if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create testdata: %s", err)
}
f, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to create %s: %s", filename, err)
@ -185,36 +309,27 @@ func downloadTestdata(basename string) (errRet error) {
os.Remove(filename)
}
}()
resp, err := http.Get(baseURL + basename)
url := baseURL + basename
resp, err := http.Get(url)
if err != nil {
return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
return fmt.Errorf("failed to download %s: %s", url, err)
}
defer resp.Body.Close()
if s := resp.StatusCode; s != http.StatusOK {
return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
}
_, err = io.Copy(f, resp.Body)
if err != nil {
return fmt.Errorf("failed to write %s: %s", filename, err)
return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
}
return nil
}
func benchFile(b *testing.B, n int, decode bool) {
filename := filepath.Join("testdata", testFiles[n].filename)
if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
if !*download {
b.Fatal("test data not found; skipping benchmark without the -download flag")
}
// Download the official snappy C++ implementation reference test data
// files for benchmarking.
if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
b.Fatalf("failed to create testdata: %s", err)
}
for _, tf := range testFiles {
if err := downloadTestdata(tf.filename); err != nil {
b.Fatalf("failed to download testdata: %s", err)
}
}
if err := downloadTestdata(testFiles[n].filename); err != nil {
b.Fatalf("failed to download testdata: %s", err)
}
data := readFile(b, filename)
data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
if decode {
benchDecode(b, data)
} else {
@ -235,12 +350,6 @@ func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
@ -253,9 +362,3 @@ func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }