Merge pull request #628 from prometheus/beorn7/release
Update vendoring and release 0.13.0.
commit 2d7d1a34b6
CHANGELOG.md | 29

@@ -1,3 +1,30 @@
+## 0.13.0 / 2015-04-08
+* [ENHANCEMENT] Double-delta encoding for chunks, saving typically 40% of
+  space, both in RAM and on disk.
+* [ENHANCEMENT] Redesign of chunk persistence queuing, increasing performance
+  on spinning disks significantly.
+* [ENHANCEMENT] Redesign of sample ingestion, increasing ingestion performance.
+* [FEATURE] Added ln, log2, log10 and exp functions to the query language.
+* [FEATURE] Experimental write support to InfluxDB.
+* [FEATURE] Allow custom timestamps in instant query API.
+* [FEATURE] Configurable path prefix for URLs to support proxies.
+* [ENHANCEMENT] Increase of rule_checker CLI usability.
+* [CHANGE] Show special float values as gaps.
+* [ENHANCEMENT] Made usage output more readable.
+* [ENHANCEMENT] Increased resilience of the storage against data corruption.
+* [ENHANCEMENT] Various improvements around chunk encoding.
+* [ENHANCEMENT] Nicer formatting of target health table on /status.
+* [CHANGE] Rename UNREACHABLE to UNHEALTHY, ALIVE to HEALTHY.
+* [BUGFIX] Strip trailing slash in alertmanager URL.
+* [BUGFIX] Avoid +InfYs and similar, just display +Inf.
+* [BUGFIX] Fixed HTML-escaping at various places.
+* [BUGFIX] Fixed special value handling in division and modulo of the query
+  language.
+* [BUGFIX] Fix embed-static.sh.
+* [CLEANUP] Added initial HTTP API tests.
+* [CLEANUP] Misc. other code cleanups.
+* [MAINTENANCE] Updated vendored dependencies to their newest versions.
+
 ## 0.12.0 / 2015-03-04
 * [CHANGE] Use client_golang v0.3.1. THIS CHANGES FINGERPRINTING AND INVALIDATES
   ALL PERSISTED FINGERPRINTS. You have to wipe your storage to use this or
@@ -24,7 +51,7 @@
 * [CHANGE] Update vendoring due to vendoring changes in client_golang.
 * [CLEANUP] Code cleanups.
 * [ENHANCEMENT] Limit the number of 'dirty' series counted during checkpointing.

 ## 0.11.0 / 2015-02-23
 * [FEATURE] Introduce new metric type Histogram with server-side aggregation.
 * [FEATURE] Add offset operator.

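Two of the 0.13.0 items above, the new math functions (ln, log2, log10, exp) and custom timestamps in the instant query API, can be exercised together from a small client. The sketch below is illustrative only: the endpoint path and the expr/timestamp parameter names are assumptions for illustration, not something this commit defines.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Evaluate ln() of a series at an explicit timestamp instead of "now".
	// Endpoint and parameter names here are assumed, not taken from this commit.
	params := url.Values{}
	params.Set("expr", "ln(http_requests_total)")
	params.Set("timestamp", "1428480000") // Unix seconds, roughly 2015-04-08
	resp, err := http.Get("http://localhost:9090/api/query?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
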
Godeps/Godeps.json | 32 (generated)

@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/prometheus/prometheus",
-	"GoVersion": "go1.4",
+	"GoVersion": "go1.4.2",
 	"Deps": [
 		{
 			"ImportPath": "bitbucket.org/ww/goautoneg",
@@ -17,35 +17,35 @@
 		},
 		{
 			"ImportPath": "github.com/golang/protobuf/proto",
-			"Rev": "5677a0e3d5e89854c9974e1256839ee23f8233ca"
+			"Rev": "655cdfa588ea190e901bc5590e65d5621688847c"
 		},
 		{
-			"ImportPath": "github.com/matttproud/golang_protobuf_extensions/ext",
-			"Rev": "ba7d65ac66e9da93a714ca18f6d1bc7a0c09100c"
+			"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+			"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
 		},
 		{
 			"ImportPath": "github.com/miekg/dns",
-			"Rev": "b65f52f3f0dd1afa25cbbf63f8e7eb15fb5c0641"
+			"Rev": "e6898c8f30b5d002db962043a62db90552e90bf7"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/extraction",
-			"Comment": "0.3.2",
-			"Rev": "1cf6d4b964951c63779ba7513c57fe389b609014"
+			"Comment": "0.4.0-1-g692492e",
+			"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/model",
-			"Comment": "0.3.2",
-			"Rev": "1cf6d4b964951c63779ba7513c57fe389b609014"
+			"Comment": "0.4.0-1-g692492e",
+			"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/prometheus",
-			"Comment": "0.3.2",
-			"Rev": "1cf6d4b964951c63779ba7513c57fe389b609014"
+			"Comment": "0.4.0-1-g692492e",
+			"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_golang/text",
-			"Comment": "0.3.2",
-			"Rev": "1cf6d4b964951c63779ba7513c57fe389b609014"
+			"Comment": "0.4.0-1-g692492e",
+			"Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
 		},
 		{
 			"ImportPath": "github.com/prometheus/client_model/go",
@@ -54,15 +54,15 @@
 		},
 		{
 			"ImportPath": "github.com/prometheus/procfs",
-			"Rev": "92faa308558161acab0ada1db048e9996ecec160"
+			"Rev": "490cc6eb5fa45bf8a8b7b73c8bc82a8160e8531d"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "e9e2c8f6d3b9c313fb4acaac5ab06285bcf30b04"
+			"Rev": "4875955338b0a434238a31165cb87255ab6e9e4a"
 		},
 		{
 			"ImportPath": "github.com/syndtr/gosnappy/snappy",
-			"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
+			"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
 		}
 	]
 }

Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile | 2 (generated, vendored)

@@ -39,5 +39,5 @@ test: install generate-test-pbs
 generate-test-pbs:
 	make install
 	make -C testdata
-	make -C proto3_proto
+	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
 	make

Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go | 16 (generated, vendored)

@@ -44,8 +44,8 @@ import (
 	"testing"
 	"time"

-	. "./testdata"
 	. "github.com/golang/protobuf/proto"
+	. "github.com/golang/protobuf/proto/testdata"
 )

 var globalO *Buffer
@@ -1252,7 +1252,8 @@ func TestProto1RepeatedGroup(t *testing.T) {
 	}

 	o := old()
-	if err := o.Marshal(pb); err != ErrRepeatedHasNil {
+	err := o.Marshal(pb)
+	if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
 		t.Fatalf("unexpected or no error when marshaling: %v", err)
 	}
 }
@@ -1441,6 +1442,17 @@ func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {
 	}
 }

+func TestSetDefaultWithRepeatedNonMessage(t *testing.T) {
+	m := &MyMessage{
+		Pet: []string{"turtle", "wombat"},
+	}
+	expected := Clone(m)
+	SetDefaults(m)
+	if !Equal(m, expected) {
+		t.Errorf("\n got %v\nwant %v", m, expected)
+	}
+}
+
 func TestMaximumTagNumber(t *testing.T) {
 	m := &MaxTag{
 		LastField: String("natural goat essence"),

Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go | 2 (generated, vendored)

@@ -36,7 +36,7 @@ import (

 	"github.com/golang/protobuf/proto"

-	pb "./testdata"
+	pb "github.com/golang/protobuf/proto/testdata"
 )

 var cloneTestMessage = &pb.MyMessage{

Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go | 25 (generated, vendored)

@@ -60,9 +60,9 @@ func (e *RequiredNotSetError) Error() string {
 }

 var (
-	// ErrRepeatedHasNil is the error returned if Marshal is called with
+	// errRepeatedHasNil is the error returned if Marshal is called with
 	// a struct with a repeated field containing a nil element.
-	ErrRepeatedHasNil = errors.New("proto: repeated field has nil element")
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")

 	// ErrNil is the error returned if Marshal is called with nil.
 	ErrNil = errors.New("proto: Marshal called with nil")
@@ -939,7 +939,7 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
 	for i := 0; i < l; i++ {
 		structp := s.Index(i)
 		if structPointer_IsNil(structp) {
-			return ErrRepeatedHasNil
+			return errRepeatedHasNil
 		}

 		// Can the object marshal itself?
@@ -958,7 +958,7 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
 		err := o.enc_len_struct(p.sprop, structp, &state)
 		if err != nil && !state.shouldContinue(err, nil) {
 			if err == ErrNil {
-				return ErrRepeatedHasNil
+				return errRepeatedHasNil
 			}
 			return err
 		}
@@ -1001,7 +1001,7 @@ func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error
 	for i := 0; i < l; i++ {
 		b := s.Index(i)
 		if structPointer_IsNil(b) {
-			return ErrRepeatedHasNil
+			return errRepeatedHasNil
 		}

 		o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
@@ -1010,7 +1010,7 @@ func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error

 		if err != nil && !state.shouldContinue(err, nil) {
 			if err == ErrNil {
-				return ErrRepeatedHasNil
+				return errRepeatedHasNil
 			}
 			return err
 		}
@@ -1128,10 +1128,12 @@ func size_new_map(p *Properties, base structPointer) int {
 		keycopy.Set(key)
 		valcopy.Set(val)

-		// Tag codes are two bytes per map entry.
-		n += 2
-		n += p.mkeyprop.size(p.mkeyprop, keybase)
-		n += p.mvalprop.size(p.mvalprop, valbase)
+		// Tag codes for key and val are the responsibility of the sub-sizer.
+		keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+		valsize := p.mvalprop.size(p.mvalprop, valbase)
+		entry := keysize + valsize
+		// Add on tag code and length of map entry itself.
+		n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
 	}
 	return n
 }
@@ -1184,6 +1186,9 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
 			if p.Required && state.err == nil {
 				state.err = &RequiredNotSetError{p.Name}
 			}
+		} else if err == errRepeatedHasNil {
+			// Give more context to nil values in repeated fields.
+			return errors.New("repeated field " + p.OrigName + " has nil element")
 		} else if !state.shouldContinue(err, p) {
 			return err
 		}

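For readers following the size_new_map change above: each map entry is now sized as a nested, length-delimited message, i.e. the field's tag bytes plus a varint holding the entry length plus the encoded key/value payload, instead of a flat two bytes per entry. A self-contained sketch of that arithmetic; the helper below mirrors what a varint sizer does and is illustrative, not the library's internal function:

package main

import "fmt"

// sizeVarint returns how many bytes the varint encoding of x occupies.
func sizeVarint(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	tagcodeLen := 1               // e.g. field number 1, wire type 2 fits in one tag byte
	keySize, valSize := 2, 11     // sizes of the already-encoded key and value sub-fields
	entry := keySize + valSize    // payload of the synthetic map-entry message
	total := tagcodeLen + sizeVarint(uint64(entry)) + entry
	fmt.Println(total) // 15: 1 tag byte + 1 length byte + 13 payload bytes
}
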
Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go | 2 (generated, vendored)

@@ -34,8 +34,8 @@ package proto_test
 import (
 	"testing"

-	pb "./testdata"
 	. "github.com/golang/protobuf/proto"
+	pb "github.com/golang/protobuf/proto/testdata"
 )

 // Four identical base messages.

Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go | 9 (generated, vendored)

@@ -37,6 +37,7 @@ package proto

 import (
 	"errors"
+	"fmt"
 	"reflect"
 	"strconv"
 	"sync"
@@ -321,6 +322,14 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
 	if typ != reflect.TypeOf(value) {
 		return errors.New("proto: bad extension value type")
 	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}

 	pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
 	return nil

Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go | 18 (generated, vendored)

@@ -34,8 +34,8 @@ package proto_test
 import (
 	"testing"

-	pb "./testdata"
 	"github.com/golang/protobuf/proto"
+	pb "github.com/golang/protobuf/proto/testdata"
 )

 func TestGetExtensionsWithMissingExtensions(t *testing.T) {
@@ -135,3 +135,19 @@ func TestExtensionsRoundTrip(t *testing.T) {
 		t.Error("expected some sort of type mismatch error, got nil")
 	}
 }
+
+func TestNilExtension(t *testing.T) {
+	msg := &pb.MyMessage{
+		Count: proto.Int32(1),
+	}
+	if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
+		t.Fatal(err)
+	}
+	if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
+		t.Error("expected SetExtension to fail due to a nil extension")
+	} else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
+		t.Errorf("expected error %v, got %v", want, err)
+	}
+	// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
+	// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
+}

Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go | 57 (generated, vendored)

@@ -607,13 +607,15 @@ func setDefaults(v reflect.Value, recur, zeros bool) {

 	for _, ni := range dm.nested {
 		f := v.Field(ni)
-		if f.IsNil() {
-			continue
-		}
-		// f is *T or []*T
-		if f.Kind() == reflect.Ptr {
+		// f is *T or []*T or map[T]*T
+		switch f.Kind() {
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
 			setDefaults(f, recur, zeros)
-		} else {
+
+		case reflect.Slice:
 			for i := 0; i < f.Len(); i++ {
 				e := f.Index(i)
 				if e.IsNil() {
@@ -621,6 +623,15 @@ func setDefaults(v reflect.Value, recur, zeros bool) {
 				}
 				setDefaults(e, recur, zeros)
 			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
 		}
 	}
 }
@@ -646,10 +657,6 @@ type scalarField struct {
 	value interface{} // the proto-declared default value, or nil
 }

-func ptrToStruct(t reflect.Type) bool {
-	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
-}
-
 // t is a struct type.
 func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
 	sprop := GetProperties(t)
@@ -661,9 +668,33 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
 		}
 		ft := t.Field(fi).Type

-		// nested messages
-		if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) {
-			dm.nested = append(dm.nested, fi)
+		var canHaveDefault, nestedMessage bool
+		switch ft.Kind() {
+		case reflect.Ptr:
+			if ft.Elem().Kind() == reflect.Struct {
+				nestedMessage = true
+			} else {
+				canHaveDefault = true // proto2 scalar field
+			}
+
+		case reflect.Slice:
+			switch ft.Elem().Kind() {
+			case reflect.Ptr:
+				nestedMessage = true // repeated message
+			case reflect.Uint8:
+				canHaveDefault = true // bytes field
+			}
+
+		case reflect.Map:
+			if ft.Elem().Kind() == reflect.Ptr {
+				nestedMessage = true // map with message values
+			}
+		}
+
+		if !canHaveDefault {
+			if nestedMessage {
+				dm.nested = append(dm.nested, fi)
+			}
 			continue
 		}

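The setDefaults rewrite above trades an if/else on pointer fields for a switch over reflect.Ptr, reflect.Slice and reflect.Map, so map-valued message fields are recursed into as well. A standalone sketch of that traversal pattern, independent of the proto package's internals (types and names below are illustrative):

package main

import (
	"fmt"
	"reflect"
)

// visit walks a value and prints every non-nil struct pointer reachable
// through pointer, slice, and map fields -- the same kind switch used above.
func visit(v reflect.Value) {
	switch v.Kind() {
	case reflect.Ptr:
		if v.IsNil() {
			return
		}
		fmt.Println("visiting", v.Type())
		visit(v.Elem())
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			visit(v.Index(i))
		}
	case reflect.Map:
		for _, k := range v.MapKeys() {
			visit(v.MapIndex(k))
		}
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			visit(v.Field(i))
		}
	}
}

type Inner struct{ N int }

type Outer struct {
	P *Inner
	S []*Inner
	M map[string]*Inner
}

func main() {
	o := Outer{P: &Inner{1}, S: []*Inner{{2}}, M: map[string]*Inner{"a": {3}}}
	visit(reflect.ValueOf(&o))
}
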
Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go | 30 (generated, vendored)

@@ -440,7 +440,12 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
 			p.enc = (*Buffer).enc_slice_byte
 			p.dec = (*Buffer).dec_slice_byte
 			p.size = size_slice_byte
-			if p.proto3 {
+			// This is a []byte, which is either a bytes field,
+			// or the value of a map field. In the latter case,
+			// we always encode an empty []byte, so we should not
+			// use the proto3 enc/size funcs.
+			// f == nil iff this is the key/value of a map field.
+			if p.proto3 && f != nil {
 				p.enc = (*Buffer).enc_proto3_slice_byte
 				p.size = size_proto3_slice_byte
 			}
@@ -595,7 +600,7 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
 }

 var (
-	mutex         sync.Mutex
+	propertiesMu  sync.RWMutex
 	propertiesMap = make(map[reflect.Type]*StructProperties)
 )

@@ -605,13 +610,26 @@ func GetProperties(t reflect.Type) *StructProperties {
 	if t.Kind() != reflect.Struct {
 		panic("proto: type must have kind struct")
 	}
-	mutex.Lock()
-	sprop := getPropertiesLocked(t)
-	mutex.Unlock()
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		if collectStats {
+			stats.Chit++
+		}
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
 	return sprop
 }

-// getPropertiesLocked requires that mutex is held.
+// getPropertiesLocked requires that propertiesMu is held.
 func getPropertiesLocked(t reflect.Type) *StructProperties {
 	if prop, ok := propertiesMap[t]; ok {
 		if collectStats {

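The GetProperties change above replaces a plain sync.Mutex with a sync.RWMutex so the common case, a type that is already cached, takes only a read lock. A minimal sketch of that read-mostly cache pattern; the cache and key types here are illustrative, not the proto package's:

package main

import (
	"fmt"
	"sync"
)

var (
	cacheMu sync.RWMutex
	cache   = make(map[string]int)
)

// lookup returns a cached value, computing and storing it on a miss.
// Hits take only the read lock; the write lock is taken only on a miss.
func lookup(key string, compute func() int) int {
	cacheMu.RLock()
	v, ok := cache[key]
	cacheMu.RUnlock()
	if ok {
		return v // fast path: no exclusive lock needed
	}

	cacheMu.Lock()
	defer cacheMu.Unlock()
	// Re-check under the write lock: another goroutine may have filled it.
	if v, ok := cache[key]; ok {
		return v
	}
	v = compute()
	cache[key] = v
	return v
}

func main() {
	fmt.Println(lookup("answer", func() int { return 42 }))
	fmt.Println(lookup("answer", func() int { return 0 })) // served from cache
}
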
Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile | 44 (generated, vendored; file deleted)

@@ -1,44 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2014 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include ../../Make.protobuf
-
-all: regenerate
-
-regenerate:
-	rm -f proto3.pb.go
-	make proto3.pb.go
-
-# The following rules are just aids to development. Not needed for typical testing.
-
-diff: regenerate
-	git diff proto3.pb.go

Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto | 10 (generated, vendored)

@@ -31,6 +31,8 @@

 syntax = "proto3";

+import "testdata/test.proto";
+
 package proto3_proto;

 message Message {
@@ -51,8 +53,16 @@ message Message {

   repeated uint64 key = 5;
   Nested nested = 6;
+
+  map<string, Nested> terrain = 10;
+  testdata.SubDefaults proto2_field = 11;
+  map<string, testdata.SubDefaults> proto2_value = 13;
 }

 message Nested {
   string bunny = 1;
 }
+
+message MessageWithMap {
+  map<bool, bytes> byte_mapping = 1;
+}

Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go | 34 (generated, vendored)

@@ -34,8 +34,9 @@ package proto_test
 import (
 	"testing"

-	pb "./proto3_proto"
 	"github.com/golang/protobuf/proto"
+	pb "github.com/golang/protobuf/proto/proto3_proto"
+	tpb "github.com/golang/protobuf/proto/testdata"
 )

 func TestProto3ZeroValues(t *testing.T) {
@@ -91,3 +92,34 @@ func TestRoundTripProto3(t *testing.T) {
 		t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
 	}
 }
+
+func TestProto3SetDefaults(t *testing.T) {
+	in := &pb.Message{
+		Terrain: map[string]*pb.Nested{
+			"meadow": new(pb.Nested),
+		},
+		Proto2Field: new(tpb.SubDefaults),
+		Proto2Value: map[string]*tpb.SubDefaults{
+			"badlands": new(tpb.SubDefaults),
+		},
+	}
+
+	got := proto.Clone(in).(*pb.Message)
+	proto.SetDefaults(got)
+
+	// There are no defaults in proto3. Everything should be the zero value, but
+	// we need to remember to set defaults for nested proto2 messages.
+	want := &pb.Message{
+		Terrain: map[string]*pb.Nested{
+			"meadow": new(pb.Nested),
+		},
+		Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
+		Proto2Value: map[string]*tpb.SubDefaults{
+			"badlands": &tpb.SubDefaults{N: proto.Int64(7)},
+		},
+	}
+
+	if !proto.Equal(got, want) {
+		t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
+	}
+}

Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go | 11 (generated, vendored)

@@ -33,11 +33,12 @@ package proto_test

 import (
 	"log"
+	"strings"
 	"testing"

-	proto3pb "./proto3_proto"
-	pb "./testdata"
 	. "github.com/golang/protobuf/proto"
+	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+	pb "github.com/golang/protobuf/proto/testdata"
 )

 var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
@@ -113,10 +114,16 @@ var SizeTests = []struct {
 	{"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
 	{"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
 	{"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
+	{"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
+
 	{"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
 	{"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
 	{"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
+	{"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
+
+	{"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
+	{"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
+	{"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
 }

 func TestSize(t *testing.T) {

Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go | 8 (generated, vendored)

@@ -1890,6 +1890,7 @@ type MessageWithMap struct {
 	NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
 	MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
 	ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -1918,6 +1919,13 @@ func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
 	return nil
 }

+func (m *MessageWithMap) GetStrToStr() map[string]string {
+	if m != nil {
+		return m.StrToStr
+	}
+	return nil
+}
+
 var E_Greeting = &proto.ExtensionDesc{
 	ExtendedType:  (*MyMessage)(nil),
 	ExtensionType: ([]string)(nil),

Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto | 1 (generated, vendored)

@@ -431,4 +431,5 @@ message MessageWithMap {
   map<int32, string> name_mapping = 1;
   map<sint64, FloatingPoint> msg_mapping = 2;
   map<bool, bytes> byte_mapping = 3;
+  map<string, string> str_to_str = 4;
 }

Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go | 4 (generated, vendored)

@@ -36,9 +36,9 @@ import (
 	"reflect"
 	"testing"

-	proto3pb "./proto3_proto"
-	. "./testdata"
 	. "github.com/golang/protobuf/proto"
+	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+	. "github.com/golang/protobuf/proto/testdata"
 )

 type UnmarshalTextTest struct {

Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go | 4 (generated, vendored)

@@ -41,8 +41,8 @@ import (

 	"github.com/golang/protobuf/proto"

-	proto3pb "./proto3_proto"
-	pb "./testdata"
+	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+	pb "github.com/golang/protobuf/proto/testdata"
 )

 // textMessage implements the methods that allow it to marshal and unmarshal

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package ext
+package pbutil

 import (
 	"bytes"
@@ -21,6 +21,8 @@ import (
 	"testing"
 	"testing/quick"

+	"github.com/matttproud/golang_protobuf_extensions/pbtest"
+
 	. "github.com/golang/protobuf/proto"
 	. "github.com/golang/protobuf/proto/testdata"
 )
@@ -138,10 +140,10 @@ I expect it may. Let's hope you enjoy testing as much as we do.`),

 func TestEndToEndValid(t *testing.T) {
 	for _, test := range [][]Message{
-		[]Message{&Empty{}},
-		[]Message{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},
-		[]Message{&GoEnum{Foo: FOO_FOO1.Enum()}},
-		[]Message{&Strings{
+		{&Empty{}},
+		{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},
+		{&GoEnum{Foo: FOO_FOO1.Enum()}},
+		{&Strings{
 			StringField: String(`This is my gigantic, unhappy string. It exceeds
 the encoding size of a single byte varint. We are using it to fuzz test the
 correctness of the header decoding mechanisms, which may prove problematic.
@@ -176,45 +178,6 @@ I expect it may. Let's hope you enjoy testing as much as we do.`),
 	}
 }

-// visitMessage empties the private state fields of the quick.Value()-generated
-// Protocol Buffer messages, for they cause an inordinate amount of problems.
-// This is because we are using an automated fuzz generator on a type with
-// private fields.
-func visitMessage(m Message) {
-	t := reflect.TypeOf(m)
-	if t.Kind() != reflect.Ptr {
-		return
-	}
-	derefed := t.Elem()
-	if derefed.Kind() != reflect.Struct {
-		return
-	}
-	v := reflect.ValueOf(m)
-	elem := v.Elem()
-	for i := 0; i < elem.NumField(); i++ {
-		field := elem.FieldByIndex([]int{i})
-		fieldType := field.Type()
-		if fieldType.Implements(reflect.TypeOf((*Message)(nil)).Elem()) {
-			visitMessage(field.Interface().(Message))
-		}
-		if field.Kind() == reflect.Slice {
-			for i := 0; i < field.Len(); i++ {
-				elem := field.Index(i)
-				elemType := elem.Type()
-				if elemType.Implements(reflect.TypeOf((*Message)(nil)).Elem()) {
-					visitMessage(elem.Interface().(Message))
-				}
-			}
-		}
-	}
-	if field := elem.FieldByName("XXX_unrecognized"); field.IsValid() {
-		field.Set(reflect.ValueOf([]byte{}))
-	}
-	if field := elem.FieldByName("XXX_extensions"); field.IsValid() {
-		field.Set(reflect.ValueOf(nil))
-	}
-}
-
 // rndMessage generates a random valid Protocol Buffer message.
 func rndMessage(r *rand.Rand) Message {
 	var t reflect.Type
@@ -307,7 +270,9 @@ func rndMessage(r *rand.Rand) Message {
 	if !ok {
 		panic("attempt to generate illegal item; consult item 11")
 	}
-	visitMessage(v.Interface().(Message))
+	if err := pbtest.SanitizeGenerated(v.Interface().(Message)); err != nil {
+		panic(err)
+	}
 	return v.Interface().(Message)
 }

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package ext
+package pbutil

 import (
 	"encoding/binary"

@@ -12,5 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// Package ext enables record length-delimited Protocol Buffer streaming.
-package ext
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package ext
+package pbutil

 import (
 	"encoding/binary"

@@ -27,7 +27,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-package ext
+package pbutil

 import (
 	. "github.com/golang/protobuf/proto"

Godeps/_workspace/src/github.com/miekg/dns/.travis.yml | 19 (generated, vendored)

@@ -1,21 +1,6 @@
 language: go
 go:
-    - 1.2
     - 1.3
-env:
-    # "gvm update" resets GOOS and GOARCH environment variable, workaround it by setting
-    # BUILD_GOOS and BUILD_GOARCH and overriding GOARCH and GOOS in the build script
-    global:
-        - BUILD_GOARCH=amd64
-    matrix:
-        - BUILD_GOOS=linux
-        - BUILD_GOOS=darwin
-        - BUILD_GOOS=windows
+    - 1.4
 script:
-    - gvm cross $BUILD_GOOS $BUILD_GOARCH
-    - GOARCH=$BUILD_GOARCH GOOS=$BUILD_GOOS go build
-
-    # only test on linux
-    # also specify -short; the crypto tests fail in weird ways *sometimes*
-    # See issue #151
-    - if [ $BUILD_GOOS == "linux" ]; then GOARCH=$BUILD_GOARCH GOOS=$BUILD_GOOS go test -short -bench=.; fi
+    - go test -short -bench=.

Godeps/_workspace/src/github.com/miekg/dns/README.md | 3 (generated, vendored)

@@ -35,6 +35,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
 * https://github.com/DevelopersPL/godnsagent
 * https://github.com/duedil-ltd/discodns
 * https://github.com/StalkR/dns-reverse-proxy
+* https://github.com/tianon/rawdns

 Send pull request if you want to be listed here.

@@ -68,7 +69,7 @@ correctly, the following should work:

 ## Examples

-A short "how to use the API" is at the beginning of dns.go (this also will show
+A short "how to use the API" is at the beginning of doc.go (this also will show
 when you call `godoc github.com/miekg/dns`).

 Example programs can be found in the `github.com/miekg/exdns` repository.

Godeps/_workspace/src/github.com/miekg/dns/client.go | 9 (generated, vendored)

@@ -55,6 +55,13 @@ func Exchange(m *Msg, a string) (r *Msg, err error) {
 	defer co.Close()
 	co.SetReadDeadline(time.Now().Add(dnsTimeout))
 	co.SetWriteDeadline(time.Now().Add(dnsTimeout))
+
+	opt := m.IsEdns0()
+	// If EDNS0 is used use that for size.
+	if opt != nil && opt.UDPSize() >= MinMsgSize {
+		co.UDPSize = opt.UDPSize()
+	}
+
 	if err = co.WriteMsg(m); err != nil {
 		return nil, err
 	}
@@ -290,7 +297,7 @@ func Dial(network, address string) (conn *Conn, err error) {
 	return conn, nil
 }

-// Dialtimeout acts like Dial but takes a timeout.
+// DialTimeout acts like Dial but takes a timeout.
 func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
 	conn = new(Conn)
 	conn.Conn, err = net.DialTimeout(network, address, timeout)

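The Exchange change above makes the convenience function honour an EDNS0 OPT record on the query: if the message advertises a UDP buffer size of at least MinMsgSize, the connection uses that size when reading the reply. A short sketch of a query that sets this via SetEdns0; both SetEdns0 and Exchange appear in this diff, while the resolver address is an assumption for illustration:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeTXT)
	// Advertise a 4096-byte UDP buffer (and set the DO bit); with this
	// change, dns.Exchange uses that size when reading the response.
	m.SetEdns0(4096, true)

	r, err := dns.Exchange(m, "8.8.8.8:53") // resolver address assumed
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Rcode, len(r.Answer))
}
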
Godeps/_workspace/src/github.com/miekg/dns/client_test.go | 120 (generated, vendored)

@@ -1,6 +1,7 @@
 package dns

 import (
+	"strconv"
 	"testing"
 	"time"
 )
@@ -11,7 +12,7 @@ func TestClientSync(t *testing.T) {

 	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Unable to run test server: %s", err)
+		t.Fatalf("Unable to run test server: %v", err)
 	}
 	defer s.Shutdown()

@@ -19,26 +20,20 @@ func TestClientSync(t *testing.T) {
 	m.SetQuestion("miek.nl.", TypeSOA)

 	c := new(Client)
-	r, _, e := c.Exchange(m, addrstr)
-	if e != nil {
-		t.Logf("failed to exchange: %s", e.Error())
-		t.Fail()
+	r, _, err := c.Exchange(m, addrstr)
+	if err != nil {
+		t.Errorf("failed to exchange: %v", err)
 	}
 	if r != nil && r.Rcode != RcodeSuccess {
-		t.Log("failed to get an valid answer")
-		t.Fail()
-		t.Logf("%v\n", r)
+		t.Errorf("failed to get an valid answer\n%v", r)
 	}
 	// And now with plain Exchange().
-	r, e = Exchange(m, addrstr)
-	if e != nil {
-		t.Logf("failed to exchange: %s", e.Error())
-		t.Fail()
+	r, err = Exchange(m, addrstr)
+	if err != nil {
+		t.Errorf("failed to exchange: %v", err)
 	}
 	if r != nil && r.Rcode != RcodeSuccess {
-		t.Log("failed to get an valid answer")
-		t.Fail()
-		t.Logf("%v\n", r)
+		t.Errorf("failed to get an valid answer\n%v", r)
 	}
 }

@@ -48,7 +43,7 @@ func TestClientEDNS0(t *testing.T) {

 	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Unable to run test server: %s", err)
+		t.Fatalf("Unable to run test server: %v", err)
 	}
 	defer s.Shutdown()

@@ -57,6 +52,58 @@ func TestClientEDNS0(t *testing.T) {

 	m.SetEdns0(2048, true)

+	c := new(Client)
+	r, _, err := c.Exchange(m, addrstr)
+	if err != nil {
+		t.Errorf("failed to exchange: %v", err)
+	}
+
+	if r != nil && r.Rcode != RcodeSuccess {
+		t.Errorf("failed to get an valid answer\n%v", r)
+	}
+}
+
+// Validates the transmission and parsing of local EDNS0 options.
+func TestClientEDNS0Local(t *testing.T) {
+
+	optStr1 := "1979:0x0707"
+	optStr2 := strconv.Itoa(EDNS0LOCALSTART) + ":0x0601"
+
+	handler := func(w ResponseWriter, req *Msg) {
+		m := new(Msg)
+		m.SetReply(req)
+
+		m.Extra = make([]RR, 1, 2)
+		m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello local edns"}}
+
+		// If the local options are what we expect, then reflect them back.
+		ec1 := req.Extra[0].(*OPT).Option[0].(*EDNS0_LOCAL).String()
+		ec2 := req.Extra[0].(*OPT).Option[1].(*EDNS0_LOCAL).String()
+		if ec1 == optStr1 && ec2 == optStr2 {
+			m.Extra = append(m.Extra, req.Extra[0])
+		}
+
+		w.WriteMsg(m)
+	}
+
+	HandleFunc("miek.nl.", handler)
+	defer HandleRemove("miek.nl.")
+
+	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
+	if err != nil {
+		t.Fatalf("Unable to run test server: %s", err)
+	}
+	defer s.Shutdown()
+
+	m := new(Msg)
+	m.SetQuestion("miek.nl.", TypeTXT)
+
+	// Add two local edns options to the query.
+	ec1 := &EDNS0_LOCAL{Code: 1979, Data: []byte{7, 7}}
+	ec2 := &EDNS0_LOCAL{Code: EDNS0LOCALSTART, Data: []byte{6, 1}}
+	o := &OPT{Hdr: RR_Header{Name: ".", Rrtype: TypeOPT}, Option: []EDNS0{ec1, ec2}}
+	m.Extra = append(m.Extra, o)
+
 	c := new(Client)
 	r, _, e := c.Exchange(m, addrstr)
 	if e != nil {
@@ -65,7 +112,28 @@ func TestClientEDNS0(t *testing.T) {
 	}

 	if r != nil && r.Rcode != RcodeSuccess {
-		t.Log("failed to get an valid answer")
+		t.Log("failed to get a valid answer")
+		t.Fail()
+		t.Logf("%v\n", r)
+	}
+
+	txt := r.Extra[0].(*TXT).Txt[0]
+	if txt != "Hello local edns" {
+		t.Log("Unexpected result for miek.nl", txt, "!= Hello local edns")
+		t.Fail()
+	}
+
+	// Validate the local options in the reply.
+	got := r.Extra[1].(*OPT).Option[0].(*EDNS0_LOCAL).String()
+	if got != optStr1 {
+		t.Log("failed to get local edns0 answer; got %s, expected %s", got, optStr1)
+		t.Fail()
+		t.Logf("%v\n", r)
+	}
+
+	got = r.Extra[1].(*OPT).Option[1].(*EDNS0_LOCAL).String()
+	if got != optStr2 {
+		t.Log("failed to get local edns0 answer; got %s, expected %s", got, optStr2)
 		t.Fail()
 		t.Logf("%v\n", r)
 	}
@@ -77,7 +145,7 @@ func TestSingleSingleInflight(t *testing.T) {

 	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Unable to run test server: %s", err)
+		t.Fatalf("Unable to run test server: %v", err)
 	}
 	defer s.Shutdown()

@@ -106,8 +174,7 @@ Loop:
 			first = rtt
 		} else {
 			if first != rtt {
-				t.Log("all rtts should be equal")
-				t.Fail()
+				t.Errorf("all rtts should be equal. got %d want %d", rtt, first)
 			}
 		}
 		i++
@@ -127,14 +194,14 @@ func ExampleUpdateLeaseTSIG(t *testing.T) {
 	rrs[0] = rr
 	m.Insert(rrs)

-	lease_rr := new(OPT)
-	lease_rr.Hdr.Name = "."
-	lease_rr.Hdr.Rrtype = TypeOPT
+	leaseRr := new(OPT)
+	leaseRr.Hdr.Name = "."
+	leaseRr.Hdr.Rrtype = TypeOPT
 	e := new(EDNS0_UL)
 	e.Code = EDNS0UL
 	e.Lease = 120
-	lease_rr.Option = append(lease_rr.Option, e)
-	m.Extra = append(m.Extra, lease_rr)
+	leaseRr.Option = append(leaseRr.Option, e)
+	m.Extra = append(m.Extra, leaseRr)

 	c := new(Client)
 	m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix())
@@ -142,7 +209,6 @@ func ExampleUpdateLeaseTSIG(t *testing.T) {

 	_, _, err := c.Exchange(m, "127.0.0.1:53")
 	if err != nil {
-		t.Log(err.Error())
-		t.Fail()
+		t.Error(err)
 	}
 }

Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go | 2 (generated, vendored)

@@ -7,7 +7,7 @@ import (
 	"strings"
 )

-// Wraps the contents of the /etc/resolv.conf.
+// ClientConfig wraps the contents of the /etc/resolv.conf file.
 type ClientConfig struct {
 	Servers []string // servers to use
 	Search  []string // suffixes to append to local name

Godeps/_workspace/src/github.com/miekg/dns/clientconfig_test.go | 2 (generated, vendored)

@@ -32,7 +32,7 @@ func testConfig(t *testing.T, data string) {
 	}
 	cc, err := ClientConfigFromFile(path)
 	if err != nil {
-		t.Errorf("error parsing resolv.conf: %s", err)
+		t.Errorf("error parsing resolv.conf: %v", err)
 	}
 	if l := len(cc.Servers); l != 2 {
 		t.Errorf("incorrect number of nameservers detected: %d", l)

Godeps/_workspace/src/github.com/miekg/dns/defaults.go | 10 (generated, vendored)

@@ -24,7 +24,9 @@ func (dns *Msg) SetReply(request *Msg) *Msg {
 	return dns
 }

-// SetQuestion creates a question message.
+// SetQuestion creates a question message, it sets the Question
+// section, generates an Id and sets the RecursionDesired (RD)
+// bit to true.
 func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
 	dns.Id = Id()
 	dns.RecursionDesired = true
@@ -33,7 +35,9 @@ func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
 	return dns
 }

-// SetNotify creates a notify message.
+// SetNotify creates a notify message, it sets the Question
+// section, generates an Id and sets the Authoritative (AA)
+// bit to true.
 func (dns *Msg) SetNotify(z string) *Msg {
 	dns.Opcode = OpcodeNotify
 	dns.Authoritative = true
@@ -184,7 +188,7 @@ func IsFqdn(s string) bool {
 	return s[l-1] == '.'
 }

-// Fqdns return the fully qualified domain name from s.
+// Fqdn return the fully qualified domain name from s.
 // If s is already fully qualified, it behaves as the identity function.
 func Fqdn(s string) string {
 	if IsFqdn(s) {

109  Godeps/_workspace/src/github.com/miekg/dns/dns.go  generated, vendored

@@ -1,106 +1,16 @@
-// Package dns implements a full featured interface to the Domain Name System.
-// Server- and client-side programming is supported.
-// (... the remaining ~90 lines of the old package comment are deleted from this
-// file; the same text, extended, reappears verbatim in the new doc.go further below ...)
 package dns
 
 import "strconv"
 
 const (
 	year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
-	DefaultMsgSize = 4096 // Standard default for larger than 512 bytes.
-	MinMsgSize = 512 // Minimal size of a DNS packet.
-	MaxMsgSize = 65536 // Largest possible DNS packet.
-	defaultTtl = 3600 // Default TTL.
+	// DefaultMsgSize is the standard default for messages larger than 512 bytes.
+	DefaultMsgSize = 4096
+	// MinMsgSize is the minimal size of a DNS packet.
+	MinMsgSize = 512
+	// MaxMsgSize is the largest possible DNS packet.
+	MaxMsgSize = 65535
+	defaultTtl = 3600 // Default internal TTL.
 )
 
 // Error represents a DNS error
@@ -137,9 +47,10 @@ type RR_Header struct {
 	Rdlength uint16 // length of data after header
 }
 
+// Header returns itself. This is here to make RR_Header implement the RR interface.
 func (h *RR_Header) Header() *RR_Header { return h }
 
-// Just to imlement the RR interface
+// Just to imlement the RR interface.
 func (h *RR_Header) copy() RR { return nil }
 
 func (h *RR_Header) copyHeader() *RR_Header {
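The RR_Header touched above is the header embedded in every resource record. A brief sketch of the two construction styles the (relocated) package comment documents, using only names that appear in this commit: the native struct form and the presentation-format form parsed by NewRR.

	package main

	import (
		"fmt"
		"log"

		"github.com/miekg/dns"
	)

	func main() {
		// Native struct form: an RR_Header plus the type-specific fields.
		r := new(dns.MX)
		r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
		r.Preference = 10
		r.Mx = "mx.miek.nl."

		// Presentation form, parsed by NewRR.
		mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(r.String())
		fmt.Println(mx.String())
	}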
133  Godeps/_workspace/src/github.com/miekg/dns/dns_test.go  generated, vendored (changed lines shown per hunk)

@@ -17,13 +17,11 @@ func TestPackUnpack(t *testing.T) {
-		t.Log("failed to pack msg with DNSKEY")
-		t.Fail()
+		t.Error("failed to pack msg with DNSKEY")
-		t.Log("failed to unpack msg with DNSKEY")
-		t.Fail()
+		t.Error("failed to unpack msg with DNSKEY")
@@ -35,13 +33,11 @@ func TestPackUnpack(t *testing.T) {
-		t.Log("failed to pack msg with RRSIG")
-		t.Fail()
+		t.Error("failed to pack msg with RRSIG")
-		t.Log("failed to unpack msg with RRSIG")
-		t.Fail()
+		t.Error("failed to unpack msg with RRSIG")
@@ -62,8 +58,7 @@ func TestPackUnpack2(t *testing.T) {
-		t.Log("Packing failed: " + err.Error())
-		t.Fail()
+		t.Error("Packing failed: ", err)
@@ -90,16 +85,14 @@ func TestPackUnpack3(t *testing.T) {
-		t.Log("packing failed: " + err.Error())
-		t.Fail()
+		t.Error("packing failed: ", err)
-		t.Log("unpacking failed")
-		t.Fail()
+		t.Error("unpacking failed")
@@ -111,10 +104,9 @@ func TestBailiwick(t *testing.T) {
-			t.Logf("%s should be child of %s\n", child, parent)
-			t.Logf("comparelabels %d", CompareDomainName(parent, child))
-			t.Logf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
-			t.Fail()
+			t.Errorf("%s should be child of %s", child, parent)
+			t.Errorf("comparelabels %d", CompareDomainName(parent, child))
+			t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
@@ -126,10 +118,9 @@ func TestBailiwick(t *testing.T) {
-			t.Logf("%s should not be child of %s\n", child, parent)
-			t.Logf("comparelabels %d", CompareDomainName(parent, child))
-			t.Logf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
-			t.Fail()
+			t.Errorf("%s should not be child of %s", child, parent)
+			t.Errorf("comparelabels %d", CompareDomainName(parent, child))
+			t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
@@ -142,13 +133,11 @@ func TestPack(t *testing.T) {
-			t.Logf("failed to create RR: %s\n", err.Error())
-			t.Fail()
+			t.Errorf("failed to create RR: %v", err)
-			t.Logf("packing failed: %s\n", err.Error())
-			t.Fail()
+			t.Errorf("packing failed: %v", err)
@@ -160,20 +149,17 @@ func TestPack(t *testing.T) {
 	if _, err := x.Pack(); err == nil {
-		t.Log("packing should fail")
-		t.Fail()
+		t.Error("packing should fail")
 	}
 	x.Answer = make([]RR, 1)
 	x.Answer[0], err = NewRR(rr[0])
 	if _, err := x.Pack(); err == nil {
-		t.Log("packing should fail")
-		t.Fail()
+		t.Error("packing should fail")
 	}
 	x.Question = make([]Question, 1)
 	if _, err := x.Pack(); err == nil {
-		t.Log("packing should fail")
-		t.Fail()
+		t.Error("packing should fail")
 	}
 }
@@ -186,11 +172,10 @@ func TestPackNAPTR(t *testing.T) {
-		t.Logf("packing failed: %s", err.Error())
-		t.Logf("length %d, need more than %d\n", rr.len(), off)
-		t.Fail()
+		t.Errorf("packing failed: %v", err)
+		t.Errorf("length %d, need more than %d", rr.len(), off)
 	} else {
-		t.Logf("buf size needed: %d\n", off)
+		t.Logf("buf size needed: %d", off)
@@ -229,12 +214,10 @@ func TestMsgCompressLength(t *testing.T) {
 		t.Error(err)
-		t.Fail()
 	}
 	if predicted < len(buf) {
-		t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d\n",
+		t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d",
 			msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
-		t.Fail()
@@ -261,12 +244,10 @@ func TestMsgLength(t *testing.T) {
 		t.Error(err)
-		t.Fail()
 	}
 	if predicted < len(buf) {
-		t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d\n",
+		t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d",
 			msg.Question[0].Name, predicted, len(buf))
-		t.Fail()
@@ -400,10 +381,10 @@ func BenchmarkMsgUnpack(b *testing.B) {
-	msg_buf, _ := msg.Pack()
+	msgBuf, _ := msg.Pack()
-		_ = msg.Unpack(msg_buf)
+		_ = msg.Unpack(msgBuf)
@@ -441,7 +422,7 @@ func TestToRFC3597(t *testing.T) {
 	if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` {
-		t.Fail()
+		t.Error("string mismatch")
@@ -453,10 +434,9 @@ func TestNoRdataPack(t *testing.T) {
-		_, e := PackRR(r, data, 0, nil, false)
-		if e != nil {
-			t.Logf("failed to pack RR with zero rdata: %s: %s\n", TypeToString[typ], e.Error())
-			t.Fail()
+		_, err := PackRR(r, data, 0, nil, false)
+		if err != nil {
+			t.Errorf("failed to pack RR with zero rdata: %s: %v", TypeToString[typ], err)
@@ -472,17 +452,17 @@ func TestNoRdataUnpack(t *testing.T) {
-		off, e := PackRR(r, data, 0, nil, false)
-		if e != nil {
-			// Should always works, TestNoDataPack should have catched this
+		off, err := PackRR(r, data, 0, nil, false)
+		if err != nil {
+			// Should always works, TestNoDataPack should have caught this
+			t.Errorf("failed to pack RR: %v", err)
 			continue
 		}
-		rr, _, e := UnpackRR(data[:off], 0)
-		if e != nil {
-			t.Logf("failed to unpack RR with zero rdata: %s: %s\n", TypeToString[typ], e.Error())
-			t.Fail()
+		rr, _, err := UnpackRR(data[:off], 0)
+		if err != nil {
+			t.Errorf("failed to unpack RR with zero rdata: %s: %v", TypeToString[typ], err)
 		}
-		t.Logf("%s\n", rr)
+		t.Log(rr)
@@ -563,18 +543,39 @@ func TestPackIPSECKEY(t *testing.T) {
-		off, e := PackRR(rr, buf, 0, nil, false)
-		if e != nil {
-			t.Logf("failed to pack IPSECKEY %s: %s\n", e, t1)
-			t.Fail()
+		off, err := PackRR(rr, buf, 0, nil, false)
+		if err != nil {
+			t.Errorf("failed to pack IPSECKEY %v: %s", err, t1)
 			continue
 		}
-		rr, _, e = UnpackRR(buf[:off], 0)
-		if e != nil {
-			t.Logf("failed to unpack IPSECKEY %s: %s\n", e, t1)
-			t.Fail()
+		rr, _, err = UnpackRR(buf[:off], 0)
+		if err != nil {
+			t.Errorf("failed to unpack IPSECKEY %v: %s", err, t1)
 		}
-		t.Logf("%s\n", rr)
+		t.Log(rr)
+	}
+}
+
+func TestMsgPackBuffer(t *testing.T) {
+	var testMessages = []string{
+		// news.ycombinator.com.in.escapemg.com. IN A, response
+		"586285830001000000010000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001c0210006000100000e10002c036e7332c02103646e730b67726f6f7665736861726bc02d77ed50e600002a3000000e1000093a8000000e10",
+
+		// news.ycombinator.com.in.escapemg.com. IN A, question
+		"586201000001000000000000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001",
+
+		"398781020001000000000000046e6577730b79636f6d62696e61746f7203636f6d0000010001",
+	}
+
+	for i, hexData := range testMessages {
+		// we won't fail the decoding of the hex
+		input, _ := hex.DecodeString(hexData)
+		m := new(Msg)
+		if err := m.Unpack(input); err != nil {
+			t.Errorf("packet %d failed to unpack", i)
+			continue
+		}
+		t.Logf("packet %d %s", i, m.String())
 	}
 }
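The tests above repeatedly pack messages to wire format and unpack them again. A minimal sketch of that round trip with the public API, using only calls that appear in this diff:

	package main

	import (
		"fmt"
		"log"

		"github.com/miekg/dns"
	)

	func main() {
		out := new(dns.Msg)
		out.SetQuestion("miek.nl.", dns.TypeMX)

		buf, err := out.Pack() // wire format, as in TestPackUnpack above
		if err != nil {
			log.Fatal(err)
		}

		in := new(dns.Msg)
		if err := in.Unpack(buf); err != nil {
			log.Fatal(err)
		}
		fmt.Println(in.Question)
	}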
22  Godeps/_workspace/src/github.com/miekg/dns/dnssec.go  generated, vendored

@@ -1,16 +1,3 @@
-// DNSSEC
-//
-// DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
-// uses public key cryptography to sign resource records. The
-// public keys are stored in DNSKEY records and the signatures in RRSIG records.
-//
-// Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
-// to an request.
-//
-//	m := new(dns.Msg)
-//	m.SetEdns0(4096, true)
-//
-// Signature generation, signature verification and key generation are all supported.
 package dns
 
 import (
@@ -422,8 +409,8 @@ func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
 }
 
 // Return the signatures base64 encodedig sigdata as a byte slice.
-func (s *RRSIG) sigBuf() []byte {
-	sigbuf, err := fromBase64([]byte(s.Signature))
+func (rr *RRSIG) sigBuf() []byte {
+	sigbuf, err := fromBase64([]byte(rr.Signature))
 	if err != nil {
 		return nil
 	}
@@ -588,7 +575,10 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
 		wires[i] = wire
 	}
 	sort.Sort(wires)
-	for _, wire := range wires {
+	for i, wire := range wires {
+		if i > 0 && bytes.Equal(wire, wires[i-1]) {
+			continue
+		}
 		buf = append(buf, wire...)
 	}
 	return buf, nil
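The comment removed above (and reproduced in the new doc.go later in this commit) explains that DNSSEC data is requested by setting the DO bit via SetEdns0. A brief hypothetical sketch combining that with a query; the record type and the placeholder resolver address are assumptions, the calls themselves all appear in this diff.

	package main

	import (
		"fmt"
		"log"

		"github.com/miekg/dns"
	)

	func main() {
		m := new(dns.Msg)
		m.SetQuestion("miek.nl.", dns.TypeSOA)
		// Advertise a 4096-byte UDP buffer with the DO (DNSSEC OK) bit set,
		// as in the removed comment above.
		m.SetEdns0(4096, true)

		c := new(dns.Client)
		in, _, err := c.Exchange(m, "127.0.0.1:53") // placeholder resolver
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(in.Answer) // may contain RRSIG records alongside the SOA
	}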
14  Godeps/_workspace/src/github.com/miekg/dns/dnssec_keygen.go  generated, vendored

@@ -15,8 +15,8 @@ import (
 // what kind of DNSKEY will be generated.
 // The ECDSA algorithms imply a fixed keysize, in that case
 // bits should be set to the size of the algorithm.
-func (r *DNSKEY) Generate(bits int) (PrivateKey, error) {
-	switch r.Algorithm {
+func (k *DNSKEY) Generate(bits int) (PrivateKey, error) {
+	switch k.Algorithm {
 	case DSA, DSANSEC3SHA1:
 		if bits != 1024 {
 			return nil, ErrKeySize
@@ -39,7 +39,7 @@ func (r *DNSKEY) Generate(bits int) (PrivateKey, error) {
 		}
 	}
 
-	switch r.Algorithm {
+	switch k.Algorithm {
 	case DSA, DSANSEC3SHA1:
 		params := new(dsa.Parameters)
 		if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
@@ -51,18 +51,18 @@ func (r *DNSKEY) Generate(bits int) (PrivateKey, error) {
 		if err != nil {
 			return nil, err
 		}
-		r.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
+		k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
 		return (*DSAPrivateKey)(priv), nil
 	case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
 		priv, err := rsa.GenerateKey(rand.Reader, bits)
 		if err != nil {
 			return nil, err
 		}
-		r.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
+		k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
 		return (*RSAPrivateKey)(priv), nil
 	case ECDSAP256SHA256, ECDSAP384SHA384:
 		var c elliptic.Curve
-		switch r.Algorithm {
+		switch k.Algorithm {
 		case ECDSAP256SHA256:
 			c = elliptic.P256()
 		case ECDSAP384SHA384:
@@ -72,7 +72,7 @@ func (r *DNSKEY) Generate(bits int) (PrivateKey, error) {
 		if err != nil {
 			return nil, err
 		}
-		r.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
+		k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
 		return (*ECDSAPrivateKey)(priv), nil
 	default:
 		return nil, ErrAlg
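A short sketch of calling the Generate method renamed above, mirroring TestGenerateEC further down in this commit. The field values (Flags = 256, Protocol = 3) are illustrative; for the ECDSA algorithms the bit size matches the curve, so 256 is passed, as the comment on Generate explains.

	package main

	import (
		"fmt"
		"log"

		"github.com/miekg/dns"
	)

	func main() {
		key := new(dns.DNSKEY)
		key.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
		key.Flags = 256
		key.Protocol = 3
		key.Algorithm = dns.ECDSAP256SHA256

		privkey, err := key.Generate(256)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(key.String())
		fmt.Println(key.PrivateKeyString(privkey))
	}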
14  Godeps/_workspace/src/github.com/miekg/dns/dnssec_keyscan.go  generated, vendored

@@ -9,6 +9,8 @@ import (
 	"strings"
 )
 
+// NewPrivateKey returns a PrivateKey by parsing the string s.
+// s should be in the same form of the BIND private key files.
 func (k *DNSKEY) NewPrivateKey(s string) (PrivateKey, error) {
 	if s[len(s)-1] != '\n' { // We need a closing newline
 		return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
@@ -170,9 +172,9 @@ func parseKey(r io.Reader, file string) (map[string]string, error) {
 	for l := range c {
 		// It should alternate
 		switch l.value {
-		case _KEY:
+		case zKey:
 			k = l.token
-		case _VALUE:
+		case zValue:
 			if k == "" {
 				return nil, &ParseError{file, "no private key seen", l}
 			}
@@ -202,14 +204,14 @@ func klexer(s *scan, c chan lex) {
 			}
 			l.token = str
 			if key {
-				l.value = _KEY
+				l.value = zKey
 				c <- l
 				// Next token is a space, eat it
 				s.tokenText()
 				key = false
 				str = ""
 			} else {
-				l.value = _VALUE
+				l.value = zValue
 			}
 		case ';':
 			commt = true
@@ -218,7 +220,7 @@
 				// Reset a comment
 				commt = false
 			}
-			l.value = _VALUE
+			l.value = zValue
 			l.token = str
 			c <- l
 			str = ""
@@ -235,7 +237,7 @@
 	if len(str) > 0 {
 		// Send remainder
 		l.token = str
-		l.value = _VALUE
+		l.value = zValue
 		c <- l
 	}
 }
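The new comment above documents NewPrivateKey, which parses the BIND private-key format. A small sketch, under the assumption that PrivateKeyString and NewPrivateKey round-trip (which is what the key material in this library's tests relies on): generate a key, serialize its secret part, and parse it back.

	package main

	import (
		"fmt"
		"log"

		"github.com/miekg/dns"
	)

	func main() {
		key := new(dns.DNSKEY)
		key.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
		key.Flags = 256
		key.Protocol = 3
		key.Algorithm = dns.RSASHA256
		privkey, err := key.Generate(1024)
		if err != nil {
			log.Fatal(err)
		}

		// Serialize to the BIND "Private-key-format" text and parse it back
		// with NewPrivateKey, the function documented in the hunk above.
		s := key.PrivateKeyString(privkey)
		again, err := key.NewPrivateKey(s)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(again != nil)
	}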
17  Godeps/_workspace/src/github.com/miekg/dns/dnssec_privkey.go  generated, vendored

@@ -10,8 +10,9 @@ import (
 	"strconv"
 )
 
-const _FORMAT = "Private-key-format: v1.3\n"
+const format = "Private-key-format: v1.3\n"
 
+// PrivateKey ... TODO(miek)
 type PrivateKey interface {
 	Sign([]byte, uint8) ([]byte, error)
 	String(uint8) string
@@ -53,17 +54,17 @@ func (p *RSAPrivateKey) String(alg uint8) string {
 	// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
 	// and from: http://code.google.com/p/go/issues/detail?id=987
 	one := big.NewInt(1)
-	p_1 := big.NewInt(0).Sub(p.Primes[0], one)
-	q_1 := big.NewInt(0).Sub(p.Primes[1], one)
-	exp1 := big.NewInt(0).Mod(p.D, p_1)
-	exp2 := big.NewInt(0).Mod(p.D, q_1)
+	p1 := big.NewInt(0).Sub(p.Primes[0], one)
+	q1 := big.NewInt(0).Sub(p.Primes[1], one)
+	exp1 := big.NewInt(0).Mod(p.D, p1)
+	exp2 := big.NewInt(0).Mod(p.D, q1)
 	coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
 
 	exponent1 := toBase64(exp1.Bytes())
 	exponent2 := toBase64(exp2.Bytes())
 	coefficient := toBase64(coeff.Bytes())
 
-	return _FORMAT +
+	return format +
 		"Algorithm: " + algorithm + "\n" +
 		"Modulus: " + modulus + "\n" +
 		"PublicExponent: " + publicExponent + "\n" +
@@ -106,7 +107,7 @@ func (p *ECDSAPrivateKey) String(alg uint8) string {
 		intlen = 48
 	}
 	private := toBase64(intToBytes(p.D, intlen))
-	return _FORMAT +
+	return format +
 		"Algorithm: " + algorithm + "\n" +
 		"PrivateKey: " + private + "\n"
 }
@@ -133,7 +134,7 @@ func (p *DSAPrivateKey) String(alg uint8) string {
 	base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
 	priv := toBase64(intToBytes(p.X, 20))
 	pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
-	return _FORMAT +
+	return format +
 		"Algorithm: " + algorithm + "\n" +
 		"Prime(p): " + prime + "\n" +
 		"Subprime(q): " + subprime + "\n" +
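The renamed variables above compute the standard RSA CRT values: exponent1 = d mod (p-1), exponent2 = d mod (q-1) and coefficient = q^-1 mod p. A standalone sketch of the same computation, checked against Go's crypto/rsa precomputation; the throwaway key is purely for illustration and is not part of this commit.

	package main

	import (
		"crypto/rand"
		"crypto/rsa"
		"fmt"
		"math/big"
	)

	func main() {
		key, err := rsa.GenerateKey(rand.Reader, 1024)
		if err != nil {
			panic(err)
		}
		one := big.NewInt(1)
		p1 := new(big.Int).Sub(key.Primes[0], one)
		q1 := new(big.Int).Sub(key.Primes[1], one)
		exp1 := new(big.Int).Mod(key.D, p1)   // Exponent1
		exp2 := new(big.Int).Mod(key.D, q1)   // Exponent2
		coeff := new(big.Int).ModInverse(key.Primes[1], key.Primes[0]) // Coefficient

		// crypto/rsa stores the same values as Dp, Dq and Qinv after Precompute.
		key.Precompute()
		fmt.Println(exp1.Cmp(key.Precomputed.Dp) == 0,
			exp2.Cmp(key.Precomputed.Dq) == 0,
			coeff.Cmp(key.Precomputed.Qinv) == 0)
	}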
132  Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go  generated, vendored (changed lines shown per hunk)

@@ -45,8 +45,8 @@ func TestGenerateEC(t *testing.T) {
-	t.Logf("%s\n", key.String())
-	t.Logf("%s\n", key.PrivateKeyString(privkey))
+	t.Log(key.String())
+	t.Log(key.PrivateKeyString(privkey))
@@ -62,8 +62,8 @@ func TestGenerateDSA(t *testing.T) {
-	t.Logf("%s\n", key.String())
-	t.Logf("%s\n", key.PrivateKeyString(privkey))
+	t.Log(key.String())
+	t.Log(key.PrivateKeyString(privkey))
@@ -79,8 +79,8 @@ func TestGenerateRSA(t *testing.T) {
-	t.Logf("%s\n", key.String())
-	t.Logf("%s\n", key.PrivateKeyString(privkey))
+	t.Log(key.String())
+	t.Log(key.PrivateKeyString(privkey))
@@ -107,10 +107,9 @@ func TestSecure(t *testing.T) {
-	// It should validate. Period is checked seperately, so this will keep on working
+	// It should validate. Period is checked separately, so this will keep on working
 	if sig.Verify(key, []RR{soa}) != nil {
-		t.Log("failure to validate")
-		t.Fail()
+		t.Error("failure to validate")
@@ -131,15 +130,13 @@ func TestSignature(t *testing.T) {
-		t.Log("should not be valid")
-		t.Fail()
+		t.Error("should not be valid")
-		t.Log("should be valid")
-		t.Fail()
+		t.Error("should be valid")
@@ -196,16 +193,14 @@ func TestSignVerify(t *testing.T) {
-			t.Log("failure to sign the record")
-			t.Fail()
+			t.Error("failure to sign the record")
-			t.Log("failure to validate")
-			t.Fail()
+			t.Error("failure to validate")
-		t.Logf("validated: %s\n", r.Header().Name)
+		t.Logf("validated: %s", r.Header().Name)
@@ -234,16 +229,14 @@ func Test65534(t *testing.T) {
-		t.Log(err)
-		t.Log("failure to sign the TYPE65534 record")
-		t.Fail()
+		t.Error(err)
+		t.Error("failure to sign the TYPE65534 record")
-		t.Log(err)
-		t.Log("failure to validate")
-		t.Fail()
+		t.Error(err)
+		t.Error("failure to validate")
-		t.Logf("validated: %s\n", t6.Header().Name)
+		t.Logf("validated: %s", t6.Header().Name)
@@ -271,13 +264,11 @@ Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1
-		t.Log("pubkey is not what we've read")
-		t.Fail()
+		t.Error("pubkey is not what we've read")
-		t.Log("privkey is not what we've read")
-		t.Logf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey))
-		t.Fail()
+		t.Error("privkey is not what we've read")
+		t.Errorf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey))
@@ -294,8 +285,7 @@ func TestTag(t *testing.T) {
-		t.Logf("wrong key tag: %d for key %v\n", tag, key)
-		t.Fail()
+		t.Errorf("wrong key tag: %d for key %v", tag, key)
@@ -335,13 +325,11 @@ func TestKeyRSA(t *testing.T) {
-		t.Logf("failed to sign")
-		t.Fail()
+		t.Error("failed to sign")
-		t.Logf("failed to verify")
-		t.Fail()
+		t.Error("failed to verify")
@@ -358,8 +346,7 @@ func TestKeyToDS(t *testing.T) {
-		t.Logf("wrong DS digest for SHA1\n%v\n", ds)
-		t.Fail()
+		t.Errorf("wrong DS digest for SHA1\n%v", ds)
@@ -384,23 +371,18 @@ Activate: 20110302104537`
-		t.Logf("%v\n", err)
-		t.Fail()
+		t.Error(err)
-			t.Log("exponenent should be 65537")
-			t.Fail()
+			t.Error("exponenent should be 65537")
-		t.Logf("we should have read an RSA key: %v", priv)
-		t.Fail()
+		t.Errorf("we should have read an RSA key: %v", priv)
-		t.Logf("%d %v\n", k.KeyTag(), k)
-		t.Log("keytag should be 37350")
-		t.Fail()
+		t.Errorf("keytag should be 37350, got %d %v", k.KeyTag(), k)
@@ -423,9 +405,7 @@ Activate: 20110302104537`
-		t.Log("signature is not correct")
-		t.Logf("%v\n", sig)
-		t.Fail()
+		t.Errorf("signature is not correct: %v", sig)
@@ -440,13 +420,13 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
 	eckey, err := NewRR(pub)
 	if err != nil {
-		t.Fatal(err.Error())
+		t.Fatal(err)
 	}
 	privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv)
 	if err != nil {
-		t.Fatal(err.Error())
+		t.Fatal(err)
 	}
-	// TODO: Create seperate test for this
+	// TODO: Create separate test for this
@@ -467,22 +447,21 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
-	if e := sig.Verify(eckey.(*DNSKEY), []RR{a}); e != nil {
-		t.Logf("\n%s\n%s\n%s\n\n%s\n\n",
+	if err := sig.Verify(eckey.(*DNSKEY), []RR{a}); err != nil {
+		t.Fatalf("Failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v",
 			eckey.(*DNSKEY).String(),
 			a.String(),
 			sig.String(),
 			eckey.(*DNSKEY).PrivateKeyString(privkey),
+			err,
 		)
-		t.Fatalf("failure to validate: %s", e.Error())
 	}
 }

 func TestSignVerifyECDSA2(t *testing.T) {
 	srv1, err := NewRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.")
 	if err != nil {
-		t.Fatalf(err.Error())
+		t.Fatal(err)
 	}
@@ -518,14 +497,13 @@ func TestSignVerifyECDSA2(t *testing.T) {
 	err = sig.Verify(key, []RR{srv})
 	if err != nil {
-		t.Logf("\n%s\n%s\n%s\n\n%s\n\n",
+		t.Logf("Failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v",
 			key.String(),
 			srv.String(),
 			sig.String(),
 			key.PrivateKeyString(privkey),
+			err,
 		)
-		t.Fatal("Failure to validate:", err)
 	}
 }
@@ -540,11 +518,11 @@ Algorithm: 13 (ECDSAP256SHA256)
-		t.Fatal(err.Error())
+		t.Fatal(err)
-		t.Fatal(err.Error())
+		t.Fatal(err)
@@ -552,11 +530,11 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
-		t.Fatal(err.Error())
+		t.Fatal(err)
-		t.Errorf("DS record differs:\n%v\n%v\n", ourDS, rrDS.(*DS))
+		t.Errorf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS))
@@ -566,11 +544,11 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
-		t.Fatal(err.Error())
+		t.Fatal(err)
-		t.Fatal(err.Error())
+		t.Fatal(err)
@@ -588,7 +566,7 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
-		t.Fatal(err.Error())
+		t.Fatal(err)
@@ -599,7 +577,7 @@ PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
-		t.Fatalf("RRSIG record differs:\n%v\n%v\n", ourRRSIG, rrRRSIG.(*RRSIG))
+		t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG))
@@ -614,11 +592,11 @@ Algorithm: 14 (ECDSAP384SHA384)
-		t.Fatal(err.Error())
+		t.Fatal(err)
-		t.Fatal(err.Error())
+		t.Fatal(err)
@@ -627,11 +605,11 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
-		t.Fatal(err.Error())
+		t.Fatal(err)
-		t.Fatalf("DS record differs:\n%v\n%v\n", ourDS, rrDS.(*DS))
+		t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS))
@@ -642,11 +620,11 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
-		t.Fatal(err.Error())
+		t.Fatal(err)
-		t.Fatal(err.Error())
+		t.Fatal(err)
@@ -664,7 +642,7 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
-		t.Fatal(err.Error())
+		t.Fatal(err)
@@ -675,6 +653,6 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
-		t.Fatalf("RRSIG record differs:\n%v\n%v\n", ourRRSIG, rrRRSIG.(*RRSIG))
+		t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG))
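The tests above repeatedly sign an RRset with RRSIG.Sign and check it with RRSIG.Verify. A condensed, hypothetical sketch of that flow with a freshly generated key; the RRSIG field set-up mirrors what the tests do (Inception/Expiration values are the ones used in TestSignature), but exact prerequisites may differ slightly in other versions of the library.

	package main

	import (
		"fmt"
		"log"

		"github.com/miekg/dns"
	)

	func main() {
		key := new(dns.DNSKEY)
		key.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
		key.Flags = 256
		key.Protocol = 3
		key.Algorithm = dns.RSASHA256
		privkey, err := key.Generate(1024)
		if err != nil {
			log.Fatal(err)
		}

		soa, _ := dns.NewRR("miek.nl. 3600 IN SOA ns.miek.nl. hostmaster.miek.nl. 1 14400 3600 604800 14400")

		sig := new(dns.RRSIG)
		sig.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeRRSIG, Class: dns.ClassINET, Ttl: 3600}
		sig.Inception = 315565800   // Tue Jan  1 10:10:00 CET 1980, as in TestSignature
		sig.Expiration = 4102477800 // Fri Jan  1 10:10:00 CET 2100
		sig.KeyTag = key.KeyTag()
		sig.SignerName = key.Hdr.Name
		sig.Algorithm = dns.RSASHA256

		if err := sig.Sign(privkey, []dns.RR{soa}); err != nil {
			log.Fatal("failure to sign the record: ", err)
		}
		if err := sig.Verify(key, []dns.RR{soa}); err != nil {
			log.Fatal("failure to validate: ", err)
		}
		fmt.Println("validated:", soa.Header().Name)
	}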
247
Godeps/_workspace/src/github.com/miekg/dns/doc.go
generated
vendored
Normal file
247
Godeps/_workspace/src/github.com/miekg/dns/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,247 @@
|
||||||
|
/*
|
||||||
|
Package dns implements a full featured interface to the Domain Name System.
|
||||||
|
Server- and client-side programming is supported.
|
||||||
|
The package allows complete control over what is send out to the DNS. The package
|
||||||
|
API follows the less-is-more principle, by presenting a small, clean interface.
|
||||||
|
|
||||||
|
The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
|
||||||
|
TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
|
||||||
|
Note that domain names MUST be fully qualified, before sending them, unqualified
|
||||||
|
names in a message will result in a packing failure.
|
||||||
|
|
||||||
|
Resource records are native types. They are not stored in wire format.
|
||||||
|
Basic usage pattern for creating a new resource record:
|
||||||
|
|
||||||
|
r := new(dns.MX)
|
||||||
|
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
|
||||||
|
r.Preference = 10
|
||||||
|
r.Mx = "mx.miek.nl."
|
||||||
|
|
||||||
|
Or directly from a string:
|
||||||
|
|
||||||
|
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
|
||||||
|
|
||||||
|
Or when the default TTL (3600) and class (IN) suit you:
|
||||||
|
|
||||||
|
mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
|
||||||
|
|
||||||
|
Or even:
|
||||||
|
|
||||||
|
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
|
||||||
|
|
||||||
|
In the DNS messages are exchanged, these messages contain resource
|
||||||
|
records (sets). Use pattern for creating a message:
|
||||||
|
|
||||||
|
m := new(dns.Msg)
|
||||||
|
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||||
|
|
||||||
|
Or when not certain if the domain name is fully qualified:
|
||||||
|
|
||||||
|
m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
|
||||||
|
|
||||||
|
The message m is now a message with the question section set to ask
|
||||||
|
the MX records for the miek.nl. zone.
|
||||||
|
|
||||||
|
The following is slightly more verbose, but more flexible:
|
||||||
|
|
||||||
|
m1 := new(dns.Msg)
|
||||||
|
m1.Id = dns.Id()
|
||||||
|
m1.RecursionDesired = true
|
||||||
|
m1.Question = make([]dns.Question, 1)
|
||||||
|
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
|
||||||
|
|
||||||
|
After creating a message it can be send.
|
||||||
|
Basic use pattern for synchronous querying the DNS at a
|
||||||
|
server configured on 127.0.0.1 and port 53:
|
||||||
|
|
||||||
|
c := new(dns.Client)
|
||||||
|
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
|
||||||
|
|
||||||
|
Suppressing
|
||||||
|
multiple outstanding queries (with the same question, type and class) is as easy as setting:
|
||||||
|
|
||||||
|
c.SingleInflight = true
|
||||||
|
|
||||||
|
If these "advanced" features are not needed, a simple UDP query can be send,
|
||||||
|
with:
|
||||||
|
|
||||||
|
in, err := dns.Exchange(m1, "127.0.0.1:53")
|
||||||
|
|
||||||
|
When this functions returns you will get dns message. A dns message consists
|
||||||
|
out of four sections.
|
||||||
|
The question section: in.Question, the answer section: in.Answer,
|
||||||
|
the authority section: in.Ns and the additional section: in.Extra.
|
||||||
|
|
||||||
|
Each of these sections (except the Question section) contain a []RR. Basic
|
||||||
|
use pattern for accessing the rdata of a TXT RR as the first RR in
|
||||||
|
the Answer section:
|
||||||
|
|
||||||
|
if t, ok := in.Answer[0].(*dns.TXT); ok {
|
||||||
|
// do something with t.Txt
|
||||||
|
}
|
||||||
|
|
||||||
|
Domain Name and TXT Character String Representations
|
||||||
|
|
||||||
|
Both domain names and TXT character strings are converted to presentation
|
||||||
|
form both when unpacked and when converted to strings.
|
||||||
|
|
||||||
|
For TXT character strings, tabs, carriage returns and line feeds will be
|
||||||
|
converted to \t, \r and \n respectively. Back slashes and quotations marks
|
||||||
|
will be escaped. Bytes below 32 and above 127 will be converted to \DDD
|
||||||
|
form.
|
||||||
|
|
||||||
|
For domain names, in addition to the above rules brackets, periods,
|
||||||
|
spaces, semicolons and the at symbol are escaped.
|
||||||
|
|
||||||
|
DNSSEC
|
||||||
|
|
||||||
|
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
|
||||||
|
uses public key cryptography to sign resource records. The
|
||||||
|
public keys are stored in DNSKEY records and the signatures in RRSIG records.
|
||||||
|
|
||||||
|
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
|
||||||
|
to an request.
|
||||||
|
|
||||||
|
m := new(dns.Msg)
|
||||||
|
m.SetEdns0(4096, true)
|
||||||
|
|
||||||
|
Signature generation, signature verification and key generation are all supported.
|
||||||
|
|
||||||
|
DYNAMIC UPDATES
|
||||||
|
|
||||||
|
Dynamic updates reuses the DNS message format, but renames three of
|
||||||
|
the sections. Question is Zone, Answer is Prerequisite, Authority is
|
||||||
|
Update, only the Additional is not renamed. See RFC 2136 for the gory details.
|
||||||
|
|
||||||
|
You can set a rather complex set of rules for the existence of absence of
|
||||||
|
certain resource records or names in a zone to specify if resource records
|
||||||
|
should be added or removed. The table from RFC 2136 supplemented with the Go
|
||||||
|
DNS function shows which functions exist to specify the prerequisites.
|
||||||
|
|
||||||
|
3.2.4 - Table Of Metavalues Used In Prerequisite Section
|
||||||
|
|
||||||
|
CLASS TYPE RDATA Meaning Function
|
||||||
|
--------------------------------------------------------------
|
||||||
|
ANY ANY empty Name is in use dns.NameUsed
|
||||||
|
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
|
||||||
|
NONE ANY empty Name is not in use dns.NameNotUsed
|
||||||
|
NONE rrset empty RRset does not exist dns.RRsetNotUsed
|
||||||
|
zone rrset rr RRset exists (value dep) dns.Used
|
||||||
|
|
||||||
|
The prerequisite section can also be left empty.
|
||||||
|
If you have decided on the prerequisites you can tell what RRs should
|
||||||
|
be added or deleted. The next table shows the options you have and
|
||||||
|
what functions to call.
|
||||||
|
|
||||||
|
3.4.2.6 - Table Of Metavalues Used In Update Section
|
||||||
|
|
||||||
|
CLASS TYPE RDATA Meaning Function
|
||||||
|
---------------------------------------------------------------
|
||||||
|
ANY ANY empty Delete all RRsets from name dns.RemoveName
|
||||||
|
ANY rrset empty Delete an RRset dns.RemoveRRset
|
||||||
|
NONE rrset rr Delete an RR from RRset dns.Remove
|
||||||
|
zone rrset rr Add to an RRset dns.Insert
|
||||||
|
|
||||||
|
TRANSACTION SIGNATURE
|
||||||
|
|
||||||
|
An TSIG or transaction signature adds a HMAC TSIG record to each message sent.
|
||||||
|
The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
|
||||||
|
|
||||||
|
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
|
||||||
|
must be fully qualified - as they are domain names) and the base64 secret
|
||||||
|
"so6ZGir4GPAqINNh9U5c3A==":
|
||||||
|
|
||||||
|
c := new(dns.Client)
|
||||||
|
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||||
|
m := new(dns.Msg)
|
||||||
|
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||||
|
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||||
|
...
|
||||||
|
// When sending the TSIG RR is calculated and filled in before sending
|
||||||
|
|
||||||
|
When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with
|
||||||
|
TSIG, this is the basic use pattern. In this example we request an AXFR for
|
||||||
|
miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
|
||||||
|
and using the server 176.58.119.54:
|
||||||
|
|
||||||
|

    t := new(dns.Transfer)
    m := new(dns.Msg)
    t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
    m.SetAxfr("miek.nl.")
    m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
    c, err := t.In(m, "176.58.119.54:53")
    for r := range c { ... }

You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
If something is not correct, an error is returned.
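
A minimal sketch of that read loop (not part of the original godoc); c is the
envelope channel returned by t.In above.

    for env := range c {
        if env.Error != nil {
            // a transport error or a failed TSIG check on this envelope
            break
        }
        for _, rr := range env.RR {
            // process each transferred record
        }
    }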

Basic use pattern for validating and replying to a message that has TSIG set:

    server := &dns.Server{Addr: ":53", Net: "udp"}
    server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
    go server.ListenAndServe()
    dns.HandleFunc(".", handleRequest)

    func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
        m := new(dns.Msg)
        m.SetReply(r)
        if r.IsTsig() {
            if w.TsigStatus() == nil {
                // *Msg r has a TSIG record and it was validated
                m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
            } else {
                // *Msg r has a TSIG record and it was not validated
            }
        }
        w.WriteMsg(m)
    }

PRIVATE RRS

RFC 6895 sets aside a range of type codes for private use. This range
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records, these
can be used before requesting an official type code from IANA.
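
Purely as an illustration (not part of the original godoc), such a code is simply a
constant picked from that range; the name below is made up.

    // TypeMYRR is a hypothetical private RR type code taken from the
    // RFC 6895 private-use range (0xFF00 - 0xFFFE).
    const TypeMYRR uint16 = 0xFF00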

EDNS0

EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
abused.
Basic use pattern for creating an (empty) OPT RR:

    o := new(dns.OPT)
    o.Hdr.Name = "." // MUST be the root zone, per definition.
    o.Hdr.Rrtype = dns.TypeOPT

The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891)
interfaces. Currently only a few have been standardized: EDNS0_NSID
(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
that these options may be combined in an OPT RR.
Basic use pattern for a server to check if (and which) options are set:

    // o is a dns.OPT
    for _, s := range o.Option {
        switch e := s.(type) {
        case *dns.EDNS0_NSID:
            // do stuff with e.Nsid
        case *dns.EDNS0_SUBNET:
            // access e.Family, e.Address, etc.
        }
    }
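
As a hedged client-side sketch (not in the original godoc), an option such as NSID
is requested by appending an EDNS0_NSID to the OPT RR and putting the OPT RR in the
additional section of the query; the query name is a placeholder.

    m := new(dns.Msg)
    m.SetQuestion("example.com.", dns.TypeA)
    o := new(dns.OPT)
    o.Hdr.Name = "." // the OPT RR is always owned by the root zone
    o.Hdr.Rrtype = dns.TypeOPT
    e := new(dns.EDNS0_NSID)
    e.Code = dns.EDNS0NSID // ask the server to include its name server identifier
    o.Option = append(o.Option, e)
    m.Extra = append(m.Extra, o) // the OPT RR lives in the additional section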

SIG(0)

From RFC 2931:

SIG(0) provides protection for DNS transactions and requests ....
... protection for glue records, DNS requests, protection for message headers
on requests and responses, and protection of the overall integrity of a response.

It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
secret approach in TSIG.
Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
RSASHA512.

Signing subsequent messages in multi-message sessions is not implemented.
*/
package dns
76
Godeps/_workspace/src/github.com/miekg/dns/edns.go
generated
vendored
76
Godeps/_workspace/src/github.com/miekg/dns/edns.go
generated
vendored
|
@ -1,29 +1,3 @@
|
||||||
// EDNS0
|
|
||||||
//
|
|
||||||
// EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
|
|
||||||
// by RFC 6891. It defines an new RR type, the OPT RR, which is then completely
|
|
||||||
// abused.
|
|
||||||
// Basic use pattern for creating an (empty) OPT RR:
|
|
||||||
//
|
|
||||||
// o := new(dns.OPT)
|
|
||||||
// o.Hdr.Name = "." // MUST be the root zone, per definition.
|
|
||||||
// o.Hdr.Rrtype = dns.TypeOPT
|
|
||||||
//
|
|
||||||
// The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891)
|
|
||||||
// interfaces. Currently only a few have been standardized: EDNS0_NSID
|
|
||||||
// (RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
|
|
||||||
// that these options may be combined in an OPT RR.
|
|
||||||
// Basic use pattern for a server to check if (and which) options are set:
|
|
||||||
//
|
|
||||||
// // o is a dns.OPT
|
|
||||||
// for _, s := range o.Option {
|
|
||||||
// switch e := s.(type) {
|
|
||||||
// case *dns.EDNS0_NSID:
|
|
||||||
// // do stuff with e.Nsid
|
|
||||||
// case *dns.EDNS0_SUBNET:
|
|
||||||
// // access e.Family, e.Address, etc.
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
@ -44,9 +18,13 @@ const (
|
||||||
EDNS0SUBNET = 0x8 // client-subnet (RFC6891)
|
EDNS0SUBNET = 0x8 // client-subnet (RFC6891)
|
||||||
EDNS0EXPIRE = 0x9 // EDNS0 expire
|
EDNS0EXPIRE = 0x9 // EDNS0 expire
|
||||||
EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
|
EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
|
||||||
|
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
|
||||||
|
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
|
||||||
_DO = 1 << 15 // dnssec ok
|
_DO = 1 << 15 // dnssec ok
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
|
||||||
|
// See RFC 6891.
|
||||||
type OPT struct {
|
type OPT struct {
|
||||||
Hdr RR_Header
|
Hdr RR_Header
|
||||||
Option []EDNS0 `dns:"opt"`
|
Option []EDNS0 `dns:"opt"`
|
||||||
|
@ -92,6 +70,8 @@ func (rr *OPT) String() string {
|
||||||
s += "\n; DS HASH UNDERSTOOD: " + o.String()
|
s += "\n; DS HASH UNDERSTOOD: " + o.String()
|
||||||
case *EDNS0_N3U:
|
case *EDNS0_N3U:
|
||||||
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
|
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
|
||||||
|
case *EDNS0_LOCAL:
|
||||||
|
s += "\n; LOCAL OPT: " + o.String()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return s
|
return s
|
||||||
|
@ -100,8 +80,9 @@ func (rr *OPT) String() string {
|
||||||
func (rr *OPT) len() int {
|
func (rr *OPT) len() int {
|
||||||
l := rr.Hdr.len()
|
l := rr.Hdr.len()
|
||||||
for i := 0; i < len(rr.Option); i++ {
|
for i := 0; i < len(rr.Option); i++ {
|
||||||
|
l += 4 // Account for 2-byte option code and 2-byte option length.
|
||||||
lo, _ := rr.Option[i].pack()
|
lo, _ := rr.Option[i].pack()
|
||||||
l += 2 + len(lo)
|
l += len(lo)
|
||||||
}
|
}
|
||||||
return l
|
return l
|
||||||
}
|
}
|
||||||
|
@ -499,3 +480,44 @@ func (e *EDNS0_EXPIRE) unpack(b []byte) error {
|
||||||
e.Expire = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
|
e.Expire = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The local EDNS0 option is used for local/experimental purposes. The option
|
||||||
|
// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
|
||||||
|
// (RFC6891), although any unassigned code can actually be used. The content of
|
||||||
|
// the option is made available in Data, unaltered.
|
||||||
|
// Basic use pattern for creating a local option:
|
||||||
|
//
|
||||||
|
// o := new(dns.OPT)
|
||||||
|
// o.Hdr.Name = "."
|
||||||
|
// o.Hdr.Rrtype = dns.TypeOPT
|
||||||
|
// e := new(dns.EDNS0_LOCAL)
|
||||||
|
// e.Code = dns.EDNS0LOCALSTART
|
||||||
|
// e.Data = []byte{72, 82, 74}
|
||||||
|
// o.Option = append(o.Option, e)
|
||||||
|
type EDNS0_LOCAL struct {
|
||||||
|
Code uint16
|
||||||
|
Data []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
|
||||||
|
func (e *EDNS0_LOCAL) String() string {
|
||||||
|
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
|
||||||
|
b := make([]byte, len(e.Data))
|
||||||
|
copied := copy(b, e.Data)
|
||||||
|
if copied != len(e.Data) {
|
||||||
|
return nil, ErrBuf
|
||||||
|
}
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *EDNS0_LOCAL) unpack(b []byte) error {
|
||||||
|
e.Data = make([]byte, len(b))
|
||||||
|
copied := copy(e.Data, b)
|
||||||
|
if copied != len(b) {
|
||||||
|
return ErrBuf
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
5
Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go
generated
vendored
5
Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go
generated
vendored
|
@ -216,8 +216,9 @@ func decode(b []byte) []byte {
|
||||||
}
|
}
|
||||||
out := make([]rune, 0, len(b))
|
out := make([]rune, 0, len(b))
|
||||||
b = b[len(_PREFIX):]
|
b = b[len(_PREFIX):]
|
||||||
for pos, x := range b {
|
for pos := len(b) - 1; pos >= 0; pos-- {
|
||||||
if x == _DELIMITER {
|
// only last delimiter is our interest
|
||||||
|
if b[pos] == _DELIMITER {
|
||||||
out = append(out, bytes.Runes(b[:pos])...)
|
out = append(out, bytes.Runes(b[:pos])...)
|
||||||
b = b[pos+1:] // trim source string
|
b = b[pos+1:] // trim source string
|
||||||
break
|
break
|
||||||
|
|
3
Godeps/_workspace/src/github.com/miekg/dns/idn/punycode_test.go
generated
vendored
3
Godeps/_workspace/src/github.com/miekg/dns/idn/punycode_test.go
generated
vendored
|
@ -9,10 +9,12 @@ var testcases = [][2]string{
|
||||||
{"", ""},
|
{"", ""},
|
||||||
{"a", "a"},
|
{"a", "a"},
|
||||||
{"A-B", "a-b"},
|
{"A-B", "a-b"},
|
||||||
|
{"A-B-C", "a-b-c"},
|
||||||
{"AbC", "abc"},
|
{"AbC", "abc"},
|
||||||
{"я", "xn--41a"},
|
{"я", "xn--41a"},
|
||||||
{"zя", "xn--z-0ub"},
|
{"zя", "xn--z-0ub"},
|
||||||
{"ЯZ", "xn--z-zub"},
|
{"ЯZ", "xn--z-zub"},
|
||||||
|
{"а-я", "xn----7sb8g"},
|
||||||
{"إختبار", "xn--kgbechtv"},
|
{"إختبار", "xn--kgbechtv"},
|
||||||
{"آزمایشی", "xn--hgbk6aj7f53bba"},
|
{"آزمایشی", "xn--hgbk6aj7f53bba"},
|
||||||
{"测试", "xn--0zwm56d"},
|
{"测试", "xn--0zwm56d"},
|
||||||
|
@ -24,6 +26,7 @@ var testcases = [][2]string{
|
||||||
{"טעסט", "xn--deba0ad"},
|
{"טעסט", "xn--deba0ad"},
|
||||||
{"テスト", "xn--zckzah"},
|
{"テスト", "xn--zckzah"},
|
||||||
{"பரிட்சை", "xn--hlcj6aya9esc7a"},
|
{"பரிட்சை", "xn--hlcj6aya9esc7a"},
|
||||||
|
{"mamão-com-açúcar", "xn--mamo-com-acar-yeb1e6q"},
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestEncodeDecodePunycode(t *testing.T) {
|
func TestEncodeDecodePunycode(t *testing.T) {
|
||||||
|
|
49
Godeps/_workspace/src/github.com/miekg/dns/labels_test.go
generated
vendored
49
Godeps/_workspace/src/github.com/miekg/dns/labels_test.go
generated
vendored
|
@ -13,34 +13,27 @@ func TestCompareDomainName(t *testing.T) {
|
||||||
s6 := "miek.nl"
|
s6 := "miek.nl"
|
||||||
|
|
||||||
if CompareDomainName(s1, s2) != 2 {
|
if CompareDomainName(s1, s2) != 2 {
|
||||||
t.Logf("%s with %s should be %d", s1, s2, 2)
|
t.Errorf("%s with %s should be %d", s1, s2, 2)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
if CompareDomainName(s1, s3) != 1 {
|
if CompareDomainName(s1, s3) != 1 {
|
||||||
t.Logf("%s with %s should be %d", s1, s3, 1)
|
t.Errorf("%s with %s should be %d", s1, s3, 1)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
if CompareDomainName(s3, s4) != 0 {
|
if CompareDomainName(s3, s4) != 0 {
|
||||||
t.Logf("%s with %s should be %d", s3, s4, 0)
|
t.Errorf("%s with %s should be %d", s3, s4, 0)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
// Non qualified tests
|
// Non qualified tests
|
||||||
if CompareDomainName(s1, s5) != 1 {
|
if CompareDomainName(s1, s5) != 1 {
|
||||||
t.Logf("%s with %s should be %d", s1, s5, 1)
|
t.Errorf("%s with %s should be %d", s1, s5, 1)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
if CompareDomainName(s1, s6) != 2 {
|
if CompareDomainName(s1, s6) != 2 {
|
||||||
t.Logf("%s with %s should be %d", s1, s5, 2)
|
t.Errorf("%s with %s should be %d", s1, s5, 2)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if CompareDomainName(s1, ".") != 0 {
|
if CompareDomainName(s1, ".") != 0 {
|
||||||
t.Logf("%s with %s should be %d", s1, s5, 0)
|
t.Errorf("%s with %s should be %d", s1, s5, 0)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
if CompareDomainName(".", ".") != 0 {
|
if CompareDomainName(".", ".") != 0 {
|
||||||
t.Logf("%s with %s should be %d", ".", ".", 0)
|
t.Errorf("%s with %s should be %d", ".", ".", 0)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -59,10 +52,9 @@ func TestSplit(t *testing.T) {
|
||||||
}
|
}
|
||||||
for s, i := range splitter {
|
for s, i := range splitter {
|
||||||
if x := len(Split(s)); x != i {
|
if x := len(Split(s)); x != i {
|
||||||
t.Logf("labels should be %d, got %d: %s %v\n", i, x, s, Split(s))
|
t.Errorf("labels should be %d, got %d: %s %v", i, x, s, Split(s))
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("%s %v\n", s, Split(s))
|
t.Logf("%s %v", s, Split(s))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -78,13 +70,11 @@ func TestSplit2(t *testing.T) {
|
||||||
switch len(i) {
|
switch len(i) {
|
||||||
case 1:
|
case 1:
|
||||||
if x[0] != i[0] {
|
if x[0] != i[0] {
|
||||||
t.Logf("labels should be %v, got %v: %s\n", i, x, s)
|
t.Errorf("labels should be %v, got %v: %s", i, x, s)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] {
|
if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] {
|
||||||
t.Logf("labels should be %v, got %v: %s\n", i, x, s)
|
t.Errorf("labels should be %v, got %v: %s", i, x, s)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -113,8 +103,7 @@ func TestPrevLabel(t *testing.T) {
|
||||||
for s, i := range prever {
|
for s, i := range prever {
|
||||||
x, ok := PrevLabel(s.string, s.int)
|
x, ok := PrevLabel(s.string, s.int)
|
||||||
if i != x {
|
if i != x {
|
||||||
t.Logf("label should be %d, got %d, %t: preving %d, %s\n", i, x, ok, s.int, s.string)
|
t.Errorf("label should be %d, got %d, %t: preving %d, %s", i, x, ok, s.int, s.string)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -129,8 +118,7 @@ func TestCountLabel(t *testing.T) {
|
||||||
for s, i := range splitter {
|
for s, i := range splitter {
|
||||||
x := CountLabel(s)
|
x := CountLabel(s)
|
||||||
if x != i {
|
if x != i {
|
||||||
t.Logf("CountLabel should have %d, got %d\n", i, x)
|
t.Errorf("CountLabel should have %d, got %d", i, x)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -149,14 +137,12 @@ domainLoop:
|
||||||
for domain, splits := range labels {
|
for domain, splits := range labels {
|
||||||
parts := SplitDomainName(domain)
|
parts := SplitDomainName(domain)
|
||||||
if len(parts) != len(splits) {
|
if len(parts) != len(splits) {
|
||||||
t.Logf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
|
t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
|
||||||
t.Fail()
|
|
||||||
continue domainLoop
|
continue domainLoop
|
||||||
}
|
}
|
||||||
for i := range parts {
|
for i := range parts {
|
||||||
if parts[i] != splits[i] {
|
if parts[i] != splits[i] {
|
||||||
t.Logf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
|
t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
|
||||||
t.Fail()
|
|
||||||
continue domainLoop
|
continue domainLoop
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -180,9 +166,8 @@ func TestIsDomainName(t *testing.T) {
|
||||||
for d, ok := range names {
|
for d, ok := range names {
|
||||||
l, k := IsDomainName(d)
|
l, k := IsDomainName(d)
|
||||||
if ok.ok != k || ok.lab != l {
|
if ok.ok != k || ok.lab != l {
|
||||||
t.Logf(" got %v %d for %s ", k, l, d)
|
t.Errorf(" got %v %d for %s ", k, l, d)
|
||||||
t.Logf("have %v %d for %s ", ok.ok, ok.lab, d)
|
t.Errorf("have %v %d for %s ", ok.ok, ok.lab, d)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
115
Godeps/_workspace/src/github.com/miekg/dns/msg.go
generated
vendored
115
Godeps/_workspace/src/github.com/miekg/dns/msg.go
generated
vendored
|
@ -23,29 +23,40 @@ import (
|
||||||
const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
|
const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrAlg error = &Error{err: "bad algorithm"}
|
// ErrAlg indicates an error with the (DNSSEC) algorithm.
|
||||||
ErrAuth error = &Error{err: "bad authentication"}
|
ErrAlg error = &Error{err: "bad algorithm"}
|
||||||
ErrBuf error = &Error{err: "buffer size too small"}
|
// ErrAuth indicates an error in the TSIG authentication.
|
||||||
ErrConnEmpty error = &Error{err: "conn has no connection"}
|
ErrAuth error = &Error{err: "bad authentication"}
|
||||||
ErrConn error = &Error{err: "conn holds both UDP and TCP connection"}
|
// ErrBuf indicates that the buffer used it too small for the message.
|
||||||
|
ErrBuf error = &Error{err: "buffer size too small"}
|
||||||
|
// ErrConn indicates that a connection has both a TCP and UDP socket.
|
||||||
|
ErrConn error = &Error{err: "conn holds both UDP and TCP connection"}
|
||||||
|
// ErrConnEmpty indicates a connection is being used before it is initialized.
|
||||||
|
ErrConnEmpty error = &Error{err: "conn has no connection"}
|
||||||
|
// ErrExtendedRcode ...
|
||||||
ErrExtendedRcode error = &Error{err: "bad extended rcode"}
|
ErrExtendedRcode error = &Error{err: "bad extended rcode"}
|
||||||
ErrFqdn error = &Error{err: "domain must be fully qualified"}
|
// ErrFqdn indicates that a domain name does not have a closing dot.
|
||||||
ErrId error = &Error{err: "id mismatch"}
|
ErrFqdn error = &Error{err: "domain must be fully qualified"}
|
||||||
ErrKeyAlg error = &Error{err: "bad key algorithm"}
|
// ErrId indicates there is a mismatch with the message's ID.
|
||||||
ErrKey error = &Error{err: "bad key"}
|
ErrId error = &Error{err: "id mismatch"}
|
||||||
ErrKeySize error = &Error{err: "bad key size"}
|
ErrKeyAlg error = &Error{err: "bad key algorithm"}
|
||||||
ErrNoSig error = &Error{err: "no signature found"}
|
ErrKey error = &Error{err: "bad key"}
|
||||||
ErrPrivKey error = &Error{err: "bad private key"}
|
ErrKeySize error = &Error{err: "bad key size"}
|
||||||
ErrRcode error = &Error{err: "bad rcode"}
|
ErrNoSig error = &Error{err: "no signature found"}
|
||||||
ErrRdata error = &Error{err: "bad rdata"}
|
ErrPrivKey error = &Error{err: "bad private key"}
|
||||||
ErrRRset error = &Error{err: "bad rrset"}
|
ErrRcode error = &Error{err: "bad rcode"}
|
||||||
ErrSecret error = &Error{err: "no secrets defined"}
|
ErrRdata error = &Error{err: "bad rdata"}
|
||||||
ErrServ error = &Error{err: "no servers could be reached"}
|
ErrRRset error = &Error{err: "bad rrset"}
|
||||||
ErrShortRead error = &Error{err: "short read"}
|
ErrSecret error = &Error{err: "no secrets defined"}
|
||||||
ErrSig error = &Error{err: "bad signature"}
|
ErrShortRead error = &Error{err: "short read"}
|
||||||
ErrSigGen error = &Error{err: "bad signature generation"}
|
// ErrSig indicates that a signature can not be cryptographically validated.
|
||||||
ErrSoa error = &Error{err: "no SOA"}
|
ErrSig error = &Error{err: "bad signature"}
|
||||||
ErrTime error = &Error{err: "bad time"}
|
// ErrSigGen indicates a failure to generate a signature.
|
||||||
|
ErrSigGen error = &Error{err: "bad signature generation"}
|
||||||
|
// ErrSOA indicates that no SOA RR was seen when doing zone transfers.
|
||||||
|
ErrSoa error = &Error{err: "no SOA"}
|
||||||
|
// ErrTime indicates a timing error in TSIG authentication.
|
||||||
|
ErrTime error = &Error{err: "bad time"}
|
||||||
)
|
)
|
||||||
|
|
||||||
// Id, by default, returns a 16 bits random number to be used as a
|
// Id, by default, returns a 16 bits random number to be used as a
|
||||||
|
@ -56,8 +67,7 @@ var (
|
||||||
// dns.Id = func() uint16 { return 3 }
|
// dns.Id = func() uint16 { return 3 }
|
||||||
var Id func() uint16 = id
|
var Id func() uint16 = id
|
||||||
|
|
||||||
// A manually-unpacked version of (id, bits).
|
// MsgHdr is a manually-unpacked version of (id, bits).
|
||||||
// This is in its own struct for easy printing.
|
|
||||||
type MsgHdr struct {
|
type MsgHdr struct {
|
||||||
Id uint16
|
Id uint16
|
||||||
Response bool
|
Response bool
|
||||||
|
@ -72,7 +82,7 @@ type MsgHdr struct {
|
||||||
Rcode int
|
Rcode int
|
||||||
}
|
}
|
||||||
|
|
||||||
// The layout of a DNS message.
|
// Msg contains the layout of a DNS message.
|
||||||
type Msg struct {
|
type Msg struct {
|
||||||
MsgHdr
|
MsgHdr
|
||||||
Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. This not part of the official DNS packet format.
|
Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. This not part of the official DNS packet format.
|
||||||
|
@ -82,7 +92,7 @@ type Msg struct {
|
||||||
Extra []RR // Holds the RR(s) of the additional section.
|
Extra []RR // Holds the RR(s) of the additional section.
|
||||||
}
|
}
|
||||||
|
|
||||||
// Map of strings for each RR wire type.
|
// TypeToString is a map of strings for each RR wire type.
|
||||||
var TypeToString = map[uint16]string{
|
var TypeToString = map[uint16]string{
|
||||||
TypeA: "A",
|
TypeA: "A",
|
||||||
TypeAAAA: "AAAA",
|
TypeAAAA: "AAAA",
|
||||||
|
@ -161,8 +171,10 @@ var TypeToString = map[uint16]string{
|
||||||
TypeX25: "X25",
|
TypeX25: "X25",
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reverse, needed for string parsing.
|
// StringToType is the reverse of TypeToString, needed for string parsing.
|
||||||
var StringToType = reverseInt16(TypeToString)
|
var StringToType = reverseInt16(TypeToString)
|
||||||
|
|
||||||
|
// StringToClass is the reverse of ClassToString, needed for string parsing.
|
||||||
var StringToClass = reverseInt16(ClassToString)
|
var StringToClass = reverseInt16(ClassToString)
|
||||||
|
|
||||||
// Map of opcodes strings.
|
// Map of opcodes strings.
|
||||||
|
@ -171,7 +183,7 @@ var StringToOpcode = reverseInt(OpcodeToString)
|
||||||
// Map of rcodes strings.
|
// Map of rcodes strings.
|
||||||
var StringToRcode = reverseInt(RcodeToString)
|
var StringToRcode = reverseInt(RcodeToString)
|
||||||
|
|
||||||
// Map of strings for each CLASS wire type.
|
// ClassToString maps Classes to strings for each CLASS wire type.
|
||||||
var ClassToString = map[uint16]string{
|
var ClassToString = map[uint16]string{
|
||||||
ClassINET: "IN",
|
ClassINET: "IN",
|
||||||
ClassCSNET: "CS",
|
ClassCSNET: "CS",
|
||||||
|
@ -181,7 +193,7 @@ var ClassToString = map[uint16]string{
|
||||||
ClassANY: "ANY",
|
ClassANY: "ANY",
|
||||||
}
|
}
|
||||||
|
|
||||||
// Map of strings for opcodes.
|
// OpcodeToString maps Opcodes to strings.
|
||||||
var OpcodeToString = map[int]string{
|
var OpcodeToString = map[int]string{
|
||||||
OpcodeQuery: "QUERY",
|
OpcodeQuery: "QUERY",
|
||||||
OpcodeIQuery: "IQUERY",
|
OpcodeIQuery: "IQUERY",
|
||||||
|
@ -190,7 +202,7 @@ var OpcodeToString = map[int]string{
|
||||||
OpcodeUpdate: "UPDATE",
|
OpcodeUpdate: "UPDATE",
|
||||||
}
|
}
|
||||||
|
|
||||||
// Map of strings for rcodes.
|
// RcodeToString maps Rcodes to strings.
|
||||||
var RcodeToString = map[int]string{
|
var RcodeToString = map[int]string{
|
||||||
RcodeSuccess: "NOERROR",
|
RcodeSuccess: "NOERROR",
|
||||||
RcodeFormatError: "FORMERR",
|
RcodeFormatError: "FORMERR",
|
||||||
|
@ -264,7 +276,7 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
|
||||||
// Emit sequence of counted strings, chopping at dots.
|
// Emit sequence of counted strings, chopping at dots.
|
||||||
begin := 0
|
begin := 0
|
||||||
bs := []byte(s)
|
bs := []byte(s)
|
||||||
ro_bs, bs_fresh, escaped_dot := s, true, false
|
roBs, bsFresh, escapedDot := s, true, false
|
||||||
for i := 0; i < ls; i++ {
|
for i := 0; i < ls; i++ {
|
||||||
if bs[i] == '\\' {
|
if bs[i] == '\\' {
|
||||||
for j := i; j < ls-1; j++ {
|
for j := i; j < ls-1; j++ {
|
||||||
|
@ -288,13 +300,13 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
|
||||||
} else if bs[i] == 'n' {
|
} else if bs[i] == 'n' {
|
||||||
bs[i] = '\n'
|
bs[i] = '\n'
|
||||||
}
|
}
|
||||||
escaped_dot = bs[i] == '.'
|
escapedDot = bs[i] == '.'
|
||||||
bs_fresh = false
|
bsFresh = false
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if bs[i] == '.' {
|
if bs[i] == '.' {
|
||||||
if i > 0 && bs[i-1] == '.' && !escaped_dot {
|
if i > 0 && bs[i-1] == '.' && !escapedDot {
|
||||||
// two dots back to back is not legal
|
// two dots back to back is not legal
|
||||||
return lenmsg, labels, ErrRdata
|
return lenmsg, labels, ErrRdata
|
||||||
}
|
}
|
||||||
|
@ -320,16 +332,16 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
|
||||||
}
|
}
|
||||||
off++
|
off++
|
||||||
}
|
}
|
||||||
if compress && !bs_fresh {
|
if compress && !bsFresh {
|
||||||
ro_bs = string(bs)
|
roBs = string(bs)
|
||||||
bs_fresh = true
|
bsFresh = true
|
||||||
}
|
}
|
||||||
// Dont try to compress '.'
|
// Dont try to compress '.'
|
||||||
if compress && ro_bs[begin:] != "." {
|
if compress && roBs[begin:] != "." {
|
||||||
if p, ok := compression[ro_bs[begin:]]; !ok {
|
if p, ok := compression[roBs[begin:]]; !ok {
|
||||||
// Only offsets smaller than this can be used.
|
// Only offsets smaller than this can be used.
|
||||||
if offset < maxCompressionOffset {
|
if offset < maxCompressionOffset {
|
||||||
compression[ro_bs[begin:]] = offset
|
compression[roBs[begin:]] = offset
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// The first hit is the longest matching dname
|
// The first hit is the longest matching dname
|
||||||
|
@ -348,7 +360,7 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
|
||||||
labels++
|
labels++
|
||||||
begin = i + 1
|
begin = i + 1
|
||||||
}
|
}
|
||||||
escaped_dot = false
|
escapedDot = false
|
||||||
}
|
}
|
||||||
// Root label is special
|
// Root label is special
|
||||||
if len(bs) == 1 && bs[0] == '.' {
|
if len(bs) == 1 && bs[0] == '.' {
|
||||||
|
@ -945,7 +957,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
|
||||||
return lenmsg, &Error{"bad tag unpacking slice: " + val.Type().Field(i).Tag.Get("dns")}
|
return lenmsg, &Error{"bad tag unpacking slice: " + val.Type().Field(i).Tag.Get("dns")}
|
||||||
case `dns:"domain-name"`:
|
case `dns:"domain-name"`:
|
||||||
// HIP record slice of name (or none)
|
// HIP record slice of name (or none)
|
||||||
servers := make([]string, 0)
|
var servers []string
|
||||||
var s string
|
var s string
|
||||||
for off < lenrd {
|
for off < lenrd {
|
||||||
s, off, err = UnpackDomainName(msg, off)
|
s, off, err = UnpackDomainName(msg, off)
|
||||||
|
@ -971,7 +983,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
|
||||||
// We can safely return here.
|
// We can safely return here.
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
edns := make([]EDNS0, 0)
|
var edns []EDNS0
|
||||||
Option:
|
Option:
|
||||||
code := uint16(0)
|
code := uint16(0)
|
||||||
if off+2 > lenmsg {
|
if off+2 > lenmsg {
|
||||||
|
@ -1036,7 +1048,12 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
|
||||||
edns = append(edns, e)
|
edns = append(edns, e)
|
||||||
off = off1 + int(optlen)
|
off = off1 + int(optlen)
|
||||||
default:
|
default:
|
||||||
// do nothing?
|
e := new(EDNS0_LOCAL)
|
||||||
|
e.Code = code
|
||||||
|
if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
|
||||||
|
return lenmsg, err
|
||||||
|
}
|
||||||
|
edns = append(edns, e)
|
||||||
off = off1 + int(optlen)
|
off = off1 + int(optlen)
|
||||||
}
|
}
|
||||||
if off < lenrd {
|
if off < lenrd {
|
||||||
|
@ -1077,7 +1094,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
|
||||||
off += net.IPv6len
|
off += net.IPv6len
|
||||||
case `dns:"wks"`:
|
case `dns:"wks"`:
|
||||||
// Rest of the record is the bitmap
|
// Rest of the record is the bitmap
|
||||||
serv := make([]uint16, 0)
|
var serv []uint16
|
||||||
j := 0
|
j := 0
|
||||||
for off < lenrd {
|
for off < lenrd {
|
||||||
if off+1 > lenmsg {
|
if off+1 > lenmsg {
|
||||||
|
@ -1121,7 +1138,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er
|
||||||
if off+2 > lenrd || off+2 > lenmsg {
|
if off+2 > lenrd || off+2 > lenmsg {
|
||||||
return lenmsg, &Error{err: "overflow unpacking nsecx"}
|
return lenmsg, &Error{err: "overflow unpacking nsecx"}
|
||||||
}
|
}
|
||||||
nsec := make([]uint16, 0)
|
var nsec []uint16
|
||||||
length := 0
|
length := 0
|
||||||
window := 0
|
window := 0
|
||||||
for off+2 < lenrd {
|
for off+2 < lenrd {
|
||||||
|
@ -1903,7 +1920,11 @@ func Copy(r RR) RR {
|
||||||
|
|
||||||
// Copy returns a new *Msg which is a deep-copy of dns.
|
// Copy returns a new *Msg which is a deep-copy of dns.
|
||||||
func (dns *Msg) Copy() *Msg {
|
func (dns *Msg) Copy() *Msg {
|
||||||
r1 := new(Msg)
|
return dns.CopyTo(new(Msg))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
|
||||||
|
func (dns *Msg) CopyTo(r1 *Msg) *Msg {
|
||||||
r1.MsgHdr = dns.MsgHdr
|
r1.MsgHdr = dns.MsgHdr
|
||||||
r1.Compress = dns.Compress
|
r1.Compress = dns.Compress
|
||||||
|
|
||||||
|
|
2
Godeps/_workspace/src/github.com/miekg/dns/nsecx.go
generated
vendored
2
Godeps/_workspace/src/github.com/miekg/dns/nsecx.go
generated
vendored
|
@ -50,6 +50,8 @@ func HashName(label string, ha uint8, iter uint16, salt string) string {
|
||||||
return toBase32(nsec3)
|
return toBase32(nsec3)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Denialer is an interface that should be implemented by types that are used to denial
|
||||||
|
// answers in DNSSEC.
|
||||||
type Denialer interface {
|
type Denialer interface {
|
||||||
// Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
|
// Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
|
||||||
Cover(name string) bool
|
Cover(name string) bool
|
||||||
|
|
12
Godeps/_workspace/src/github.com/miekg/dns/nsecx_test.go
generated
vendored
12
Godeps/_workspace/src/github.com/miekg/dns/nsecx_test.go
generated
vendored
|
@ -7,14 +7,12 @@ import (
|
||||||
func TestPackNsec3(t *testing.T) {
|
func TestPackNsec3(t *testing.T) {
|
||||||
nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD")
|
nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD")
|
||||||
if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" {
|
if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" {
|
||||||
t.Logf("%v\n", nsec3)
|
t.Error(nsec3)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD")
|
nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD")
|
||||||
if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" {
|
if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" {
|
||||||
t.Logf("%v\n", nsec3)
|
t.Error(nsec3)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -22,12 +20,10 @@ func TestNsec3(t *testing.T) {
|
||||||
// examples taken from .nl
|
// examples taken from .nl
|
||||||
nsec3, _ := NewRR("39p91242oslggest5e6a7cci4iaeqvnk.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6 NS DS RRSIG")
|
nsec3, _ := NewRR("39p91242oslggest5e6a7cci4iaeqvnk.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6 NS DS RRSIG")
|
||||||
if !nsec3.(*NSEC3).Cover("snasajsksasasa.nl.") { // 39p94jrinub66hnpem8qdpstrec86pg3
|
if !nsec3.(*NSEC3).Cover("snasajsksasasa.nl.") { // 39p94jrinub66hnpem8qdpstrec86pg3
|
||||||
t.Logf("39p94jrinub66hnpem8qdpstrec86pg3. should be covered by 39p91242oslggest5e6a7cci4iaeqvnk.nl. - 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6")
|
t.Error("39p94jrinub66hnpem8qdpstrec86pg3. should be covered by 39p91242oslggest5e6a7cci4iaeqvnk.nl. - 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6")
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
nsec3, _ = NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM")
|
nsec3, _ = NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM")
|
||||||
if !nsec3.(*NSEC3).Match("nl.") { // sk4e8fj94u78smusb40o1n0oltbblu2r.nl.
|
if !nsec3.(*NSEC3).Match("nl.") { // sk4e8fj94u78smusb40o1n0oltbblu2r.nl.
|
||||||
t.Logf("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.")
|
t.Error("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.")
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
415
Godeps/_workspace/src/github.com/miekg/dns/parse_test.go
generated
vendored
415
Godeps/_workspace/src/github.com/miekg/dns/parse_test.go
generated
vendored
|
@ -20,19 +20,16 @@ func TestDotInName(t *testing.T) {
|
||||||
PackDomainName("aa\\.bb.nl.", buf, 0, nil, false)
|
PackDomainName("aa\\.bb.nl.", buf, 0, nil, false)
|
||||||
// index 3 must be a real dot
|
// index 3 must be a real dot
|
||||||
if buf[3] != '.' {
|
if buf[3] != '.' {
|
||||||
t.Log("dot should be a real dot")
|
t.Error("dot should be a real dot")
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if buf[6] != 2 {
|
if buf[6] != 2 {
|
||||||
t.Log("this must have the value 2")
|
t.Error("this must have the value 2")
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
dom, _, _ := UnpackDomainName(buf, 0)
|
dom, _, _ := UnpackDomainName(buf, 0)
|
||||||
// printing it should yield the backspace again
|
// printing it should yield the backspace again
|
||||||
if dom != "aa\\.bb.nl." {
|
if dom != "aa\\.bb.nl." {
|
||||||
t.Log("dot should have been escaped: " + dom)
|
t.Error("dot should have been escaped: ", dom)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -41,7 +38,7 @@ func TestDotLastInLabel(t *testing.T) {
|
||||||
buf := make([]byte, 20)
|
buf := make([]byte, 20)
|
||||||
_, err := PackDomainName(sample, buf, 0, nil, false)
|
_, err := PackDomainName(sample, buf, 0, nil, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unexpected error packing domain: %s", err)
|
t.Fatalf("unexpected error packing domain: %v", err)
|
||||||
}
|
}
|
||||||
dom, _, _ := UnpackDomainName(buf, 0)
|
dom, _, _ := UnpackDomainName(buf, 0)
|
||||||
if dom != sample {
|
if dom != sample {
|
||||||
|
@ -52,19 +49,17 @@ func TestDotLastInLabel(t *testing.T) {
|
||||||
func TestTooLongDomainName(t *testing.T) {
|
func TestTooLongDomainName(t *testing.T) {
|
||||||
l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt."
|
l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt."
|
||||||
dom := l + l + l + l + l + l + l
|
dom := l + l + l + l + l + l + l
|
||||||
_, e := NewRR(dom + " IN A 127.0.0.1")
|
_, err := NewRR(dom + " IN A 127.0.0.1")
|
||||||
if e == nil {
|
if err == nil {
|
||||||
t.Log("should be too long")
|
t.Error("should be too long")
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("error is %s", e.Error())
|
t.Logf("error is %v", err)
|
||||||
}
|
}
|
||||||
_, e = NewRR("..com. IN A 127.0.0.1")
|
_, err = NewRR("..com. IN A 127.0.0.1")
|
||||||
if e == nil {
|
if err == nil {
|
||||||
t.Log("should fail")
|
t.Error("should fail")
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("error is %s", e.Error())
|
t.Logf("error is %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -76,19 +71,16 @@ func TestDomainName(t *testing.T) {
|
||||||
|
|
||||||
for _, ts := range tests {
|
for _, ts := range tests {
|
||||||
if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil {
|
if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil {
|
||||||
t.Log("not a valid domain name")
|
t.Error("not a valid domain name")
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
n, _, err := UnpackDomainName(dbuff, 0)
|
n, _, err := UnpackDomainName(dbuff, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Log("failed to unpack packed domain name")
|
t.Error("failed to unpack packed domain name")
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if ts != n {
|
if ts != n {
|
||||||
t.Logf("must be equal: in: %s, out: %s\n", ts, n)
|
t.Errorf("must be equal: in: %s, out: %s", ts, n)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -110,26 +102,23 @@ func TestDomainNameAndTXTEscapes(t *testing.T) {
|
||||||
s := rr1.String()
|
s := rr1.String()
|
||||||
rr2, err := NewRR(s)
|
rr2, err := NewRR(s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Logf("Error parsing unpacked RR's string: %v", err)
|
t.Errorf("Error parsing unpacked RR's string: %v", err)
|
||||||
t.Logf(" Bytes: %v\n", rrbytes)
|
t.Errorf(" Bytes: %v", rrbytes)
|
||||||
t.Logf("String: %v\n", s)
|
t.Errorf("String: %v", s)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
repacked := make([]byte, len(rrbytes))
|
repacked := make([]byte, len(rrbytes))
|
||||||
if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil {
|
if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil {
|
||||||
t.Logf("error packing parsed RR: %v", err)
|
t.Errorf("error packing parsed RR: %v", err)
|
||||||
t.Logf(" original Bytes: %v\n", rrbytes)
|
t.Errorf(" original Bytes: %v", rrbytes)
|
||||||
t.Logf("unpacked Struct: %V\n", rr1)
|
t.Errorf("unpacked Struct: %v", rr1)
|
||||||
t.Logf(" parsed Struct: %V\n", rr2)
|
t.Errorf(" parsed Struct: %v", rr2)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
if !bytes.Equal(repacked, rrbytes) {
|
if !bytes.Equal(repacked, rrbytes) {
|
||||||
t.Log("packed bytes don't match original bytes")
|
t.Error("packed bytes don't match original bytes")
|
||||||
t.Logf(" original bytes: %v", rrbytes)
|
t.Errorf(" original bytes: %v", rrbytes)
|
||||||
t.Logf(" packed bytes: %v", repacked)
|
t.Errorf(" packed bytes: %v", repacked)
|
||||||
t.Logf("unpacked struct: %V", rr1)
|
t.Errorf("unpacked struct: %v", rr1)
|
||||||
t.Logf(" parsed struct: %V", rr2)
|
t.Errorf(" parsed struct: %v", rr2)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -206,16 +195,16 @@ func TestDomainQuick(t *testing.T) {
|
||||||
buf := make([]byte, 255)
|
buf := make([]byte, 255)
|
||||||
off, err := PackDomainName(ds, buf, 0, nil, false)
|
off, err := PackDomainName(ds, buf, 0, nil, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Logf("error packing domain: %s", err.Error())
|
t.Errorf("error packing domain: %v", err)
|
||||||
t.Logf(" bytes: %v\n", db)
|
t.Errorf(" bytes: %v", db)
|
||||||
t.Logf("string: %v\n", ds)
|
t.Errorf("string: %v", ds)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if !bytes.Equal(db, buf[:off]) {
|
if !bytes.Equal(db, buf[:off]) {
|
||||||
t.Logf("repacked domain doesn't match original:")
|
t.Errorf("repacked domain doesn't match original:")
|
||||||
t.Logf("src bytes: %v", db)
|
t.Errorf("src bytes: %v", db)
|
||||||
t.Logf(" string: %v", ds)
|
t.Errorf(" string: %v", ds)
|
||||||
t.Logf("out bytes: %v", buf[:off])
|
t.Errorf("out bytes: %v", buf[:off])
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
|
@ -248,7 +237,13 @@ func GenerateTXT(r *rand.Rand, size int) []byte {
|
||||||
return rd
|
return rd
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTXTRRQuick(t *testing.T) {
|
// Ok, 2 things. 1) this test breaks with the new functionality of splitting up larger txt
|
||||||
|
// chunks into 255 byte pieces. 2) I don't like the random nature of this thing, because I can't
|
||||||
|
// place the quotes where they need to be.
|
||||||
|
// So either add some code the places the quotes in just the right spots, make this non random
|
||||||
|
// or do something else.
|
||||||
|
// Disabled for now. (miek)
|
||||||
|
func testTXTRRQuick(t *testing.T) {
|
||||||
s := rand.NewSource(0)
|
s := rand.NewSource(0)
|
||||||
r := rand.New(s)
|
r := rand.New(s)
|
||||||
typeAndClass := []byte{
|
typeAndClass := []byte{
|
||||||
|
@ -272,15 +267,15 @@ func TestTXTRRQuick(t *testing.T) {
|
||||||
buf := make([]byte, len(rrbytes)*3)
|
buf := make([]byte, len(rrbytes)*3)
|
||||||
off, err := PackRR(rr, buf, 0, nil, false)
|
off, err := PackRR(rr, buf, 0, nil, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Logf("pack Error: %s\nRR: %V", err.Error(), rr)
|
t.Errorf("pack Error: %v\nRR: %v", err, rr)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
buf = buf[:off]
|
buf = buf[:off]
|
||||||
if !bytes.Equal(buf, rrbytes) {
|
if !bytes.Equal(buf, rrbytes) {
|
||||||
t.Logf("packed bytes don't match original bytes")
|
t.Errorf("packed bytes don't match original bytes")
|
||||||
t.Logf("src bytes: %v", rrbytes)
|
t.Errorf("src bytes: %v", rrbytes)
|
||||||
t.Logf(" struct: %V", rr)
|
t.Errorf(" struct: %v", rr)
|
||||||
t.Logf("oUt bytes: %v", buf)
|
t.Errorf("out bytes: %v", buf)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if len(rdata) == 0 {
|
if len(rdata) == 0 {
|
||||||
|
@ -290,35 +285,35 @@ func TestTXTRRQuick(t *testing.T) {
|
||||||
rrString := rr.String()
|
rrString := rr.String()
|
||||||
rr2, err := NewRR(rrString)
|
rr2, err := NewRR(rrString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Logf("error parsing own output: %s", err.Error())
|
t.Errorf("error parsing own output: %v", err)
|
||||||
t.Logf("struct: %V", rr)
|
t.Errorf("struct: %v", rr)
|
||||||
t.Logf("string: %v", rrString)
|
t.Errorf("string: %v", rrString)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if rr2.String() != rrString {
|
if rr2.String() != rrString {
|
||||||
t.Logf("parsed rr.String() doesn't match original string")
|
t.Errorf("parsed rr.String() doesn't match original string")
|
||||||
t.Logf("original: %v", rrString)
|
t.Errorf("original: %v", rrString)
|
||||||
t.Logf(" parsed: %v", rr2.String())
|
t.Errorf(" parsed: %v", rr2.String())
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
buf = make([]byte, len(rrbytes)*3)
|
buf = make([]byte, len(rrbytes)*3)
|
||||||
off, err = PackRR(rr2, buf, 0, nil, false)
|
off, err = PackRR(rr2, buf, 0, nil, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Logf("error packing parsed rr: %s", err.Error())
|
t.Errorf("error packing parsed rr: %v", err)
|
||||||
t.Logf("unpacked Struct: %V", rr)
|
t.Errorf("unpacked Struct: %v", rr)
|
||||||
t.Logf(" string: %v", rrString)
|
t.Errorf(" string: %v", rrString)
|
||||||
t.Logf(" parsed Struct: %V", rr2)
|
t.Errorf(" parsed Struct: %v", rr2)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
buf = buf[:off]
|
buf = buf[:off]
|
||||||
if !bytes.Equal(buf, rrbytes) {
|
if !bytes.Equal(buf, rrbytes) {
|
||||||
t.Logf("parsed packed bytes don't match original bytes")
|
t.Errorf("parsed packed bytes don't match original bytes")
|
||||||
t.Logf(" source bytes: %v", rrbytes)
|
t.Errorf(" source bytes: %v", rrbytes)
|
||||||
t.Logf("unpacked struct: %V", rr)
|
t.Errorf("unpacked struct: %v", rr)
|
||||||
t.Logf(" string: %v", rrString)
|
t.Errorf(" string: %v", rrString)
|
||||||
t.Logf(" parsed struct: %V", rr2)
|
t.Errorf(" parsed struct: %v", rr2)
|
||||||
t.Logf(" repacked bytes: %v", buf)
|
t.Errorf(" repacked bytes: %v", buf)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
|
@ -345,15 +340,13 @@ func TestParseDirectiveMisc(t *testing.T) {
|
||||||
"ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1",
|
"ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1",
|
||||||
}
|
}
|
||||||
for i, o := range tests {
|
for i, o := range tests {
|
||||||
rr, e := NewRR(i)
|
rr, err := NewRR(i)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
t.Log("failed to parse RR: " + e.Error())
|
t.Error("failed to parse RR: ", err)
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if rr.String() != o {
|
if rr.String() != o {
|
||||||
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
|
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("RR is OK: `%s'", rr.String())
|
t.Logf("RR is OK: `%s'", rr.String())
|
||||||
}
|
}
|
||||||
|
@ -369,15 +362,13 @@ func TestNSEC(t *testing.T) {
|
||||||
"localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534",
|
"localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534",
|
||||||
}
|
}
|
||||||
for i, o := range nsectests {
|
for i, o := range nsectests {
|
||||||
rr, e := NewRR(i)
|
rr, err := NewRR(i)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
t.Log("failed to parse RR: " + e.Error())
|
t.Error("failed to parse RR: ", err)
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if rr.String() != o {
|
if rr.String() != o {
|
||||||
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
|
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("RR is OK: `%s'", rr.String())
|
t.Logf("RR is OK: `%s'", rr.String())
|
||||||
}
|
}
|
||||||
|
@ -390,15 +381,13 @@ func TestParseLOC(t *testing.T) {
|
||||||
"SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m",
|
"SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m",
|
||||||
}
|
}
|
||||||
for i, o := range lt {
|
for i, o := range lt {
|
||||||
rr, e := NewRR(i)
|
rr, err := NewRR(i)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
t.Log("failed to parse RR: " + e.Error())
|
t.Error("failed to parse RR: ", err)
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if rr.String() != o {
|
if rr.String() != o {
|
||||||
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
|
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("RR is OK: `%s'", rr.String())
|
t.Logf("RR is OK: `%s'", rr.String())
|
||||||
}
|
}
|
||||||
|
@ -410,15 +399,13 @@ func TestParseDS(t *testing.T) {
|
||||||
"example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F",
|
"example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F",
|
||||||
}
|
}
|
||||||
for i, o := range dt {
|
for i, o := range dt {
|
||||||
rr, e := NewRR(i)
|
rr, err := NewRR(i)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
t.Log("failed to parse RR: " + e.Error())
|
t.Error("failed to parse RR: ", err)
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if rr.String() != o {
|
if rr.String() != o {
|
||||||
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
|
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("RR is OK: `%s'", rr.String())
|
t.Logf("RR is OK: `%s'", rr.String())
|
||||||
}
|
}
|
||||||
|
@ -446,15 +433,13 @@ func TestQuotes(t *testing.T) {
|
||||||
"cid.urn.arpa. NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .",
|
"cid.urn.arpa. NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .",
|
||||||
}
|
}
|
||||||
for i, o := range tests {
|
for i, o := range tests {
|
||||||
rr, e := NewRR(i)
|
rr, err := NewRR(i)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
t.Log("failed to parse RR: " + e.Error())
|
t.Error("failed to parse RR: ", err)
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if rr.String() != o {
|
if rr.String() != o {
|
||||||
t.Logf("`%s' should be equal to\n`%s', but is\n`%s'\n", i, o, rr.String())
|
t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String())
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("RR is OK: `%s'", rr.String())
|
t.Logf("RR is OK: `%s'", rr.String())
|
||||||
}
|
}
|
||||||
|
@ -471,15 +456,13 @@ func TestParseClass(t *testing.T) {
|
||||||
"t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1",
|
"t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1",
|
||||||
}
|
}
|
||||||
for i, o := range tests {
|
for i, o := range tests {
|
||||||
rr, e := NewRR(i)
|
rr, err := NewRR(i)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
t.Log("failed to parse RR: " + e.Error())
|
t.Error("failed to parse RR: ", err)
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if rr.String() != o {
|
if rr.String() != o {
|
||||||
t.Logf("`%s' should be equal to\n`%s', but is\n`%s'\n", i, o, rr.String())
|
t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String())
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("RR is OK: `%s'", rr.String())
|
t.Logf("RR is OK: `%s'", rr.String())
|
||||||
}
|
}
|
||||||
|
@ -524,15 +507,13 @@ func TestBrace(t *testing.T) {
|
||||||
)`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600",
|
)`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600",
|
||||||
}
|
}
|
||||||
for i, o := range tests {
|
for i, o := range tests {
|
||||||
rr, e := NewRR(i)
|
rr, err := NewRR(i)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
t.Log("failed to parse RR: " + e.Error() + "\n\t" + i)
|
t.Errorf("failed to parse RR: %v\n\t%s", err, i)
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if rr.String() != o {
|
if rr.String() != o {
|
||||||
t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
|
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
|
||||||
t.Fail()
|
|
||||||
} else {
|
} else {
|
||||||
t.Logf("RR is OK: `%s'", rr.String())
|
t.Logf("RR is OK: `%s'", rr.String())
|
||||||
}
|
}
|
||||||
|
@ -555,8 +536,7 @@ func TestParseFailure(t *testing.T) {
|
||||||
for _, s := range tests {
|
for _, s := range tests {
|
||||||
_, err := NewRR(s)
|
_, err := NewRR(s)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Logf("should have triggered an error: \"%s\"", s)
|
t.Errorf("should have triggered an error: \"%s\"", s)
|
||||||
t.Fail()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -577,11 +557,10 @@ test IN CNAME test.a.example.com.
|
||||||
for x := range to {
|
for x := range to {
|
||||||
i++
|
i++
|
||||||
if x.Error != nil {
|
if x.Error != nil {
|
||||||
t.Logf("%s\n", x.Error)
|
t.Error(x.Error)
|
||||||
t.Fail()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
t.Logf("%s\n", x.RR)
|
t.Log(x.RR)
|
||||||
}
|
}
|
||||||
delta := time.Now().UnixNano() - start
|
delta := time.Now().UnixNano() - start
|
||||||
t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9))
|
t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9))
|
||||||
|
@ -625,7 +604,7 @@ moutamassey NS ns01.yahoodomains.jp.
|
||||||
`
|
`
|
||||||
to := ParseZone(strings.NewReader(zone), "", "testzone")
|
to := ParseZone(strings.NewReader(zone), "", "testzone")
|
||||||
for x := range to {
|
for x := range to {
|
||||||
fmt.Printf("%s\n", x.RR)
|
fmt.Println(x.RR)
|
||||||
}
|
}
|
||||||
// Output:
|
// Output:
|
||||||
// name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300
|
// name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300
|
||||||
|
@ -658,7 +637,7 @@ func ExampleHIP() {
|
||||||
b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
|
b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
|
||||||
rvs.example.com. )`
|
rvs.example.com. )`
|
||||||
if hip, err := NewRR(h); err == nil {
|
if hip, err := NewRR(h); err == nil {
|
||||||
fmt.Printf("%s\n", hip.String())
|
fmt.Println(hip.String())
|
||||||
}
|
}
|
||||||
// Output:
|
// Output:
|
||||||
// www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com.
|
 // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com.
@@ -673,30 +652,30 @@ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
 rvs2.example.com. )`
 	rr, err := NewRR(h)
 	if err != nil {
-		t.Fatalf("failed to parse RR: %s", err)
+		t.Fatalf("failed to parse RR: %v", err)
 	}
 	t.Logf("RR: %s", rr)
 	msg := new(Msg)
 	msg.Answer = []RR{rr, rr}
 	bytes, err := msg.Pack()
 	if err != nil {
-		t.Fatalf("failed to pack msg: %s", err)
+		t.Fatalf("failed to pack msg: %v", err)
 	}
 	if err := msg.Unpack(bytes); err != nil {
-		t.Fatalf("failed to unpack msg: %s", err)
+		t.Fatalf("failed to unpack msg: %v", err)
 	}
 	if len(msg.Answer) != 2 {
-		t.Fatalf("2 answers expected: %V", msg)
+		t.Fatalf("2 answers expected: %v", msg)
 	}
 	for i, rr := range msg.Answer {
 		rr := rr.(*HIP)
 		t.Logf("RR: %s", rr)
 		if l := len(rr.RendezvousServers); l != 2 {
-			t.Fatalf("2 servers expected, only %d in record %d:\n%V", l, i, msg)
+			t.Fatalf("2 servers expected, only %d in record %d:\n%v", l, i, msg)
 		}
 		for j, s := range []string{"rvs1.example.com.", "rvs2.example.com."} {
 			if rr.RendezvousServers[j] != s {
-				t.Fatalf("expected server %d of record %d to be %s:\n%V", j, i, s, msg)
+				t.Fatalf("expected server %d of record %d to be %s:\n%v", j, i, s, msg)
 			}
 		}
 	}
@@ -705,7 +684,7 @@ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
 func ExampleSOA() {
 	s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100"
 	if soa, err := NewRR(s); err == nil {
-		fmt.Printf("%s\n", soa.String())
+		fmt.Println(soa.String())
 	}
 	// Output:
 	// example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100
@@ -715,8 +694,7 @@ func TestLineNumberError(t *testing.T) {
 	s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100"
 	if _, err := NewRR(s); err != nil {
 		if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" {
-			t.Logf("not expecting this error: " + err.Error())
-			t.Fail()
+			t.Error("not expecting this error: ", err)
 		}
 	}
 }
@@ -733,15 +711,13 @@ func TestLineNumberError2(t *testing.T) {
 		`example.com 1000 IN TALINK ( a.example.com. b...example.com.
 	)`: "dns: bad TALINK NextName: \"b...example.com.\" at line: 2:1"}

-	for in, err := range tests {
-		_, e := NewRR(in)
-		if e == nil {
-			t.Fail()
+	for in, errStr := range tests {
+		_, err := NewRR(in)
+		if err == nil {
+			t.Error("err is nil")
 		} else {
-			if e.Error() != err {
-				t.Logf("%s\n", in)
-				t.Logf("error should be %s is %s\n", err, e.Error())
-				t.Fail()
+			if err.Error() != errStr {
+				t.Errorf("%s: error should be %s is %v", in, errStr, err)
 			}
 		}
 	}
@@ -756,8 +732,7 @@ func TestRfc1982(t *testing.T) {
 	strtests := []string{"20120525134203", "19700101000000", "20380119031408"}
 	for _, v := range strtests {
 		if x, _ := StringToTime(v); v != TimeToString(x) {
-			t.Logf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x)
-			t.Fail()
+			t.Errorf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x)
 		}
 	}
@@ -767,8 +742,7 @@ func TestRfc1982(t *testing.T) {
 	for i, v := range inttests {
 		if TimeToString(i) != v {
-			t.Logf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i))
-			t.Fail()
+			t.Errorf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i))
 		}
 	}
@@ -785,16 +759,14 @@ func TestRfc1982(t *testing.T) {
 		x, _ := StringToTime(from)
 		y := TimeToString(x)
 		if y != to {
-			t.Logf("1982 arithmetic future failure %s:%s (%s)", from, to, y)
-			t.Fail()
+			t.Errorf("1982 arithmetic future failure %s:%s (%s)", from, to, y)
 		}
 	}
 }

 func TestEmpty(t *testing.T) {
 	for _ = range ParseZone(strings.NewReader(""), "", "") {
-		t.Logf("should be empty")
-		t.Fail()
+		t.Errorf("should be empty")
 	}
 }
@@ -819,7 +791,7 @@ func TestLowercaseTokens(t *testing.T) {
 	for _, testrr := range testrecords {
 		_, err := NewRR(testrr)
 		if err != nil {
-			t.Errorf("failed to parse %#v, got %s", testrr, err.Error())
+			t.Errorf("failed to parse %#v, got %v", testrr, err)
 		}
 	}
 }
@@ -830,7 +802,7 @@ func ExampleGenerate() {
 	to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "")
 	for x := range to {
 		if x.Error == nil {
-			fmt.Printf("%s\n", x.RR.String())
+			fmt.Println(x.RR.String())
 		}
 	}
 	// Output:
@@ -881,25 +853,25 @@ func TestSRVPacking(t *testing.T) {
 	_, err := msg.Pack()
 	if err != nil {
-		t.Fatalf("couldn't pack %v\n", msg)
+		t.Fatalf("couldn't pack %v: %v", msg, err)
 	}
 }

 func TestParseBackslash(t *testing.T) {
-	if r, e := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); e != nil {
-		t.Fatalf("could not create RR with \\000 in it")
+	if r, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil {
+		t.Errorf("could not create RR with \\000 in it")
 	} else {
-		t.Logf("parsed %s\n", r.String())
+		t.Logf("parsed %s", r.String())
 	}
-	if r, e := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); e != nil {
-		t.Fatalf("could not create RR with \\000 in it")
+	if r, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil {
+		t.Errorf("could not create RR with \\000 in it")
 	} else {
-		t.Logf("parsed %s\n", r.String())
+		t.Logf("parsed %s", r.String())
 	}
-	if r, e := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); e != nil {
-		t.Fatalf("could not create RR with \\ and \\@ in it")
+	if r, err := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); err != nil {
+		t.Errorf("could not create RR with \\ and \\@ in it")
 	} else {
-		t.Logf("parsed %s\n", r.String())
+		t.Logf("parsed %s", r.String())
 	}
 }
@@ -919,9 +891,9 @@ func TestILNP(t *testing.T) {
 		"host1.example.com.\t3600\tIN\tLP\t20 l32-subnet1.example.com.",
 	}
 	for _, t1 := range tests {
-		r, e := NewRR(t1)
-		if e != nil {
-			t.Fatalf("an error occured: %s\n", e.Error())
+		r, err := NewRR(t1)
+		if err != nil {
+			t.Fatalf("an error occurred: %v", err)
 		} else {
 			if t1 != r.String() {
 				t.Fatalf("strings should be equal %s %s", t1, r.String())
@@ -941,15 +913,13 @@ func TestNsapGposEidNimloc(t *testing.T) {
 		"VAXA. IN EID 3141592653589793": "VAXA.\t3600\tIN\tEID\t3141592653589793",
 	}
 	for i, o := range dt {
-		rr, e := NewRR(i)
-		if e != nil {
-			t.Log("failed to parse RR: " + e.Error())
-			t.Fail()
+		rr, err := NewRR(i)
+		if err != nil {
+			t.Error("failed to parse RR: ", err)
 			continue
 		}
 		if rr.String() != o {
-			t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
-			t.Fail()
+			t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
 		} else {
 			t.Logf("RR is OK: `%s'", rr.String())
 		}
@@ -962,15 +932,13 @@ func TestPX(t *testing.T) {
 		"ab.net2.it. IN PX 10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.": "ab.net2.it.\t3600\tIN\tPX\t10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.",
 	}
 	for i, o := range dt {
-		rr, e := NewRR(i)
-		if e != nil {
-			t.Log("failed to parse RR: " + e.Error())
-			t.Fail()
+		rr, err := NewRR(i)
+		if err != nil {
+			t.Error("failed to parse RR: ", err)
 			continue
 		}
 		if rr.String() != o {
-			t.Logf("`%s' should be equal to\n`%s', but is `%s'\n", i, o, rr.String())
-			t.Fail()
+			t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
 		} else {
 			t.Logf("RR is OK: `%s'", rr.String())
 		}
@@ -1004,8 +972,7 @@ foo. IN TXT "THIS IS TEXT MAN"; this is comment 8
 		if x.Error == nil {
 			if x.Comment != "" {
 				if _, ok := comments[x.Comment]; !ok {
-					t.Logf("wrong comment %s", x.Comment)
-					t.Fail()
+					t.Errorf("wrong comment %s", x.Comment)
 				}
 			}
 		}
@@ -1018,14 +985,12 @@ func TestEUIxx(t *testing.T) {
 		"host.example. IN EUI64 00-00-5e-ef-00-00-00-2a": "host.example.\t3600\tIN\tEUI64\t00-00-5e-ef-00-00-00-2a",
 	}
 	for i, o := range tests {
-		r, e := NewRR(i)
-		if e != nil {
-			t.Logf("failed to parse %s: %s\n", i, e.Error())
-			t.Fail()
+		r, err := NewRR(i)
+		if err != nil {
+			t.Errorf("failed to parse %s: %v", i, err)
 		}
 		if r.String() != o {
-			t.Logf("want %s, got %s\n", o, r.String())
-			t.Fail()
+			t.Errorf("want %s, got %s", o, r.String())
 		}
 	}
 }
@@ -1037,14 +1002,12 @@ func TestUserRR(t *testing.T) {
 		"host.example. IN UINFO \"Miek Gieben\"": "host.example.\t3600\tIN\tUINFO\t\"Miek Gieben\"",
 	}
 	for i, o := range tests {
-		r, e := NewRR(i)
-		if e != nil {
-			t.Logf("failed to parse %s: %s\n", i, e.Error())
-			t.Fail()
+		r, err := NewRR(i)
+		if err != nil {
+			t.Errorf("failed to parse %s: %v", i, err)
 		}
 		if r.String() != o {
-			t.Logf("want %s, got %s\n", o, r.String())
-			t.Fail()
+			t.Errorf("want %s, got %s", o, r.String())
 		}
 	}
 }
@@ -1109,30 +1072,42 @@ func TestTXT(t *testing.T) {
 		t.Error("bad size of serialized record:", rr.len())
 	}
+
+	// Test TXT record with chunk larger than 255 bytes, they should be split up, by the parser
+	s := ""
+	for i := 0; i < 255; i++ {
+		s += "a"
+	}
+	s += "b"
+	rr, err = NewRR(`test.local. 60 IN TXT "` + s + `"`)
+	if err != nil {
+		t.Error("failed to parse empty-string TXT record", err)
+	}
+	if rr.(*TXT).Txt[1] != "b" {
+		t.Errorf("Txt should have two chunk, last one my be 'b', but is %s", rr.(*TXT).Txt[1])
+	}
+	t.Log(rr.String())
 }

 func TestTypeXXXX(t *testing.T) {
 	_, err := NewRR("example.com IN TYPE1234 \\# 4 aabbccdd")
 	if err != nil {
-		t.Logf("failed to parse TYPE1234 RR: %s", err.Error())
-		t.Fail()
+		t.Errorf("failed to parse TYPE1234 RR: %v", err)
 	}
 	_, err = NewRR("example.com IN TYPE655341 \\# 8 aabbccddaabbccdd")
 	if err == nil {
-		t.Logf("this should not work, for TYPE655341")
-		t.Fail()
+		t.Errorf("this should not work, for TYPE655341")
 	}
 	_, err = NewRR("example.com IN TYPE1 \\# 4 0a000001")
 	if err == nil {
-		t.Logf("this should not work")
-		t.Fail()
+		t.Errorf("this should not work")
 	}
 }

 func TestPTR(t *testing.T) {
 	_, err := NewRR("144.2.0.192.in-addr.arpa. 900 IN PTR ilouse03146p0\\(.example.com.")
 	if err != nil {
-		t.Error("failed to parse ", err.Error())
+		t.Error("failed to parse ", err)
 	}
 }
@@ -1147,13 +1122,13 @@ func TestDigit(t *testing.T) {
 		"miek\\004.nl. 100 IN TXT \"A\"": 4,
 	}
 	for s, i := range tests {
-		r, e := NewRR(s)
+		r, err := NewRR(s)
 		buf := make([]byte, 40)
-		if e != nil {
-			t.Fatalf("failed to parse %s\n", e.Error())
+		if err != nil {
+			t.Fatalf("failed to parse %v", err)
 		}
 		PackRR(r, buf, 0, nil, false)
-		t.Logf("%v\n", buf)
+		t.Log(buf)
 		if buf[5] != i {
 			t.Fatalf("5 pos must be %d, is %d", i, buf[5])
 		}
@@ -1169,11 +1144,10 @@ func TestParseRRSIGTimestamp(t *testing.T) {
 	}
-	for r, _ := range tests {
-		_, e := NewRR(r)
-		if e != nil {
-			t.Fail()
-			t.Logf("%s\n", e.Error())
+	for r := range tests {
+		_, err := NewRR(r)
+		if err != nil {
+			t.Error(err)
 		}
 	}
 }
@@ -1184,11 +1158,10 @@ func TestTxtEqual(t *testing.T) {
 	rr1.Txt = []string{"a\"a", "\"", "b"}
 	rr2, _ := NewRR(rr1.String())
 	if rr1.String() != rr2.String() {
-		t.Logf("these two TXT records should match")
-		t.Logf("\n%s\n%s\n", rr1.String(), rr2.String())
-		t.Fail() // This is not an error, but keep this test.
+		// This is not an error, but keep this test.
+		t.Errorf("these two TXT records should match:\n%s\n%s", rr1.String(), rr2.String())
 	}
-	t.Logf("\n%s\n%s\n", rr1.String(), rr2.String())
+	t.Logf("%s\n%s", rr1.String(), rr2.String())
 }

 func TestTxtLong(t *testing.T) {
@@ -1202,8 +1175,7 @@ func TestTxtLong(t *testing.T) {
 	}
 	str := rr1.String()
 	if len(str) < len(rr1.Txt[0]) {
-		t.Logf("string conversion should work")
-		t.Fail()
+		t.Error("string conversion should work")
 	}
 }
@@ -1217,7 +1189,7 @@ func TestMalformedPackets(t *testing.T) {
 	for _, packet := range packets {
 		data, _ := hex.DecodeString(packet)
 		// for _, v := range data {
-		// 	t.Logf("%s ", string(v))
+		// 	t.Log(v)
 		// }
 		var msg Msg
 		msg.Unpack(data)
@@ -1253,15 +1225,14 @@ func TestNewPrivateKey(t *testing.T) {
 		key.Algorithm = algo.name
 		privkey, err := key.Generate(algo.bits)
 		if err != nil {
-			t.Fatal(err.Error())
+			t.Fatal(err)
 		}

 		newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey))
 		if err != nil {
-			t.Log(key.String())
-			t.Log(key.PrivateKeyString(privkey))
-			t.Fatal(err.Error())
+			t.Error(key.String())
+			t.Error(key.PrivateKeyString(privkey))
+			t.Fatal(err)
 		}

 		switch newPrivKey := newPrivKey.(type) {
@@ -1270,7 +1241,7 @@ func TestNewPrivateKey(t *testing.T) {
 		}

 		if !reflect.DeepEqual(privkey, newPrivKey) {
-			t.Errorf("[%v] Private keys differ:\n%#v\n%#v\n", AlgorithmToString[algo.name], privkey, newPrivKey)
+			t.Errorf("[%v] Private keys differ:\n%#v\n%#v", AlgorithmToString[algo.name], privkey, newPrivKey)
 		}
 	}
 }
@@ -1286,7 +1257,7 @@ func TestNewRRSpecial(t *testing.T) {
 	rr, err = NewRR("; comment")
 	expect = ""
 	if err != nil {
-		t.Errorf("unexpected err: %s", err)
+		t.Errorf("unexpected err: %v", err)
 	}
 	if rr != nil {
 		t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@@ -1295,7 +1266,7 @@ func TestNewRRSpecial(t *testing.T) {
 	rr, err = NewRR("")
 	expect = ""
 	if err != nil {
-		t.Errorf("unexpected err: %s", err)
+		t.Errorf("unexpected err: %v", err)
 	}
 	if rr != nil {
 		t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@@ -1304,7 +1275,7 @@ func TestNewRRSpecial(t *testing.T) {
 	rr, err = NewRR("$ORIGIN foo.")
 	expect = ""
 	if err != nil {
-		t.Errorf("unexpected err: %s", err)
+		t.Errorf("unexpected err: %v", err)
 	}
 	if rr != nil {
 		t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@@ -1313,7 +1284,7 @@ func TestNewRRSpecial(t *testing.T) {
 	rr, err = NewRR(" ")
 	expect = ""
 	if err != nil {
-		t.Errorf("unexpected err: %s", err)
+		t.Errorf("unexpected err: %v", err)
 	}
 	if rr != nil {
 		t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@@ -1322,7 +1293,7 @@ func TestNewRRSpecial(t *testing.T) {
 	rr, err = NewRR("\n")
 	expect = ""
 	if err != nil {
-		t.Errorf("unexpected err: %s", err)
+		t.Errorf("unexpected err: %v", err)
 	}
 	if rr != nil {
 		t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@@ -1331,7 +1302,7 @@ func TestNewRRSpecial(t *testing.T) {
 	rr, err = NewRR("foo. A 1.1.1.1\nbar. A 2.2.2.2")
 	expect = "foo.\t3600\tIN\tA\t1.1.1.1"
 	if err != nil {
-		t.Errorf("unexpected err: %s", err)
+		t.Errorf("unexpected err: %v", err)
 	}
 	if rr == nil || rr.String() != expect {
 		t.Errorf("unexpected result: [%s] != [%s]", rr, expect)
@@ -1394,15 +1365,13 @@ func TestParseIPSECKEY(t *testing.T) {
 	for i := 0; i < len(tests)-1; i++ {
 		t1 := tests[i]
 		e1 := tests[i+1]
-		r, e := NewRR(t1)
-		if e != nil {
-			t.Logf("failed to parse IPSECKEY %s", e)
+		r, err := NewRR(t1)
+		if err != nil {
+			t.Errorf("failed to parse IPSECKEY %v", err)
 			continue
 		}
 		if r.String() != e1 {
-			t.Logf("these two IPSECKEY records should match")
-			t.Logf("\n%s\n%s\n", r.String(), e1)
-			t.Fail()
+			t.Errorf("these two IPSECKEY records should match:\n%s\n%s", r.String(), e1)
 		}
 		i++
 	}
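The bulk of the parse_test.go churn above is one mechanical pattern: rename the error variable `e` to `err`, collapse a `t.Logf(...)` followed by `t.Fail()` into a single `t.Errorf(...)` (or `t.Fatalf` when the test cannot continue), and format errors with `%v` instead of `%s`/`err.Error()`. A minimal before/after sketch of that pattern; the record string is illustrative and not taken from the test file:

```go
package dns

import "testing"

// Before: log, then mark the test failed in a second call; error stringified by hand.
func testParseOld(t *testing.T) {
	if _, e := NewRR("example.org. 3600 IN A 192.0.2.1"); e != nil {
		t.Logf("failed to parse: %s", e.Error())
		t.Fail()
	}
}

// After: one call does both, and %v formats nil errors safely.
func testParseNew(t *testing.T) {
	if _, err := NewRR("example.org. 3600 IN A 192.0.2.1"); err != nil {
		t.Errorf("failed to parse: %v", err)
	}
}
```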
11	Godeps/_workspace/src/github.com/miekg/dns/privaterr.go (generated, vendored)
@@ -1,10 +1,3 @@
-/*
-PRIVATE RR
-
-RFC 6895 sets aside a range of type codes for private use. This range
-is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
-can be used, before requesting an official type code from IANA.
-*/
 package dns

 import (
@@ -91,9 +84,9 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata)
 	// TODO(miek): we could also be returning _QUOTE, this might or might not
 	// be an issue (basically parsing TXT becomes hard)
 	switch l = <-c; l.value {
-	case _NEWLINE, _EOF:
+	case zNewline, zEOF:
 		break FETCH
-	case _STRING:
+	case zString:
 		text = append(text, l.token)
 	}
 }
7	Godeps/_workspace/src/github.com/miekg/dns/privaterr_test.go (generated, vendored)
@@ -1,9 +1,10 @@
 package dns_test

 import (
-	"github.com/miekg/dns"
 	"strings"
 	"testing"
+
+	"github.com/miekg/dns"
 )

 const TypeISBN uint16 = 0x0F01
@@ -75,7 +76,7 @@ func TestPrivateByteSlice(t *testing.T) {
 	buf := make([]byte, 100)
 	off, err := dns.PackRR(rr, buf, 0, nil, false)
 	if err != nil {
-		t.Errorf("got error packing ISBN: %s", err)
+		t.Errorf("got error packing ISBN: %v", err)
 	}

 	custrr := rr.(*dns.PrivateRR)
@@ -85,7 +86,7 @@ func TestPrivateByteSlice(t *testing.T) {
 	rr1, off1, err := dns.UnpackRR(buf[:off], 0)
 	if err != nil {
-		t.Errorf("got error unpacking ISBN: %s", err)
+		t.Errorf("got error unpacking ISBN: %v", err)
 	}

 	if off1 != off {
26	Godeps/_workspace/src/github.com/miekg/dns/server.go (generated, vendored)
@@ -10,6 +10,7 @@ import (
 	"time"
 )

+// Handler is implemented by any value that implements ServeDNS.
 type Handler interface {
 	ServeDNS(w ResponseWriter, r *Msg)
 }
@@ -44,7 +45,7 @@ type response struct {
 	tsigSecret map[string]string // the tsig secrets
 	udp        *net.UDPConn      // i/o connection if UDP was used
 	tcp        *net.TCPConn      // i/o connection if TCP was used
-	udpSession *sessionUDP       // oob data to get egress interface right
+	udpSession *SessionUDP       // oob data to get egress interface right
 	remoteAddr net.Addr          // address of the client
 }

@@ -72,12 +73,12 @@ var DefaultServeMux = NewServeMux()
 // Handler object that calls f.
 type HandlerFunc func(ResponseWriter, *Msg)

-// ServerDNS calls f(w, r)
+// ServeDNS calls f(w, r).
 func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
 	f(w, r)
 }

-// FailedHandler returns a HandlerFunc that returns SERVFAIL for every request it gets.
+// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
 func HandleFailed(w ResponseWriter, r *Msg) {
 	m := new(Msg)
 	m.SetRcode(r, RcodeServerFailure)
@@ -121,10 +122,9 @@ func (mux *ServeMux) match(q string, t uint16) Handler {
 	if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
 		if t != TypeDS {
 			return h
-		} else {
-			// Continue for DS to see if we have a parent too, if so delegeate to the parent
-			handler = h
 		}
+		// Continue for DS to see if we have a parent too, if so delegeate to the parent
+		handler = h
 	}
 	off, end = NextLabel(q, off)
 	if end {
@@ -148,7 +148,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
 	mux.m.Unlock()
 }

-// Handle adds a handler to the ServeMux for pattern.
+// HandleFunc adds a handler function to the ServeMux for pattern.
 func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
 	mux.Handle(pattern, HandlerFunc(handler))
 }
@@ -372,7 +372,7 @@ func (srv *Server) getReadTimeout() time.Duration {
 }

 // serveTCP starts a TCP listener for the server.
-// Each request is handled in a seperate goroutine.
+// Each request is handled in a separate goroutine.
 func (srv *Server) serveTCP(l *net.TCPListener) error {
 	defer l.Close()

@@ -407,7 +407,7 @@ func (srv *Server) serveTCP(l *net.TCPListener) error {
 }

 // serveUDP starts a UDP listener for the server.
-// Each request is handled in a seperate goroutine.
+// Each request is handled in a separate goroutine.
 func (srv *Server) serveUDP(l *net.UDPConn) error {
 	defer l.Close()

@@ -438,7 +438,7 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
 }

 // Serve a new connection.
-func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *sessionUDP, t *net.TCPConn) {
+func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t *net.TCPConn) {
 	w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
 	q := 0
 	defer func() {
@@ -537,10 +537,10 @@ func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, er
 	return m, nil
 }

-func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *sessionUDP, error) {
+func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
 	conn.SetReadDeadline(time.Now().Add(timeout))
 	m := make([]byte, srv.UDPSize)
-	n, s, e := readFromSessionUDP(conn, m)
+	n, s, e := ReadFromSessionUDP(conn, m)
 	if e != nil || n == 0 {
 		if e != nil {
 			return nil, nil, e
@@ -576,7 +576,7 @@ func (w *response) WriteMsg(m *Msg) (err error) {
 func (w *response) Write(m []byte) (int, error) {
 	switch {
 	case w.udp != nil:
-		n, err := writeToSessionUDP(w.udp, m, w.udpSession)
+		n, err := WriteToSessionUDP(w.udp, m, w.udpSession)
 		return n, err
 	case w.tcp != nil:
 		lm := len(m)
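server.go above carries the handler plumbing this update touches: `HandlerFunc`, `ServeMux.HandleFunc` and `HandleFailed`. A small sketch of wiring a handler into the package-level mux and serving it, pieced together from identifiers visible in this diff; the zone, address and TXT payload are illustrative:

```go
package main

import (
	"log"

	"github.com/miekg/dns"
)

// helloHandler answers every question with a single static TXT record.
func helloHandler(w dns.ResponseWriter, r *dns.Msg) {
	m := new(dns.Msg)
	m.SetReply(r)
	if txt, err := dns.NewRR(r.Question[0].Name + " 3600 IN TXT \"Hello world\""); err == nil {
		m.Answer = append(m.Answer, txt)
	}
	w.WriteMsg(m)
}

func main() {
	// Register on the default mux; queries outside the zone can be sent to
	// a SERVFAIL handler such as dns.HandleFailed if desired.
	dns.HandleFunc("miek.nl.", helloHandler)
	srv := &dns.Server{Addr: "127.0.0.1:8053", Net: "udp"}
	log.Fatal(srv.ListenAndServe())
}
```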
58	Godeps/_workspace/src/github.com/miekg/dns/server_test.go (generated, vendored)
@@ -95,7 +95,7 @@ func TestServing(t *testing.T) {
 	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Unable to run test server: %s", err)
+		t.Fatalf("Unable to run test server: %v", err)
 	}
 	defer s.Shutdown()

@@ -104,38 +104,32 @@ func TestServing(t *testing.T) {
 	m.SetQuestion("miek.nl.", TypeTXT)
 	r, _, err := c.Exchange(m, addrstr)
 	if err != nil || len(r.Extra) == 0 {
-		t.Log("failed to exchange miek.nl", err)
-		t.Fatal()
+		t.Fatal("failed to exchange miek.nl", err)
 	}
 	txt := r.Extra[0].(*TXT).Txt[0]
 	if txt != "Hello world" {
-		t.Log("Unexpected result for miek.nl", txt, "!= Hello world")
-		t.Fail()
+		t.Error("Unexpected result for miek.nl", txt, "!= Hello world")
 	}

 	m.SetQuestion("example.com.", TypeTXT)
 	r, _, err = c.Exchange(m, addrstr)
 	if err != nil {
-		t.Log("failed to exchange example.com", err)
-		t.Fatal()
+		t.Fatal("failed to exchange example.com", err)
 	}
 	txt = r.Extra[0].(*TXT).Txt[0]
 	if txt != "Hello example" {
-		t.Log("Unexpected result for example.com", txt, "!= Hello example")
-		t.Fail()
+		t.Error("Unexpected result for example.com", txt, "!= Hello example")
 	}

 	// Test Mixes cased as noticed by Ask.
 	m.SetQuestion("eXaMplE.cOm.", TypeTXT)
 	r, _, err = c.Exchange(m, addrstr)
 	if err != nil {
-		t.Log("failed to exchange eXaMplE.cOm", err)
-		t.Fail()
+		t.Error("failed to exchange eXaMplE.cOm", err)
 	}
 	txt = r.Extra[0].(*TXT).Txt[0]
 	if txt != "Hello example" {
-		t.Log("Unexpected result for example.com", txt, "!= Hello example")
-		t.Fail()
+		t.Error("Unexpected result for example.com", txt, "!= Hello example")
 	}
 }

@@ -147,7 +141,7 @@ func BenchmarkServe(b *testing.B) {
 	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
 	if err != nil {
-		b.Fatalf("Unable to run test server: %s", err)
+		b.Fatalf("Unable to run test server: %v", err)
 	}
 	defer s.Shutdown()

@@ -169,7 +163,7 @@ func benchmarkServe6(b *testing.B) {
 	a := runtime.GOMAXPROCS(4)
 	s, addrstr, err := RunLocalUDPServer("[::1]:0")
 	if err != nil {
-		b.Fatalf("Unable to run test server: %s", err)
+		b.Fatalf("Unable to run test server: %v", err)
 	}
 	defer s.Shutdown()

@@ -200,7 +194,7 @@ func BenchmarkServeCompress(b *testing.B) {
 	a := runtime.GOMAXPROCS(4)
 	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
 	if err != nil {
-		b.Fatalf("Unable to run test server: %s", err)
+		b.Fatalf("Unable to run test server: %v", err)
 	}
 	defer s.Shutdown()

@@ -301,7 +295,7 @@ func TestServingLargeResponses(t *testing.T) {
 	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Unable to run test server: %s", err)
+		t.Fatalf("Unable to run test server: %v", err)
 	}
 	defer s.Shutdown()

@@ -316,8 +310,7 @@ func TestServingLargeResponses(t *testing.T) {
 	M.Unlock()
 	_, _, err = c.Exchange(m, addrstr)
 	if err != nil {
-		t.Logf("failed to exchange: %s", err.Error())
-		t.Fail()
+		t.Errorf("failed to exchange: %v", err)
 	}
 	// This must fail
 	M.Lock()
@@ -325,15 +318,13 @@ func TestServingLargeResponses(t *testing.T) {
 	M.Unlock()
 	_, _, err = c.Exchange(m, addrstr)
 	if err == nil {
-		t.Logf("failed to fail exchange, this should generate packet error")
-		t.Fail()
+		t.Error("failed to fail exchange, this should generate packet error")
 	}
 	// But this must work again
 	c.UDPSize = 7000
 	_, _, err = c.Exchange(m, addrstr)
 	if err != nil {
-		t.Logf("failed to exchange: %s", err.Error())
-		t.Fail()
+		t.Errorf("failed to exchange: %v", err)
 	}
 }

@@ -344,7 +335,7 @@ func TestServingResponse(t *testing.T) {
 	HandleFunc("miek.nl.", HelloServer)
 	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Unable to run test server: %s", err)
+		t.Fatalf("Unable to run test server: %v", err)
 	}

 	c := new(Client)
@@ -353,49 +344,46 @@ func TestServingResponse(t *testing.T) {
 	m.Response = false
 	_, _, err = c.Exchange(m, addrstr)
 	if err != nil {
-		t.Log("failed to exchange", err)
-		t.Fatal()
+		t.Fatal("failed to exchange", err)
 	}
 	m.Response = true
 	_, _, err = c.Exchange(m, addrstr)
 	if err == nil {
-		t.Log("exchanged response message")
-		t.Fatal()
+		t.Fatal("exchanged response message")
 	}

 	s.Shutdown()
 	s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Unable to run test server: %s", err)
+		t.Fatalf("Unable to run test server: %v", err)
 	}
 	defer s.Shutdown()

 	m.Response = true
 	_, _, err = c.Exchange(m, addrstr)
 	if err != nil {
-		t.Log("could exchanged response message in Unsafe mode")
-		t.Fatal()
+		t.Fatal("could exchanged response message in Unsafe mode")
 	}
 }

 func TestShutdownTCP(t *testing.T) {
 	s, _, err := RunLocalTCPServer("127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Unable to run test server: %s", err)
+		t.Fatalf("Unable to run test server: %v", err)
 	}
 	err = s.Shutdown()
 	if err != nil {
-		t.Errorf("Could not shutdown test TCP server, %s", err)
+		t.Errorf("Could not shutdown test TCP server, %v", err)
 	}
 }

 func TestShutdownUDP(t *testing.T) {
 	s, _, err := RunLocalUDPServer("127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("Unable to run test server: %s", err)
+		t.Fatalf("Unable to run test server: %v", err)
 	}
 	err = s.Shutdown()
 	if err != nil {
-		t.Errorf("Could not shutdown test UDP server, %s", err)
+		t.Errorf("Could not shutdown test UDP server, %v", err)
 	}
 }
17	Godeps/_workspace/src/github.com/miekg/dns/sig0.go (generated, vendored)
@@ -1,18 +1,3 @@
-// SIG(0)
-//
-// From RFC 2931:
-//
-// SIG(0) provides protection for DNS transactions and requests ....
-// ... protection for glue records, DNS requests, protection for message headers
-// on requests and responses, and protection of the overall integrity of a response.
-//
-// It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
-// secret approach in TSIG.
-// Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
-// RSASHA512.
-//
-// Signing subsequent messages in multi-message sessions is not implemented.
-//
 package dns

 import (
@@ -92,7 +77,7 @@ func (rr *SIG) Sign(k PrivateKey, m *Msg) ([]byte, error) {
 	buf[rdoff], buf[rdoff+1] = packUint16(rdlen)
 	// Adjust additional count
 	adc, _ := unpackUint16(buf, 10)
-	adc += 1
+	adc++
 	buf[10], buf[11] = packUint16(adc)
 	return buf, nil
 }
24	Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go (generated, vendored)
@@ -27,8 +27,7 @@ func TestSIG0(t *testing.T) {
 	}
 	pk, err := keyrr.Generate(keysize)
 	if err != nil {
-		t.Logf("Failed to generate key for “%s”: %v", algstr, err)
-		t.Fail()
+		t.Errorf("Failed to generate key for “%s”: %v", algstr, err)
 		continue
 	}
 	now := uint32(time.Now().Unix())
@@ -43,19 +42,16 @@ func TestSIG0(t *testing.T) {
 	sigrr.SignerName = keyrr.Hdr.Name
 	mb, err := sigrr.Sign(pk, m)
 	if err != nil {
-		t.Logf("Failed to sign message using “%s”: %v", algstr, err)
-		t.Fail()
+		t.Errorf("Failed to sign message using “%s”: %v", algstr, err)
 		continue
 	}
 	m := new(Msg)
 	if err := m.Unpack(mb); err != nil {
-		t.Logf("Failed to unpack message signed using “%s”: %v", algstr, err)
-		t.Fail()
+		t.Errorf("Failed to unpack message signed using “%s”: %v", algstr, err)
 		continue
 	}
 	if len(m.Extra) != 1 {
-		t.Logf("Missing SIG for message signed using “%s”", algstr)
-		t.Fail()
+		t.Errorf("Missing SIG for message signed using “%s”", algstr)
 		continue
 	}
 	var sigrrwire *SIG
@@ -63,8 +59,7 @@ func TestSIG0(t *testing.T) {
 	case *SIG:
 		sigrrwire = rr
 	default:
-		t.Logf("Expected SIG RR, instead: %v", rr)
-		t.Fail()
+		t.Errorf("Expected SIG RR, instead: %v", rr)
 		continue
 	}
 	for _, rr := range []*SIG{sigrr, sigrrwire} {
@@ -73,23 +68,20 @@ func TestSIG0(t *testing.T) {
 		id = "sigrrwire"
 	}
 	if err := rr.Verify(keyrr, mb); err != nil {
-		t.Logf("Failed to verify “%s” signed SIG(%s): %v", algstr, id, err)
-		t.Fail()
+		t.Errorf("Failed to verify “%s” signed SIG(%s): %v", algstr, id, err)
 		continue
 	}
 	}
 	mb[13]++
 	if err := sigrr.Verify(keyrr, mb); err == nil {
-		t.Logf("Verify succeeded on an altered message using “%s”", algstr)
-		t.Fail()
+		t.Errorf("Verify succeeded on an altered message using “%s”", algstr)
 		continue
 	}
 	sigrr.Expiration = 2
 	sigrr.Inception = 1
 	mb, _ = sigrr.Sign(pk, m)
 	if err := sigrr.Verify(keyrr, mb); err == nil {
-		t.Logf("Verify succeeded on an expired message using “%s”", algstr)
-		t.Fail()
+		t.Errorf("Verify succeeded on an expired message using “%s”", algstr)
 		continue
 	}
 }
6	Godeps/_workspace/src/github.com/miekg/dns/tlsa.go (generated, vendored)
@@ -25,7 +25,8 @@ func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (st
 		h := sha256.New()
 		switch selector {
 		case 0:
-			return hex.EncodeToString(cert.Raw), nil
+			io.WriteString(h, string(cert.Raw))
+			return hex.EncodeToString(h.Sum(nil)), nil
 		case 1:
 			io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
 			return hex.EncodeToString(h.Sum(nil)), nil
@@ -34,7 +35,8 @@ func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (st
 		h := sha512.New()
 		switch selector {
 		case 0:
-			return hex.EncodeToString(cert.Raw), nil
+			io.WriteString(h, string(cert.Raw))
+			return hex.EncodeToString(h.Sum(nil)), nil
 		case 1:
 			io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
 			return hex.EncodeToString(h.Sum(nil)), nil
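The tlsa.go fix above makes selector 0 hash the full DER certificate instead of returning the raw bytes hex-encoded. A hedged sketch of a caller; the `(string, error)` return shape is inferred from the hunk (the header is truncated in the diff), and the certificate is assumed to be already parsed:

```go
package main

import (
	"crypto/x509"
	"fmt"

	"github.com/miekg/dns"
)

// printTLSAValue prints the DANE certificate-association data for a parsed
// certificate, using selector 0 (full certificate) and matching type 1 (SHA-256).
func printTLSAValue(cert *x509.Certificate) error {
	val, err := dns.CertificateToDANE(0, 1, cert)
	if err != nil {
		return err
	}
	fmt.Println(val) // hex-encoded SHA-256 of cert.Raw after this fix
	return nil
}
```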
55	Godeps/_workspace/src/github.com/miekg/dns/tsig.go (generated, vendored)
@@ -1,56 +1,3 @@
-// TRANSACTION SIGNATURE
-//
-// An TSIG or transaction signature adds a HMAC TSIG record to each message sent.
-// The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
-//
-// Basic use pattern when querying with a TSIG name "axfr." (note that these key names
-// must be fully qualified - as they are domain names) and the base64 secret
-// "so6ZGir4GPAqINNh9U5c3A==":
-//
-//	c := new(dns.Client)
-//	c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
-//	m := new(dns.Msg)
-//	m.SetQuestion("miek.nl.", dns.TypeMX)
-//	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
-//	...
-//	// When sending the TSIG RR is calculated and filled in before sending
-//
-// When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with
-// TSIG, this is the basic use pattern. In this example we request an AXFR for
-// miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
-// and using the server 176.58.119.54:
-//
-//	t := new(dns.Transfer)
-//	m := new(dns.Msg)
-//	t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
-//	m.SetAxfr("miek.nl.")
-//	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
-//	c, err := t.In(m, "176.58.119.54:53")
-//	for r := range c { /* r.RR */ }
-//
-// You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
-// If something is not correct an error is returned.
-//
-// Basic use pattern validating and replying to a message that has TSIG set.
-//
-//	server := &dns.Server{Addr: ":53", Net: "udp"}
-//	server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
-//	go server.ListenAndServe()
-//	dns.HandleFunc(".", handleRequest)
-//
-//	func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
-//		m := new(Msg)
-//		m.SetReply(r)
-//		if r.IsTsig() {
-//			if w.TsigStatus() == nil {
-//				// *Msg r has an TSIG record and it was validated
-//				m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
-//			} else {
-//				// *Msg r has an TSIG records and it was not valided
-//			}
-//		}
-//		w.WriteMsg(m)
-//	}
 package dns

 import (
@@ -75,6 +22,8 @@ const (
 	HmacSHA512 = "hmac-sha512."
 )

+// TSIG is the RR the holds the transaction signature of a message.
+// See RFC 2845 and RFC 4635.
 type TSIG struct {
 	Hdr       RR_Header
 	Algorithm string `dns:"domain-name"`
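The package comment deleted above was the only prose walk-through of the client-side TSIG flow, so here it is restated as a compact, self-contained sketch; the key name and base64 secret are the same placeholders the removed comment used:

```go
package main

import (
	"time"

	"github.com/miekg/dns"
)

// queryWithTSIG signs a single question with the shared key "axfr.",
// following the pattern described in the removed package comment.
func queryWithTSIG(server string) (*dns.Msg, error) {
	c := new(dns.Client)
	c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}

	m := new(dns.Msg)
	m.SetQuestion("miek.nl.", dns.TypeMX)
	// The TSIG RR is calculated and filled in when the message is sent.
	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())

	r, _, err := c.Exchange(m, server)
	return r, err
}
```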
25	Godeps/_workspace/src/github.com/miekg/dns/types.go (generated, vendored)
@@ -10,9 +10,12 @@ import (
 )

 type (
-	Type  uint16 // Type is a DNS type.
-	Class uint16 // Class is a DNS class.
-	Name  string // Name is a DNS domain name.
+	// Type is a DNS type.
+	Type uint16
+	// Class is a DNS class.
+	Class uint16
+	// Name is a DNS domain name.
+	Name string
 )

 // Packet formats
@@ -20,6 +23,7 @@ type (
 // Wire constants and supported types.
 const (
 	// valid RR_Header.Rrtype and Question.qtype
+
 	TypeNone uint16 = 0
 	TypeA    uint16 = 1
 	TypeNS   uint16 = 2
@@ -91,7 +95,9 @@ const (
 	TypeTKEY uint16 = 249
 	TypeTSIG uint16 = 250
+
 	// valid Question.Qtype only
+
 	TypeIXFR  uint16 = 251
 	TypeAXFR  uint16 = 252
 	TypeMAILB uint16 = 253
@@ -105,6 +111,7 @@ const (
 	TypeReserved uint16 = 65535

 	// valid Question.Qclass
+
 	ClassINET   = 1
 	ClassCSNET  = 2
 	ClassCHAOS  = 3
@@ -113,6 +120,7 @@ const (
 	ClassANY = 255

 	// Msg.rcode
+
 	RcodeSuccess       = 0
 	RcodeFormatError   = 1
 	RcodeServerFailure = 2
@@ -133,11 +141,11 @@ const (
 	RcodeBadAlg   = 21
 	RcodeBadTrunc = 22 // TSIG

-	// Opcode
+	// Opcode, there is no 3
+
 	OpcodeQuery  = 0
 	OpcodeIQuery = 1
 	OpcodeStatus = 2
-	// There is no 3
 	OpcodeNotify = 4
 	OpcodeUpdate = 5
 )
@@ -198,7 +206,8 @@ var CertTypeToString = map[uint16]string{

 var StringToCertType = reverseInt16(CertTypeToString)

-// DNS queries.
+// Question holds a DNS question. There can be multiple questions in the
+// question section of a message. Usually there is just one.
 type Question struct {
 	Name  string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed)
 	Qtype uint16
@@ -801,7 +810,7 @@ func cmToM(m, e uint8) string {
 	s := fmt.Sprintf("%d", m)
 	for e > 2 {
 		s += "0"
-		e -= 1
+		e--
 	}
 	return s
 }
@@ -838,7 +847,7 @@ func (rr *LOC) String() string {
 	lon = lon % LOC_HOURS
 	s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew)

-	var alt float64 = float64(rr.Altitude) / 100
+	var alt = float64(rr.Altitude) / 100
 	alt -= LOC_ALTITUDEBASE
 	if rr.Altitude%100 != 0 {
 		s += fmt.Sprintf("%.2fm ", alt)
14	Godeps/_workspace/src/github.com/miekg/dns/udp.go (generated, vendored)
@@ -7,12 +7,12 @@ import (
 	"syscall"
 )

-type sessionUDP struct {
+type SessionUDP struct {
 	raddr   *net.UDPAddr
 	context []byte
 }

-func (s *sessionUDP) RemoteAddr() net.Addr { return s.raddr }
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }

 // setUDPSocketOptions sets the UDP socket options.
 // This function is implemented on a per platform basis. See udp_*.go for more details
@@ -37,19 +37,19 @@ func setUDPSocketOptions(conn *net.UDPConn) error {
 	return nil
 }

-// readFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
 // net.UDPAddr.
-func readFromSessionUDP(conn *net.UDPConn, b []byte) (int, *sessionUDP, error) {
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
 	oob := make([]byte, 40)
 	n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
 	if err != nil {
 		return n, nil, err
 	}
-	return n, &sessionUDP{raddr, oob[:oobn]}, err
+	return n, &SessionUDP{raddr, oob[:oobn]}, err
 }

-// writeToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *sessionUDP instead of a net.Addr.
-func writeToSessionUDP(conn *net.UDPConn, b []byte, session *sessionUDP) (int, error) {
+// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
 	n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
 	return n, err
 }
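udp.go above is where the unexported sessionUDP becomes the exported SessionUDP, along with ReadFromSessionUDP and WriteToSessionUDP. A rough sketch of the read/reply loop those functions enable for callers outside the package; the buffer size and logging are illustrative:

```go
package main

import (
	"log"
	"net"

	"github.com/miekg/dns"
)

// echoUDP reads one datagram at a time and writes it straight back,
// carrying the session so the reply leaves via the interface it arrived on.
func echoUDP(conn *net.UDPConn) {
	buf := make([]byte, 65535)
	for {
		n, session, err := dns.ReadFromSessionUDP(conn, buf)
		if err != nil {
			log.Println("read:", err)
			return
		}
		log.Println("packet from", session.RemoteAddr())
		if _, err := dns.WriteToSessionUDP(conn, buf[:n], session); err != nil {
			log.Println("write:", err)
			return
		}
	}
}
```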
14	Godeps/_workspace/src/github.com/miekg/dns/udp_windows.go (generated, vendored)
@@ -4,28 +4,28 @@ package dns

 import "net"

-type sessionUDP struct {
+type SessionUDP struct {
 	raddr *net.UDPAddr
 }

-// readFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
 // net.UDPAddr.
-func readFromSessionUDP(conn *net.UDPConn, b []byte) (int, *sessionUDP, error) {
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
 	n, raddr, err := conn.ReadFrom(b)
 	if err != nil {
 		return n, nil, err
 	}
-	session := &sessionUDP{raddr.(*net.UDPAddr)}
+	session := &SessionUDP{raddr.(*net.UDPAddr)}
 	return n, session, err
 }

-// writeToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *sessionUDP instead of a net.Addr.
-func writeToSessionUDP(conn *net.UDPConn, b []byte, session *sessionUDP) (int, error) {
+// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
 	n, err := conn.WriteTo(b, session.raddr)
 	return n, err
 }

-func (s *sessionUDP) RemoteAddr() net.Addr { return s.raddr }
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }

 // setUDPSocketOptions sets the UDP socket options.
 // This function is implemented on a per platform basis. See udp_*.go for more details
35 Godeps/_workspace/src/github.com/miekg/dns/update.go (generated, vendored)

@@ -1,38 +1,3 @@
-// DYNAMIC UPDATES
-//
-// Dynamic updates reuses the DNS message format, but renames three of
-// the sections. Question is Zone, Answer is Prerequisite, Authority is
-// Update, only the Additional is not renamed. See RFC 2136 for the gory details.
-//
-// You can set a rather complex set of rules for the existence of absence of
-// certain resource records or names in a zone to specify if resource records
-// should be added or removed. The table from RFC 2136 supplemented with the Go
-// DNS function shows which functions exist to specify the prerequisites.
-//
-// 3.2.4 - Table Of Metavalues Used In Prerequisite Section
-//
-// CLASS    TYPE     RDATA    Meaning                    Function
-// --------------------------------------------------------------
-// ANY      ANY      empty    Name is in use             dns.NameUsed
-// ANY      rrset    empty    RRset exists (value indep) dns.RRsetUsed
-// NONE     ANY      empty    Name is not in use         dns.NameNotUsed
-// NONE     rrset    empty    RRset does not exist       dns.RRsetNotUsed
-// zone     rrset    rr       RRset exists (value dep)   dns.Used
-//
-// The prerequisite section can also be left empty.
-// If you have decided on the prerequisites you can tell what RRs should
-// be added or deleted. The next table shows the options you have and
-// what functions to call.
-//
-// 3.4.2.6 - Table Of Metavalues Used In Update Section
-//
-// CLASS    TYPE     RDATA    Meaning                     Function
-// ---------------------------------------------------------------
-// ANY      ANY      empty    Delete all RRsets from name dns.RemoveName
-// ANY      rrset    empty    Delete an RRset             dns.RemoveRRset
-// NONE     rrset    rr       Delete an RR from RRset     dns.Remove
-// zone     rrset    rr       Add to an RRset             dns.Insert
-//
 package dns
 
 // NameUsed sets the RRs in the prereq section to
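The package comment removed above summarized the RFC 2136 prerequisite and update tables and the matching Go helpers (dns.NameUsed, dns.Insert, dns.RemoveRRset, and so on). A minimal sketch of how those helpers are typically combined; the zone name, record contents, and server address below are illustrative assumptions, not taken from this commit.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Build a dynamic update message for the zone example.org.
	m := new(dns.Msg)
	m.SetUpdate("example.org.")

	rr, err := dns.NewRR("host-1.example.org. 300 IN A 10.0.0.1")
	if err != nil {
		panic(err)
	}
	// Prerequisite: the name must not already be in use.
	m.NameNotUsed([]dns.RR{rr})
	// Update: add the record to its RRset.
	m.Insert([]dns.RR{rr})

	// Send the update to an (assumed) authoritative server.
	c := new(dns.Client)
	in, _, err := c.Exchange(m, "ns1.example.org:53")
	fmt.Println(in, err)
}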
21 Godeps/_workspace/src/github.com/miekg/dns/update_test.go (generated, vendored)

@@ -12,10 +12,9 @@ func TestDynamicUpdateParsing(t *testing.T) {
 			typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" {
 			continue
 		}
-		r, e := NewRR(prefix + typ)
-		if e != nil {
-			t.Log("failure to parse: " + prefix + typ)
-			t.Fail()
+		r, err := NewRR(prefix + typ)
+		if err != nil {
+			t.Errorf("failure to parse: %s %s: %v", prefix, typ, err)
 		} else {
 			t.Logf("parsed: %s", r.String())
 		}
@@ -31,8 +30,7 @@ func TestDynamicUpdateUnpack(t *testing.T) {
 		msg := new(Msg)
 		err := msg.Unpack(buf)
 		if err != nil {
-			t.Log("failed to unpack: " + err.Error() + "\n" + msg.String())
-			t.Fail()
+			t.Errorf("failed to unpack: %v\n%s", err, msg.String())
 		}
 	}
 
@@ -45,13 +43,11 @@ func TestDynamicUpdateZeroRdataUnpack(t *testing.T) {
 		rr.Rrtype = n
 		bytes, err := m.Pack()
 		if err != nil {
-			t.Logf("failed to pack %s: %v", s, err)
-			t.Fail()
+			t.Errorf("failed to pack %s: %v", s, err)
 			continue
 		}
 		if err := new(Msg).Unpack(bytes); err != nil {
-			t.Logf("failed to unpack %s: %v", s, err)
-			t.Fail()
+			t.Errorf("failed to unpack %s: %v", s, err)
 		}
 	}
 }
@@ -82,8 +78,7 @@ func TestRemoveRRset(t *testing.T) {
 	if err := tmp.Unpack(actual); err != nil {
 		t.Fatalf("Error unpacking actual msg: %v", err)
 	}
-	t.Logf("Expected msg:\n%s", expectstr)
-	t.Logf("Actual msg:\n%v", tmp)
-	t.Fail()
+	t.Errorf("Expected msg:\n%s", expectstr)
+	t.Errorf("Actual msg:\n%v", tmp)
 }
 }
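The test changes above collapse the older t.Log(...) plus t.Fail() pairs into single t.Errorf calls, which record the message and mark the test failed in one step. A tiny self-contained illustration of the idiom; doSomething and its message are made up.

package example

import "testing"

func doSomething() error { return nil }

func TestErrorfIdiom(t *testing.T) {
	if err := doSomething(); err != nil {
		// Old style: t.Logf("doSomething failed: %v", err); t.Fail()
		// New style: one call with the same effect.
		t.Errorf("doSomething failed: %v", err)
	}
}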
1 Godeps/_workspace/src/github.com/miekg/dns/xfr.go (generated, vendored)

@@ -193,6 +193,7 @@ func (t *Transfer) ReadMsg() (*Msg, error) {
 		}
 		// Need to work on the original message p, as that was used to calculate the tsig.
 		err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
+		t.tsigRequestMAC = ts.MAC
 	}
 	return m, err
 }
27 Godeps/_workspace/src/github.com/miekg/dns/xfr_test.go (generated, vendored)

@@ -7,8 +7,8 @@ import (
 )
 
 func getIP(s string) string {
-	a, e := net.LookupAddr(s)
-	if e != nil {
+	a, err := net.LookupAddr(s)
+	if err != nil {
 		return ""
 	}
 	return a[0]
@@ -28,17 +28,15 @@ func testClientAXFR(t *testing.T) {
 	tr := new(Transfer)
 
 	if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil {
-		t.Log("failed to setup axfr: " + err.Error())
-		t.Fatal()
+		t.Fatal("failed to setup axfr: ", err)
 	} else {
 		for ex := range a {
 			if ex.Error != nil {
-				t.Logf("error %s\n", ex.Error.Error())
-				t.Fail()
+				t.Errorf("error %v", ex.Error)
 				break
 			}
 			for _, rr := range ex.RR {
-				t.Logf("%s\n", rr.String())
+				t.Log(rr.String())
 			}
 		}
 	}
@@ -56,14 +54,11 @@ func testClientAXFRMultipleEnvelopes(t *testing.T) {
 
 	tr := new(Transfer)
 	if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil {
-		t.Log("Failed to setup axfr" + err.Error() + "for server: " + server)
-		t.Fail()
-		return
+		t.Fatalf("Failed to setup axfr %v for server: %v", err, server)
 	} else {
 		for ex := range a {
 			if ex.Error != nil {
-				t.Logf("Error %s\n", ex.Error.Error())
-				t.Fail()
+				t.Errorf("Error %v", ex.Error)
 				break
 			}
 		}
 	}
@@ -82,17 +77,15 @@ func testClientTsigAXFR(t *testing.T) {
 	tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
 
 	if a, err := tr.In(m, "176.58.119.54:53"); err != nil {
-		t.Log("failed to setup axfr: " + err.Error())
-		t.Fatal()
+		t.Fatal("failed to setup axfr: ", err)
 	} else {
 		for ex := range a {
 			if ex.Error != nil {
-				t.Logf("error %s\n", ex.Error.Error())
-				t.Fail()
+				t.Errorf("error %v", ex.Error)
 				break
 			}
 			for _, rr := range ex.RR {
-				t.Logf("%s\n", rr.String())
+				t.Log(rr.String())
 			}
 		}
 	}
27 Godeps/_workspace/src/github.com/miekg/dns/zgenerate.go (generated, vendored)

@@ -1,6 +1,7 @@
 package dns
 
 import (
+	"bytes"
 	"fmt"
 	"strconv"
 	"strings"
@@ -24,13 +25,13 @@ func generate(l lex, c chan lex, t chan *Token, o string) string {
 		if i+1 == len(l.token) {
 			return "bad step in $GENERATE range"
 		}
-		if s, e := strconv.Atoi(l.token[i+1:]); e != nil {
-			return "bad step in $GENERATE range"
-		} else {
+		if s, e := strconv.Atoi(l.token[i+1:]); e == nil {
 			if s < 0 {
 				return "bad step in $GENERATE range"
 			}
 			step = s
+		} else {
+			return "bad step in $GENERATE range"
 		}
 		l.token = l.token[:i]
 	}
@@ -46,7 +47,7 @@ func generate(l lex, c chan lex, t chan *Token, o string) string {
 	if err != nil {
 		return "bad stop in $GENERATE range"
 	}
-	if end < 0 || start < 0 || end <= start {
+	if end < 0 || start < 0 || end < start {
 		return "bad range in $GENERATE range"
 	}
 
@@ -55,14 +56,14 @@ func generate(l lex, c chan lex, t chan *Token, o string) string {
 	s := ""
BuildRR:
 	l = <-c
-	if l.value != _NEWLINE && l.value != _EOF {
+	if l.value != zNewline && l.value != zEOF {
 		s += l.token
 		goto BuildRR
 	}
 	for i := start; i <= end; i += step {
 		var (
 			escape bool
-			dom    string
+			dom    bytes.Buffer
 			mod    string
 			err    string
 			offset int
@@ -72,7 +73,7 @@ BuildRR:
 			switch s[j] {
 			case '\\':
 				if escape {
-					dom += "\\"
+					dom.WriteByte('\\')
 					escape = false
 					continue
 				}
@@ -81,17 +82,17 @@ BuildRR:
 				mod = "%d"
 				offset = 0
 				if escape {
-					dom += "$"
+					dom.WriteByte('$')
 					escape = false
 					continue
 				}
 				escape = false
 				if j+1 >= len(s) { // End of the string
-					dom += fmt.Sprintf(mod, i+offset)
+					dom.WriteString(fmt.Sprintf(mod, i+offset))
 					continue
 				} else {
 					if s[j+1] == '$' {
-						dom += "$"
+						dom.WriteByte('$')
 						j++
 						continue
 					}
@@ -108,17 +109,17 @@ BuildRR:
 					}
 					j += 2 + sep // Jump to it
 				}
-				dom += fmt.Sprintf(mod, i+offset)
+				dom.WriteString(fmt.Sprintf(mod, i+offset))
 			default:
 				if escape { // Pretty useless here
 					escape = false
 					continue
 				}
-				dom += string(s[j])
+				dom.WriteByte(s[j])
 			}
 		}
 		// Re-parse the RR and send it on the current channel t
-		rx, e := NewRR("$ORIGIN " + o + "\n" + dom)
+		rx, e := NewRR("$ORIGIN " + o + "\n" + dom.String())
 		if e != nil {
 			return e.(*ParseError).err
 		}
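generate() expands a $GENERATE directive into individual records and re-parses each one through NewRR. A short, hedged sketch of feeding such a directive through the zone parser; the zone contents are invented for illustration, and the Token type is the one defined in zscan.go below.

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := `$ORIGIN example.org.
$TTL 300
$GENERATE 1-3 host-$ A 10.0.0.$
`
	// ParseZone returns a channel of *Token; Token.RR carries the parsed
	// record and Token.Error any parse error.
	for tok := range dns.ParseZone(strings.NewReader(zone), "example.org.", "example.org.zone") {
		if tok.Error != nil {
			fmt.Println("parse error:", tok.Error)
			continue
		}
		fmt.Println(tok.RR) // host-1 ... A 10.0.0.1, host-2 ..., host-3 ...
	}
}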
349 Godeps/_workspace/src/github.com/miekg/dns/zscan.go (generated, vendored)

@@ -29,41 +29,41 @@ const maxUint16 = 1<<16 - 1
 // * Handle braces - anywhere.
 const (
 	// Zonefile
-	_EOF = iota
-	_STRING
-	_BLANK
-	_QUOTE
-	_NEWLINE
-	_RRTYPE
-	_OWNER
-	_CLASS
-	_DIRORIGIN   // $ORIGIN
-	_DIRTTL      // $TTL
-	_DIRINCLUDE  // $INCLUDE
-	_DIRGENERATE // $GENERATE
+	zEOF = iota
+	zString
+	zBlank
+	zQuote
+	zNewline
+	zRrtpe
+	zOwner
+	zClass
+	zDirOrigin   // $ORIGIN
+	zDirTtl      // $TTL
+	zDirInclude  // $INCLUDE
+	zDirGenerate // $GENERATE
 
 	// Privatekey file
-	_VALUE
-	_KEY
+	zValue
+	zKey
 
-	_EXPECT_OWNER_DIR      // Ownername
-	_EXPECT_OWNER_BL       // Whitespace after the ownername
-	_EXPECT_ANY            // Expect rrtype, ttl or class
-	_EXPECT_ANY_NOCLASS    // Expect rrtype or ttl
-	_EXPECT_ANY_NOCLASS_BL // The whitespace after _EXPECT_ANY_NOCLASS
-	_EXPECT_ANY_NOTTL      // Expect rrtype or class
-	_EXPECT_ANY_NOTTL_BL   // Whitespace after _EXPECT_ANY_NOTTL
-	_EXPECT_RRTYPE         // Expect rrtype
-	_EXPECT_RRTYPE_BL      // Whitespace BEFORE rrtype
-	_EXPECT_RDATA          // The first element of the rdata
-	_EXPECT_DIRTTL_BL      // Space after directive $TTL
-	_EXPECT_DIRTTL         // Directive $TTL
-	_EXPECT_DIRORIGIN_BL   // Space after directive $ORIGIN
-	_EXPECT_DIRORIGIN      // Directive $ORIGIN
-	_EXPECT_DIRINCLUDE_BL  // Space after directive $INCLUDE
-	_EXPECT_DIRINCLUDE     // Directive $INCLUDE
-	_EXPECT_DIRGENERATE    // Directive $GENERATE
-	_EXPECT_DIRGENERATE_BL // Space after directive $GENERATE
+	zExpectOwnerDir      // Ownername
+	zExpectOwnerBl       // Whitespace after the ownername
+	zExpectAny           // Expect rrtype, ttl or class
+	zExpectAnyNoClass    // Expect rrtype or ttl
+	zExpectAnyNoClassBl  // The whitespace after _EXPECT_ANY_NOCLASS
+	zExpectAnyNoTtl      // Expect rrtype or class
+	zExpectAnyNoTtlBl    // Whitespace after _EXPECT_ANY_NOTTL
+	zExpectRrtype        // Expect rrtype
+	zExpectRrtypeBl      // Whitespace BEFORE rrtype
+	zExpectRdata         // The first element of the rdata
+	zExpectDirTtlBl      // Space after directive $TTL
+	zExpectDirTtl        // Directive $TTL
+	zExpectDirOriginBl   // Space after directive $ORIGIN
+	zExpectDirOrigin     // Directive $ORIGIN
+	zExpectDirIncludeBl  // Space after directive $INCLUDE
+	zExpectDirInclude    // Directive $INCLUDE
+	zExpectDirGenerate   // Directive $GENERATE
+	zExpectDirGenerateBl // Space after directive $GENERATE
 )
 
 // ParseError is a parsing error. It contains the parse error and the location in the io.Reader
@@ -88,18 +88,21 @@ type lex struct {
 	tokenUpper string // uppercase text of the token
 	length     int    // lenght of the token
 	err        bool   // when true, token text has lexer error
-	value      uint8  // value: _STRING, _BLANK, etc.
+	value      uint8  // value: zString, _BLANK, etc.
 	line       int    // line in the file
 	column     int    // column in the file
 	torc       uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
 	comment    string // any comment text seen
 }
 
-// *Tokens are returned when a zone file is parsed.
+// Token holds the token that are returned when a zone file is parsed.
 type Token struct {
-	RR                  // the scanned resource record when error is not nil
-	Error   *ParseError // when an error occured, this has the error specifics
-	Comment string      // a potential comment positioned after the RR and on the same line
+	// The scanned resource record when error is not nil.
+	RR
+	// When an error occured, this has the error specifics.
+	Error *ParseError
+	// A potential comment positioned after the RR and on the same line.
+	Comment string
 }
 
 // NewRR reads the RR contained in the string s. Only the first RR is
@@ -168,17 +171,17 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 		}
 	}()
 	s := scanInit(r)
-	c := make(chan lex, 1000)
+	c := make(chan lex)
 	// Start the lexer
 	go zlexer(s, c)
 	// 6 possible beginnings of a line, _ is a space
-	// 0. _RRTYPE -> all omitted until the rrtype
-	// 1. _OWNER _ _RRTYPE -> class/ttl omitted
-	// 2. _OWNER _ _STRING _ _RRTYPE -> class omitted
-	// 3. _OWNER _ _STRING _ _CLASS _ _RRTYPE -> ttl/class
-	// 4. _OWNER _ _CLASS _ _RRTYPE -> ttl omitted
-	// 5. _OWNER _ _CLASS _ _STRING _ _RRTYPE -> class/ttl (reversed)
-	// After detecting these, we know the _RRTYPE so we can jump to functions
+	// 0. zRRTYPE -> all omitted until the rrtype
+	// 1. zOwner _ zRrtype -> class/ttl omitted
+	// 2. zOwner _ zString _ zRrtype -> class omitted
+	// 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
+	// 4. zOwner _ zClass _ zRrtype -> ttl omitted
+	// 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
+	// After detecting these, we know the zRrtype so we can jump to functions
 	// handling the rdata for each of these types.
 
 	if origin == "" {
@@ -190,7 +193,7 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 		return
 	}
 
-	st := _EXPECT_OWNER_DIR // initial state
+	st := zExpectOwnerDir // initial state
 	var h RR_Header
 	var defttl uint32 = defaultTtl
 	var prevName string
@@ -202,19 +205,19 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 
 		}
 		switch st {
-		case _EXPECT_OWNER_DIR:
+		case zExpectOwnerDir:
 			// We can also expect a directive, like $TTL or $ORIGIN
 			h.Ttl = defttl
 			h.Class = ClassINET
 			switch l.value {
-			case _NEWLINE: // Empty line
-				st = _EXPECT_OWNER_DIR
-			case _OWNER:
+			case zNewline:
+				st = zExpectOwnerDir
+			case zOwner:
 				h.Name = l.token
 				if l.token[0] == '@' {
 					h.Name = origin
 					prevName = h.Name
-					st = _EXPECT_OWNER_BL
+					st = zExpectOwnerBl
 					break
 				}
 				if h.Name[l.length-1] != '.' {
@@ -226,58 +229,58 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 					return
 				}
 				prevName = h.Name
-				st = _EXPECT_OWNER_BL
-			case _DIRTTL:
-				st = _EXPECT_DIRTTL_BL
-			case _DIRORIGIN:
-				st = _EXPECT_DIRORIGIN_BL
-			case _DIRINCLUDE:
-				st = _EXPECT_DIRINCLUDE_BL
-			case _DIRGENERATE:
-				st = _EXPECT_DIRGENERATE_BL
-			case _RRTYPE: // Everthing has been omitted, this is the first thing on the line
+				st = zExpectOwnerBl
+			case zDirTtl:
+				st = zExpectDirTtlBl
+			case zDirOrigin:
+				st = zExpectDirOriginBl
+			case zDirInclude:
+				st = zExpectDirIncludeBl
+			case zDirGenerate:
+				st = zExpectDirGenerateBl
+			case zRrtpe:
 				h.Name = prevName
 				h.Rrtype = l.torc
-				st = _EXPECT_RDATA
-			case _CLASS: // First thing on the line is the class
+				st = zExpectRdata
+			case zClass:
 				h.Name = prevName
 				h.Class = l.torc
-				st = _EXPECT_ANY_NOCLASS_BL
-			case _BLANK:
+				st = zExpectAnyNoClassBl
+			case zBlank:
 				// Discard, can happen when there is nothing on the
 				// line except the RR type
-			case _STRING: // First thing on the is the ttl
-				if ttl, ok := stringToTtl(l.token); !ok {
+			case zString:
+				ttl, ok := stringToTtl(l.token)
+				if !ok {
 					t <- &Token{Error: &ParseError{f, "not a TTL", l}}
 					return
-				} else {
-					h.Ttl = ttl
-					// Don't about the defttl, we should take the $TTL value
-					// defttl = ttl
 				}
-				st = _EXPECT_ANY_NOTTL_BL
+				h.Ttl = ttl
+				// Don't about the defttl, we should take the $TTL value
+				// defttl = ttl
+				st = zExpectAnyNoTtlBl
+
 			default:
 				t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
 				return
 			}
-		case _EXPECT_DIRINCLUDE_BL:
-			if l.value != _BLANK {
+		case zExpectDirIncludeBl:
+			if l.value != zBlank {
 				t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
 				return
 			}
-			st = _EXPECT_DIRINCLUDE
-		case _EXPECT_DIRINCLUDE:
-			if l.value != _STRING {
+			st = zExpectDirInclude
+		case zExpectDirInclude:
+			if l.value != zString {
 				t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
 				return
 			}
 			neworigin := origin // There may be optionally a new origin set after the filename, if not use current one
 			l := <-c
 			switch l.value {
-			case _BLANK:
+			case zBlank:
 				l := <-c
-				if l.value == _STRING {
+				if l.value == zString {
 					if _, ok := IsDomainName(l.token); !ok {
 						t <- &Token{Error: &ParseError{f, "bad origin name", l}}
 						return
@@ -293,7 +296,7 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 					neworigin = l.token
 				}
 			}
-			case _NEWLINE, _EOF:
+			case zNewline, zEOF:
 				// Ok
 			default:
 				t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}}
@@ -310,15 +313,15 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 				return
 			}
 			parseZone(r1, l.token, neworigin, t, include+1)
-			st = _EXPECT_OWNER_DIR
-		case _EXPECT_DIRTTL_BL:
-			if l.value != _BLANK {
+			st = zExpectOwnerDir
+		case zExpectDirTtlBl:
+			if l.value != zBlank {
 				t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
 				return
 			}
-			st = _EXPECT_DIRTTL
-		case _EXPECT_DIRTTL:
-			if l.value != _STRING {
+			st = zExpectDirTtl
+		case zExpectDirTtl:
+			if l.value != zString {
 				t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
 				return
 			}
@@ -326,21 +329,21 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 				t <- &Token{Error: e}
 				return
 			}
-			if ttl, ok := stringToTtl(l.token); !ok {
+			ttl, ok := stringToTtl(l.token)
+			if !ok {
 				t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
 				return
-			} else {
-				defttl = ttl
 			}
-			st = _EXPECT_OWNER_DIR
-		case _EXPECT_DIRORIGIN_BL:
-			if l.value != _BLANK {
+			defttl = ttl
+			st = zExpectOwnerDir
+		case zExpectDirOriginBl:
+			if l.value != zBlank {
 				t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
 				return
 			}
-			st = _EXPECT_DIRORIGIN
-		case _EXPECT_DIRORIGIN:
-			if l.value != _STRING {
+			st = zExpectDirOrigin
+		case zExpectDirOrigin:
+			if l.value != zString {
 				t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
 				return
 			}
@@ -360,15 +363,15 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 			} else {
 				origin = l.token
 			}
-			st = _EXPECT_OWNER_DIR
-		case _EXPECT_DIRGENERATE_BL:
-			if l.value != _BLANK {
+			st = zExpectOwnerDir
+		case zExpectDirGenerateBl:
+			if l.value != zBlank {
 				t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}}
 				return
 			}
-			st = _EXPECT_DIRGENERATE
-		case _EXPECT_DIRGENERATE:
-			if l.value != _STRING {
+			st = zExpectDirGenerate
+		case zExpectDirGenerate:
+			if l.value != zString {
 				t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
 				return
 			}
@@ -376,90 +379,90 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 				t <- &Token{Error: &ParseError{f, e, l}}
 				return
 			}
-			st = _EXPECT_OWNER_DIR
-		case _EXPECT_OWNER_BL:
-			if l.value != _BLANK {
+			st = zExpectOwnerDir
+		case zExpectOwnerBl:
+			if l.value != zBlank {
 				t <- &Token{Error: &ParseError{f, "no blank after owner", l}}
 				return
 			}
-			st = _EXPECT_ANY
-		case _EXPECT_ANY:
+			st = zExpectAny
+		case zExpectAny:
 			switch l.value {
-			case _RRTYPE:
+			case zRrtpe:
 				h.Rrtype = l.torc
-				st = _EXPECT_RDATA
-			case _CLASS:
+				st = zExpectRdata
+			case zClass:
 				h.Class = l.torc
-				st = _EXPECT_ANY_NOCLASS_BL
-			case _STRING: // TTL is this case
-				if ttl, ok := stringToTtl(l.token); !ok {
+				st = zExpectAnyNoClassBl
+			case zString:
+				ttl, ok := stringToTtl(l.token)
+				if !ok {
 					t <- &Token{Error: &ParseError{f, "not a TTL", l}}
 					return
-				} else {
-					h.Ttl = ttl
-					// defttl = ttl // don't set the defttl here
 				}
-				st = _EXPECT_ANY_NOTTL_BL
+				h.Ttl = ttl
+				// defttl = ttl // don't set the defttl here
+				st = zExpectAnyNoTtlBl
 			default:
 				t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
 				return
 			}
-		case _EXPECT_ANY_NOCLASS_BL:
-			if l.value != _BLANK {
+		case zExpectAnyNoClassBl:
+			if l.value != zBlank {
 				t <- &Token{Error: &ParseError{f, "no blank before class", l}}
 				return
 			}
-			st = _EXPECT_ANY_NOCLASS
-		case _EXPECT_ANY_NOTTL_BL:
-			if l.value != _BLANK {
+			st = zExpectAnyNoClass
+		case zExpectAnyNoTtlBl:
+			if l.value != zBlank {
 				t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
 				return
 			}
-			st = _EXPECT_ANY_NOTTL
-		case _EXPECT_ANY_NOTTL:
+			st = zExpectAnyNoTtl
+		case zExpectAnyNoTtl:
 			switch l.value {
-			case _CLASS:
+			case zClass:
 				h.Class = l.torc
-				st = _EXPECT_RRTYPE_BL
-			case _RRTYPE:
+				st = zExpectRrtypeBl
+			case zRrtpe:
 				h.Rrtype = l.torc
-				st = _EXPECT_RDATA
+				st = zExpectRdata
 			default:
 				t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
 				return
 			}
-		case _EXPECT_ANY_NOCLASS:
+		case zExpectAnyNoClass:
 			switch l.value {
-			case _STRING: // TTL
-				if ttl, ok := stringToTtl(l.token); !ok {
+			case zString:
+				ttl, ok := stringToTtl(l.token)
+				if !ok {
 					t <- &Token{Error: &ParseError{f, "not a TTL", l}}
 					return
-				} else {
-					h.Ttl = ttl
-					// defttl = ttl // don't set the def ttl anymore
 				}
-				st = _EXPECT_RRTYPE_BL
-			case _RRTYPE:
+				h.Ttl = ttl
+				// defttl = ttl // don't set the def ttl anymore
+				st = zExpectRrtypeBl
+			case zRrtpe:
 				h.Rrtype = l.torc
-				st = _EXPECT_RDATA
+				st = zExpectRdata
 			default:
 				t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
 				return
 			}
-		case _EXPECT_RRTYPE_BL:
-			if l.value != _BLANK {
+		case zExpectRrtypeBl:
+			if l.value != zBlank {
 				t <- &Token{Error: &ParseError{f, "no blank before RR type", l}}
 				return
 			}
-			st = _EXPECT_RRTYPE
-		case _EXPECT_RRTYPE:
-			if l.value != _RRTYPE {
+			st = zExpectRrtype
+		case zExpectRrtype:
+			if l.value != zRrtpe {
 				t <- &Token{Error: &ParseError{f, "unknown RR type", l}}
 				return
 			}
 			h.Rrtype = l.torc
-			st = _EXPECT_RDATA
-		case _EXPECT_RDATA:
+			st = zExpectRdata
+		case zExpectRdata:
 			r, e, c1 := setRR(h, c, origin, f)
 			if e != nil {
 				// If e.lex is nil than we have encounter a unknown RR type
@@ -471,7 +474,7 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
 				return
 			}
 			t <- &Token{RR: r, Comment: c1}
-			st = _EXPECT_OWNER_DIR
+			st = zExpectOwnerDir
 		}
 	}
 	// If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this
@@ -535,60 +538,60 @@ func zlexer(s *scan, c chan lex) {
 				// Space directly in the beginning, handled in the grammar
 			} else if owner {
 				// If we have a string and its the first, make it an owner
-				l.value = _OWNER
+				l.value = zOwner
 				l.token = string(str[:stri])
 				l.tokenUpper = strings.ToUpper(l.token)
 				l.length = stri
 				// escape $... start with a \ not a $, so this will work
 				switch l.tokenUpper {
 				case "$TTL":
-					l.value = _DIRTTL
+					l.value = zDirTtl
 				case "$ORIGIN":
-					l.value = _DIRORIGIN
+					l.value = zDirOrigin
 				case "$INCLUDE":
-					l.value = _DIRINCLUDE
+					l.value = zDirInclude
 				case "$GENERATE":
-					l.value = _DIRGENERATE
+					l.value = zDirGenerate
 				}
 				debug.Printf("[7 %+v]", l.token)
 				c <- l
 			} else {
-				l.value = _STRING
+				l.value = zString
 				l.token = string(str[:stri])
 				l.tokenUpper = strings.ToUpper(l.token)
 				l.length = stri
 				if !rrtype {
 					if t, ok := StringToType[l.tokenUpper]; ok {
-						l.value = _RRTYPE
+						l.value = zRrtpe
 						l.torc = t
 						rrtype = true
 					} else {
 						if strings.HasPrefix(l.tokenUpper, "TYPE") {
-							if t, ok := typeToInt(l.token); !ok {
+							t, ok := typeToInt(l.token)
+							if !ok {
 								l.token = "unknown RR type"
 								l.err = true
 								c <- l
 								return
-							} else {
-								l.value = _RRTYPE
-								l.torc = t
 							}
+							l.value = zRrtpe
+							l.torc = t
 						}
 					}
 					if t, ok := StringToClass[l.tokenUpper]; ok {
-						l.value = _CLASS
+						l.value = zClass
 						l.torc = t
 					} else {
 						if strings.HasPrefix(l.tokenUpper, "CLASS") {
-							if t, ok := classToInt(l.token); !ok {
+							t, ok := classToInt(l.token)
+							if !ok {
 								l.token = "unknown class"
 								l.err = true
 								c <- l
 								return
-							} else {
-								l.value = _CLASS
-								l.torc = t
 							}
+							l.value = zClass
+							l.torc = t
 						}
 					}
 				}
@@ -598,7 +601,7 @@ func zlexer(s *scan, c chan lex) {
 			stri = 0
 			// I reverse space stuff here
 			if !space && !commt {
-				l.value = _BLANK
+				l.value = zBlank
 				l.token = " "
 				l.length = 1
 				debug.Printf("[5 %+v]", l.token)
@@ -620,7 +623,7 @@ func zlexer(s *scan, c chan lex) {
 				break
 			}
 			if stri > 0 {
-				l.value = _STRING
+				l.value = zString
 				l.token = string(str[:stri])
 				l.length = stri
 				debug.Printf("[4 %+v]", l.token)
@@ -656,7 +659,7 @@ func zlexer(s *scan, c chan lex) {
 			if brace == 0 {
 				owner = true
 				owner = true
-				l.value = _NEWLINE
+				l.value = zNewline
 				l.token = "\n"
 				l.length = 1
 				l.comment = string(com[:comi])
@@ -674,14 +677,14 @@ func zlexer(s *scan, c chan lex) {
 			if brace == 0 {
 				// If there is previous text, we should output it here
 				if stri != 0 {
-					l.value = _STRING
+					l.value = zString
 					l.token = string(str[:stri])
 					l.tokenUpper = strings.ToUpper(l.token)
 
 					l.length = stri
 					if !rrtype {
 						if t, ok := StringToType[l.tokenUpper]; ok {
-							l.value = _RRTYPE
+							l.value = zRrtpe
 							l.torc = t
 							rrtype = true
 						}
@@ -689,7 +692,7 @@ func zlexer(s *scan, c chan lex) {
 					debug.Printf("[2 %+v]", l.token)
 					c <- l
 				}
-				l.value = _NEWLINE
+				l.value = zNewline
 				l.token = "\n"
 				l.length = 1
 				debug.Printf("[1 %+v]", l.token)
@@ -733,7 +736,7 @@ func zlexer(s *scan, c chan lex) {
 			space = false
 			// send previous gathered text and the quote
 			if stri != 0 {
-				l.value = _STRING
+				l.value = zString
 				l.token = string(str[:stri])
 				l.length = stri
 
@@ -743,7 +746,7 @@ func zlexer(s *scan, c chan lex) {
 			}
 
 			// send quote itself as separate token
-			l.value = _QUOTE
+			l.value = zQuote
 			l.token = "\""
 			l.length = 1
 			c <- l
@@ -795,7 +798,7 @@ func zlexer(s *scan, c chan lex) {
 		// Send remainder
 		l.token = string(str[:stri])
 		l.length = stri
-		l.value = _STRING
+		l.value = zString
 		debug.Printf("[%+v]", l.token)
 		c <- l
 	}
@@ -927,15 +930,15 @@ func slurpRemainder(c chan lex, f string) (*ParseError, string) {
 	l := <-c
 	com := ""
 	switch l.value {
-	case _BLANK:
+	case zBlank:
 		l = <-c
 		com = l.comment
-		if l.value != _NEWLINE && l.value != _EOF {
+		if l.value != zNewline && l.value != zEOF {
 			return &ParseError{f, "garbage after rdata", l}, ""
 		}
-	case _NEWLINE:
+	case zNewline:
 		com = l.comment
-	case _EOF:
+	case zEOF:
 	default:
 		return &ParseError{f, "garbage after rdata", l}, ""
 	}
742 Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go (generated, vendored)
(File diff suppressed because it is too large.)
@@ -20,7 +20,7 @@ import (
 
 	dto "github.com/prometheus/client_model/go"
 
-	"github.com/matttproud/golang_protobuf_extensions/ext"
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
 
 	"github.com/prometheus/client_golang/model"
 )
@@ -40,7 +40,7 @@ func (m *metricFamilyProcessor) ProcessSingle(i io.Reader, out Ingester, o *Proc
 	for {
 		family.Reset()
 
-		if _, err := ext.ReadDelimited(i, family); err != nil {
+		if _, err := pbutil.ReadDelimited(i, family); err != nil {
 			if err == io.EOF {
 				return nil
 			}
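The hunks above only swap the golang_protobuf_extensions import from ext to pbutil; the call shape stays the same. A hedged, self-contained sketch of the same read loop over length-delimited MetricFamily protobufs (the helper name readFamilies is invented for illustration):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

// readFamilies drains a stream of length-delimited MetricFamily protobufs,
// mirroring the loop in the processor shown above.
func readFamilies(r io.Reader) error {
	family := &dto.MetricFamily{}
	for {
		family.Reset()
		if _, err := pbutil.ReadDelimited(r, family); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		fmt.Println(family.GetName())
	}
}

func main() {
	if err := readFamilies(os.Stdin); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}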
18 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/examples_test.go (generated, vendored)

@@ -18,8 +18,10 @@ import (
 	"fmt"
 	"math"
 	"net/http"
+	"os"
 	"runtime"
 	"sort"
+	"time"
 
 	dto "github.com/prometheus/client_model/go"
 
@@ -498,3 +500,19 @@ func ExampleHistogram() {
 	// >
 	// >
 }
+
+func ExamplePushCollectors() {
+	hostname, _ := os.Hostname()
+	completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "db_backup_last_completion_time",
+		Help: "The timestamp of the last succesful completion of a DB backup.",
+	})
+	completionTime.Set(float64(time.Now().Unix()))
+	if err := prometheus.PushCollectors(
+		"db_backup", hostname,
+		"http://pushgateway:9091",
+		completionTime,
+	); err != nil {
+		fmt.Println("Could not push completion time to Pushgateway:", err)
+	}
+}

@@ -13,6 +13,8 @@
 
 package prometheus
 
+import "github.com/prometheus/procfs"
+
 type processCollector struct {
 	pid       int
 	collectFn func(chan<- Metric)
@@ -79,7 +81,7 @@ func NewProcessCollectorPIDFn(
 	}
 
 	// Set up process metric collection if supported by the runtime.
-	if processCollectSupported() {
+	if _, err := procfs.NewStat(); err == nil {
 		c.collectFn = c.processCollect
 	}
 
@@ -100,3 +102,41 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
 func (c *processCollector) Collect(ch chan<- Metric) {
 	c.collectFn(ch)
 }
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	pid, err := c.pidFn()
+	if err != nil {
+		return
+	}
+
+	p, err := procfs.NewProc(pid)
+	if err != nil {
+		return
+	}
+
+	if stat, err := p.NewStat(); err == nil {
+		c.cpuTotal.Set(stat.CPUTime())
+		ch <- c.cpuTotal
+		c.vsize.Set(float64(stat.VirtualMemory()))
+		ch <- c.vsize
+		c.rss.Set(float64(stat.ResidentMemory()))
+		ch <- c.rss
+
+		if startTime, err := stat.StartTime(); err == nil {
+			c.startTime.Set(startTime)
+			ch <- c.startTime
+		}
+	}
+
+	if fds, err := p.FileDescriptorsLen(); err == nil {
+		c.openFDs.Set(float64(fds))
+		ch <- c.openFDs
+	}
+
+	if limits, err := p.NewLimits(); err == nil {
+		c.maxFDs.Set(float64(limits.OpenFiles))
+		ch <- c.maxFDs
+	}
+}

@@ -1,63 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux,cgo plan9,cgo solaris,cgo
-
-package prometheus
-
-import "github.com/prometheus/procfs"
-
-func processCollectSupported() bool {
-	if _, err := procfs.NewStat(); err == nil {
-		return true
-	}
-	return false
-}
-
-// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
-// client allows users to configure the error behavior.
-func (c *processCollector) processCollect(ch chan<- Metric) {
-	pid, err := c.pidFn()
-	if err != nil {
-		return
-	}
-
-	p, err := procfs.NewProc(pid)
-	if err != nil {
-		return
-	}
-
-	if stat, err := p.NewStat(); err == nil {
-		c.cpuTotal.Set(stat.CPUTime())
-		ch <- c.cpuTotal
-		c.vsize.Set(float64(stat.VirtualMemory()))
-		ch <- c.vsize
-		c.rss.Set(float64(stat.ResidentMemory()))
-		ch <- c.rss
-
-		if startTime, err := stat.StartTime(); err == nil {
-			c.startTime.Set(startTime)
-			ch <- c.startTime
-		}
-	}
-
-	if fds, err := p.FileDescriptorsLen(); err == nil {
-		c.openFDs.Set(float64(fds))
-		ch <- c.openFDs
-	}
-
-	if limits, err := p.NewLimits(); err == nil {
-		c.maxFDs.Set(float64(limits.OpenFiles))
-		ch <- c.maxFDs
-	}
-}

@@ -1,24 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux,!plan9,!solaris !cgo
-
-package prometheus
-
-func processCollectSupported() bool {
-	return false
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
-	panic("unreachable")
-}
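With the platform-specific files above removed, the process collector now simply probes procfs when it is constructed (the procfs.NewStat check in NewProcessCollectorPIDFn) and silently exports nothing where /proc is unavailable. A hedged sketch of wiring such a collector up; the exact constructor signature and the Handler helper follow the client_golang API of this era and should be treated as assumptions.

package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Collect metrics about this very process.
	c := prometheus.NewProcessCollectorPIDFn(
		func() (int, error) { return os.Getpid(), nil },
		"", // no namespace prefix
	)
	prometheus.MustRegister(c)

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}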
65 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/push.go (generated, vendored, new file)

@@ -0,0 +1,65 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+// Push triggers a metric collection by the default registry and pushes all
+// collected metrics to the Pushgateway specified by addr. See the Pushgateway
+// documentation for detailed implications of the job and instance
+// parameter. instance can be left empty. You can use just host:port or ip:port
+// as url, in which case 'http://' is added automatically. You can also include
+// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
+//
+// Note that all previously pushed metrics with the same job and instance will
+// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
+// to push to the Pushgateway.)
+func Push(job, instance, url string) error {
+	return defRegistry.Push(job, instance, url, "PUT")
+}
+
+// PushAdd works like Push, but only previously pushed metrics with the same
+// name (and the same job and instance) will be replaced. (It uses HTTP method
+// 'POST' to push to the Pushgateway.)
+func PushAdd(job, instance, url string) error {
+	return defRegistry.Push(job, instance, url, "POST")
+}
+
+// PushCollectors works like Push, but it does not collect from the default
+// registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushCollectors(job, instance, url string, collectors ...Collector) error {
+	return pushCollectors(job, instance, url, "PUT", collectors...)
+}
+
+// PushAddCollectors works like PushAdd, but it does not collect from the
+// default registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
+	return pushCollectors(job, instance, url, "POST", collectors...)
+}
+
+func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
+	r := newRegistry()
+	for _, collector := range collectors {
+		if _, err := r.Register(collector); err != nil {
+			return err
+		}
+	}
+	return r.Push(job, instance, url, method)
+}
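The new push.go above layers Push, PushAdd and the *Collectors variants over registry.Push; per its doc comment the Pushgateway URL may omit the scheme, in which case 'http://' is prepended. A short hedged sketch of the simplest call (job name, instance and Pushgateway address are made up):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Pushes everything in the default registry; "http://" is added
	// automatically because the address carries no scheme.
	if err := prometheus.Push("nightly_batch", "worker-1", "pushgateway.example.org:9091"); err != nil {
		fmt.Println("push failed:", err)
	}
}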
139
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
139
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
|
@ -158,14 +158,19 @@ func Unregister(c Collector) bool {
|
||||||
// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
|
// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
|
||||||
// are collected. The hook function must be set before metrics collection begins
|
// are collected. The hook function must be set before metrics collection begins
|
||||||
// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
|
// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
|
||||||
// MetricFamily protobufs returned by the hook function are added to the
|
// MetricFamily protobufs returned by the hook function are merged with the
|
||||||
// delivered metrics. Each returned MetricFamily must have a unique name (also
|
// metrics collected in the usual way.
|
||||||
// taking into account the MetricFamilies created in the regular way).
|
|
||||||
//
|
//
|
||||||
// This is a way to directly inject MetricFamily protobufs managed and owned by
|
// This is a way to directly inject MetricFamily protobufs managed and owned by
|
||||||
// the caller. The caller has full responsibility. No sanity checks are
|
// the caller. The caller has full responsibility. As no registration of the
|
||||||
// performed on the returned protobufs (besides the name checks described
|
// injected metrics has happened, there is no descriptor to check against, and
|
||||||
// above). The function must be callable at any time and concurrently.
|
// there are no registration-time checks. If collect-time checks are disabled
|
||||||
|
// (see function EnableCollectChecks), no sanity checks are performed on the
|
||||||
|
// returned protobufs at all. If collect-checks are enabled, type and uniqueness
|
||||||
|
// checks are performed, but no further consistency checks (which would require
|
||||||
|
// knowledge of a metric descriptor).
|
||||||
|
//
|
||||||
|
// The function must be callable at any time and concurrently.
|
||||||
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
|
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
|
||||||
defRegistry.metricFamilyInjectionHook = hook
|
defRegistry.metricFamilyInjectionHook = hook
|
||||||
}
|
}
|
||||||
|
@ -187,30 +192,10 @@ func EnableCollectChecks(b bool) {
|
||||||
defRegistry.collectChecksEnabled = b
|
defRegistry.collectChecksEnabled = b
|
||||||
}
|
}
|
||||||
|
|
||||||
// Push triggers a metric collection and pushes all collected metrics to the
|
|
||||||
// Pushgateway specified by addr. See the Pushgateway documentation for detailed
|
|
||||||
// implications of the job and instance parameter. instance can be left
|
|
||||||
// empty. The Pushgateway will then use the client's IP number instead. Use just
|
|
||||||
// host:port or ip:port ass addr. (Don't add 'http://' or any path.)
|
|
||||||
//
|
|
||||||
// Note that all previously pushed metrics with the same job and instance will
|
|
||||||
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
|
|
||||||
// to push to the Pushgateway.)
|
|
||||||
func Push(job, instance, addr string) error {
|
|
||||||
return defRegistry.Push(job, instance, addr, "PUT")
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushAdd works like Push, but only previously pushed metrics with the same
|
|
||||||
// name (and the same job and instance) will be replaced. (It uses HTTP method
|
|
||||||
// 'POST' to push to the Pushgateway.)
|
|
||||||
func PushAdd(job, instance, addr string) error {
|
|
||||||
return defRegistry.Push(job, instance, addr, "POST")
|
|
||||||
}
|
|
||||||
|
|
||||||
 // encoder is a function that writes a dto.MetricFamily to an io.Writer in a
 // certain encoding. It returns the number of bytes written and any error
-// encountered. Note that ext.WriteDelimited and text.MetricFamilyToText are
-// encoders.
+// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
+// are encoders.
 type encoder func(io.Writer, *dto.MetricFamily) (int, error)
 
 type registry struct {
@ -346,10 +331,13 @@ func (r *registry) Unregister(c Collector) bool {
 	return true
 }
 
-func (r *registry) Push(job, instance, addr, method string) error {
-	u := fmt.Sprintf("http://%s/metrics/jobs/%s", addr, url.QueryEscape(job))
+func (r *registry) Push(job, instance, pushURL, method string) error {
+	if !strings.Contains(pushURL, "://") {
+		pushURL = "http://" + pushURL
+	}
+	pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
 	if instance != "" {
-		u += "/instances/" + url.QueryEscape(instance)
+		pushURL += "/instances/" + url.QueryEscape(instance)
 	}
 	buf := r.getBuf()
 	defer r.giveBuf(buf)
@ -359,7 +347,7 @@ func (r *registry) Push(job, instance, addr, method string) error {
 		}
 		return err
 	}
-	req, err := http.NewRequest(method, u, buf)
+	req, err := http.NewRequest(method, pushURL, buf)
 	if err != nil {
 		return err
 	}
@ -370,7 +358,7 @@ func (r *registry) Push(job, instance, addr, method string) error {
 	}
 	defer resp.Body.Close()
 	if resp.StatusCode != 202 {
-		return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, u)
+		return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
 	}
 	return nil
 }
@ -479,10 +467,26 @@ func (r *registry) writePB(w io.Writer, writeEncoded encoder) (int, error) {
 
 	if r.metricFamilyInjectionHook != nil {
 		for _, mf := range r.metricFamilyInjectionHook() {
-			if _, exists := metricFamiliesByName[mf.GetName()]; exists {
-				return 0, fmt.Errorf("metric family with duplicate name injected: %s", mf)
+			existingMF, exists := metricFamiliesByName[mf.GetName()]
+			if !exists {
+				metricFamiliesByName[mf.GetName()] = mf
+				if r.collectChecksEnabled {
+					for _, m := range mf.Metric {
+						if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
+							return 0, err
+						}
+					}
+				}
+				continue
+			}
+			for _, m := range mf.Metric {
+				if r.collectChecksEnabled {
+					if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
+						return 0, err
+					}
+				}
+				existingMF.Metric = append(existingMF.Metric, m)
 			}
-			metricFamiliesByName[mf.GetName()] = mf
 		}
 	}
 
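The rewritten loop above means an injected family whose name collides with an already collected one is merged rather than rejected. A rough standalone restatement of that rule (the helper name is made up):

	// mergeInjected appends the metrics of injected families onto existing
	// families with the same name; families with new names are taken as-is.
	func mergeInjected(byName map[string]*dto.MetricFamily, injected []*dto.MetricFamily) {
		for _, mf := range injected {
			if existing, ok := byName[mf.GetName()]; ok {
				existing.Metric = append(existing.Metric, mf.Metric...)
				continue
			}
			byName[mf.GetName()] = mf
		}
	}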
@ -523,11 +527,42 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
 		)
 	}
 
+	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
+	h := fnv.New64a()
+	var buf bytes.Buffer
+	buf.WriteString(metricFamily.GetName())
+	buf.WriteByte(model.SeparatorByte)
+	h.Write(buf.Bytes())
+	for _, lp := range dtoMetric.Label {
+		buf.Reset()
+		buf.WriteString(lp.GetValue())
+		buf.WriteByte(model.SeparatorByte)
+		h.Write(buf.Bytes())
+	}
+	metricHash := h.Sum64()
+	if _, exists := metricHashes[metricHash]; exists {
+		return fmt.Errorf(
+			"collected metric %q was collected before with the same name and label values",
+			dtoMetric,
+		)
+	}
+	metricHashes[metricHash] = struct{}{}
+
+	if desc == nil {
+		return nil // Nothing left to check if we have no desc.
+	}
+
 	// Desc consistency with metric family.
+	if metricFamily.GetName() != desc.fqName {
+		return fmt.Errorf(
+			"collected metric %q has name %q but should have %q",
+			dtoMetric, metricFamily.GetName(), desc.fqName,
+		)
+	}
 	if metricFamily.GetHelp() != desc.help {
 		return fmt.Errorf(
 			"collected metric %q has help %q but should have %q",
-			dtoMetric, desc.help, metricFamily.GetHelp(),
+			dtoMetric, metricFamily.GetHelp(), desc.help,
 		)
 	}
 
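The uniqueness check added above keys each collected metric by an FNV-1a hash over the family name and the label values, joined with model.SeparatorByte. A standalone sketch of that identity hash (the function name is made up; it assumes the same hash/fnv and model packages):

	func metricIdentity(name string, labelValues []string) uint64 {
		h := fnv.New64a()
		h.Write([]byte(name))
		h.Write([]byte{model.SeparatorByte})
		for _, v := range labelValues {
			h.Write([]byte(v))
			h.Write([]byte{model.SeparatorByte})
		}
		return h.Sum64()
	}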
@ -557,27 +592,6 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
 		}
 	}
 
-	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
-	h := fnv.New64a()
-	var buf bytes.Buffer
-	buf.WriteString(desc.fqName)
-	buf.WriteByte(model.SeparatorByte)
-	h.Write(buf.Bytes())
-	for _, lp := range dtoMetric.Label {
-		buf.Reset()
-		buf.WriteString(lp.GetValue())
-		buf.WriteByte(model.SeparatorByte)
-		h.Write(buf.Bytes())
-	}
-	metricHash := h.Sum64()
-	if _, exists := metricHashes[metricHash]; exists {
-		return fmt.Errorf(
-			"collected metric %q was collected before with the same name and label values",
-			dtoMetric,
-		)
-	}
-	metricHashes[metricHash] = struct{}{}
-
 	r.mtx.RLock() // Remaining checks need the read lock.
 	defer r.mtx.RUnlock()
 
@ -712,6 +726,15 @@ func (s metricSorter) Swap(i, j int) {
 }
 
 func (s metricSorter) Less(i, j int) bool {
+	if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are
+		// inconsistent. However, we have to deal with the fact, as
+		// people might use custom collectors or metric family injection
+		// to create inconsistent metrics. So let's simply compare the
+		// number of labels in this case. That will still yield
+		// reproducible sorting.
+		return len(s[i].Label) < len(s[j].Label)
+	}
 	for n, lp := range s[i].Label {
 		vi := lp.GetValue()
 		vj := s[j].Label[n].GetValue()
130 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry_test.go (generated, vendored)
|
@ -61,31 +61,29 @@ func testHandler(t testing.TB) {
|
||||||
|
|
||||||
varintBuf := make([]byte, binary.MaxVarintLen32)
|
varintBuf := make([]byte, binary.MaxVarintLen32)
|
||||||
|
|
||||||
externalMetricFamily := []*dto.MetricFamily{
|
externalMetricFamily := &dto.MetricFamily{
|
||||||
{
|
Name: proto.String("externalname"),
|
||||||
Name: proto.String("externalname"),
|
Help: proto.String("externaldocstring"),
|
||||||
Help: proto.String("externaldocstring"),
|
Type: dto.MetricType_COUNTER.Enum(),
|
||||||
Type: dto.MetricType_COUNTER.Enum(),
|
Metric: []*dto.Metric{
|
||||||
Metric: []*dto.Metric{
|
{
|
||||||
{
|
Label: []*dto.LabelPair{
|
||||||
Label: []*dto.LabelPair{
|
{
|
||||||
{
|
Name: proto.String("externallabelname"),
|
||||||
Name: proto.String("externallabelname"),
|
Value: proto.String("externalval1"),
|
||||||
Value: proto.String("externalval1"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: proto.String("externalconstname"),
|
|
||||||
Value: proto.String("externalconstvalue"),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
Counter: &dto.Counter{
|
{
|
||||||
Value: proto.Float64(1),
|
Name: proto.String("externalconstname"),
|
||||||
|
Value: proto.String("externalconstvalue"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
Counter: &dto.Counter{
|
||||||
|
Value: proto.Float64(1),
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily[0])
|
marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -216,16 +214,42 @@ metric: <
|
||||||
expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
|
expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
|
||||||
`)
|
`)
|
||||||
|
|
||||||
|
externalMetricFamilyWithSameName := &dto.MetricFamily{
|
||||||
|
Name: proto.String("name"),
|
||||||
|
Help: proto.String("inconsistent help string does not matter here"),
|
||||||
|
Type: dto.MetricType_COUNTER.Enum(),
|
||||||
|
Metric: []*dto.Metric{
|
||||||
|
{
|
||||||
|
Label: []*dto.LabelPair{
|
||||||
|
{
|
||||||
|
Name: proto.String("constname"),
|
||||||
|
Value: proto.String("constvalue"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: proto.String("labelname"),
|
||||||
|
Value: proto.String("different_val"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Counter: &dto.Counter{
|
||||||
|
Value: proto.Float64(42),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
|
||||||
|
`)
|
||||||
|
|
||||||
type output struct {
|
type output struct {
|
||||||
headers map[string]string
|
headers map[string]string
|
||||||
body []byte
|
body []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
var scenarios = []struct {
|
var scenarios = []struct {
|
||||||
headers map[string]string
|
headers map[string]string
|
||||||
out output
|
out output
|
||||||
withCounter bool
|
collector Collector
|
||||||
withExternalMF bool
|
externalMF []*dto.MetricFamily
|
||||||
}{
|
}{
|
||||||
{ // 0
|
{ // 0
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -281,7 +305,7 @@ metric: <
|
||||||
},
|
},
|
||||||
body: expectedMetricFamilyAsText,
|
body: expectedMetricFamilyAsText,
|
||||||
},
|
},
|
||||||
withCounter: true,
|
collector: metricVec,
|
||||||
},
|
},
|
||||||
{ // 5
|
{ // 5
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -293,7 +317,7 @@ metric: <
|
||||||
},
|
},
|
||||||
body: expectedMetricFamilyAsBytes,
|
body: expectedMetricFamilyAsBytes,
|
||||||
},
|
},
|
||||||
withCounter: true,
|
collector: metricVec,
|
||||||
},
|
},
|
||||||
{ // 6
|
{ // 6
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -305,7 +329,7 @@ metric: <
|
||||||
},
|
},
|
||||||
body: externalMetricFamilyAsText,
|
body: externalMetricFamilyAsText,
|
||||||
},
|
},
|
||||||
withExternalMF: true,
|
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
||||||
},
|
},
|
||||||
{ // 7
|
{ // 7
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -317,7 +341,7 @@ metric: <
|
||||||
},
|
},
|
||||||
body: externalMetricFamilyAsBytes,
|
body: externalMetricFamilyAsBytes,
|
||||||
},
|
},
|
||||||
withExternalMF: true,
|
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
||||||
},
|
},
|
||||||
{ // 8
|
{ // 8
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -335,8 +359,8 @@ metric: <
|
||||||
[]byte{},
|
[]byte{},
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
withCounter: true,
|
collector: metricVec,
|
||||||
withExternalMF: true,
|
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
||||||
},
|
},
|
||||||
{ // 9
|
{ // 9
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -359,7 +383,7 @@ metric: <
|
||||||
},
|
},
|
||||||
body: expectedMetricFamilyAsText,
|
body: expectedMetricFamilyAsText,
|
||||||
},
|
},
|
||||||
withCounter: true,
|
collector: metricVec,
|
||||||
},
|
},
|
||||||
{ // 11
|
{ // 11
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -377,8 +401,8 @@ metric: <
|
||||||
[]byte{},
|
[]byte{},
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
withCounter: true,
|
collector: metricVec,
|
||||||
withExternalMF: true,
|
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
||||||
},
|
},
|
||||||
{ // 12
|
{ // 12
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -396,8 +420,8 @@ metric: <
|
||||||
[]byte{},
|
[]byte{},
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
withCounter: true,
|
collector: metricVec,
|
||||||
withExternalMF: true,
|
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
||||||
},
|
},
|
||||||
{ // 13
|
{ // 13
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -415,8 +439,8 @@ metric: <
|
||||||
[]byte{},
|
[]byte{},
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
withCounter: true,
|
collector: metricVec,
|
||||||
withExternalMF: true,
|
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
||||||
},
|
},
|
||||||
{ // 14
|
{ // 14
|
||||||
headers: map[string]string{
|
headers: map[string]string{
|
||||||
|
@ -434,20 +458,42 @@ metric: <
|
||||||
[]byte{},
|
[]byte{},
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
withCounter: true,
|
collector: metricVec,
|
||||||
withExternalMF: true,
|
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
||||||
|
},
|
||||||
|
{ // 15
|
||||||
|
headers: map[string]string{
|
||||||
|
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
|
||||||
|
},
|
||||||
|
out: output{
|
||||||
|
headers: map[string]string{
|
||||||
|
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
|
||||||
|
},
|
||||||
|
body: bytes.Join(
|
||||||
|
[][]byte{
|
||||||
|
externalMetricFamilyAsProtoCompactText,
|
||||||
|
expectedMetricFamilyMergedWithExternalAsProtoCompactText,
|
||||||
|
},
|
||||||
|
[]byte{},
|
||||||
|
),
|
||||||
|
},
|
||||||
|
collector: metricVec,
|
||||||
|
externalMF: []*dto.MetricFamily{
|
||||||
|
externalMetricFamily,
|
||||||
|
externalMetricFamilyWithSameName,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for i, scenario := range scenarios {
|
for i, scenario := range scenarios {
|
||||||
registry := newRegistry()
|
registry := newRegistry()
|
||||||
registry.collectChecksEnabled = true
|
registry.collectChecksEnabled = true
|
||||||
|
|
||||||
if scenario.withCounter {
|
if scenario.collector != nil {
|
||||||
registry.Register(metricVec)
|
registry.Register(scenario.collector)
|
||||||
}
|
}
|
||||||
if scenario.withExternalMF {
|
if scenario.externalMF != nil {
|
||||||
registry.metricFamilyInjectionHook = func() []*dto.MetricFamily {
|
registry.metricFamilyInjectionHook = func() []*dto.MetricFamily {
|
||||||
return externalMetricFamily
|
return scenario.externalMF
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
writer := &fakeResponseWriter{
|
writer := &fakeResponseWriter{
|
||||||
|
|
15 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go (generated, vendored)
|
@ -16,6 +16,7 @@ package prometheus
 import (
 	"fmt"
 	"hash/fnv"
+	"math"
 	"sort"
 	"sync"
 	"time"
@ -277,10 +278,8 @@ func (s *summary) Write(out *dto.Metric) error {
 
 	s.bufMtx.Lock()
 	s.mtx.Lock()
-	if len(s.hotBuf) != 0 {
-		s.swapBufs(time.Now())
-	}
+	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+	s.swapBufs(time.Now())
 	s.bufMtx.Unlock()
 
 	s.flushColdBuf()
@ -288,9 +287,15 @@ func (s *summary) Write(out *dto.Metric) error {
 	sum.SampleSum = proto.Float64(s.sum)
 
 	for _, rank := range s.sortedObjectives {
+		var q float64
+		if s.headStream.Count() == 0 {
+			q = math.NaN()
+		} else {
+			q = s.headStream.Query(rank)
+		}
 		qs = append(qs, &dto.Quantile{
 			Quantile: proto.Float64(rank),
-			Value:    proto.Float64(s.headStream.Query(rank)),
+			Value:    proto.Float64(q),
 		})
 	}
 
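With the change above, a summary whose sliding time window holds no observations reports NaN quantiles instead of stale values. A small illustration against this vendored client_golang API (the metric name and MaxAge are illustrative):

	sum := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:   "example_duration_seconds",
		Help:   "Illustrative summary.",
		MaxAge: time.Second, // short sliding window for the example
	})
	m := &dto.Metric{}
	sum.Write(m) // no observations within MaxAge
	fmt.Println(math.IsNaN(*m.Summary.Quantile[0].Value)) // expected: true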
|
|
11 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary_test.go (generated, vendored)
|
@ -289,6 +289,11 @@ func TestSummaryVecConcurrency(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSummaryDecay(t *testing.T) {
|
func TestSummaryDecay(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping test in short mode.")
|
||||||
|
// More because it depends on timing than because it is particularly long...
|
||||||
|
}
|
||||||
|
|
||||||
sum := NewSummary(SummaryOpts{
|
sum := NewSummary(SummaryOpts{
|
||||||
Name: "test_summary",
|
Name: "test_summary",
|
||||||
Help: "helpless",
|
Help: "helpless",
|
||||||
|
@ -315,6 +320,12 @@ func TestSummaryDecay(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tick.Stop()
|
tick.Stop()
|
||||||
|
// Wait for MaxAge without observations and make sure quantiles are NaN.
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
sum.Write(m)
|
||||||
|
if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
|
||||||
|
t.Errorf("got %f, want NaN after expiration", got)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getBounds(vars []float64, q, ε float64) (min, max float64) {
|
func getBounds(vars []float64, q, ε float64) (min, max float64) {
|
||||||
|
|
8 Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go (generated, vendored)
|
@ -21,7 +21,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
"github.com/matttproud/golang_protobuf_extensions/ext"
|
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Benchmarks to show how much penalty text format parsing actually inflicts.
|
// Benchmarks to show how much penalty text format parsing actually inflicts.
|
||||||
|
@ -101,7 +101,7 @@ func BenchmarkParseProto(b *testing.B) {
|
||||||
in := bytes.NewReader(data)
|
in := bytes.NewReader(data)
|
||||||
for {
|
for {
|
||||||
family.Reset()
|
family.Reset()
|
||||||
if _, err := ext.ReadDelimited(in, family); err != nil {
|
if _, err := pbutil.ReadDelimited(in, family); err != nil {
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
@ -129,7 +129,7 @@ func BenchmarkParseProtoGzip(b *testing.B) {
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
family.Reset()
|
family.Reset()
|
||||||
if _, err := ext.ReadDelimited(in, family); err != nil {
|
if _, err := pbutil.ReadDelimited(in, family); err != nil {
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
@ -156,7 +156,7 @@ func BenchmarkParseProtoMap(b *testing.B) {
|
||||||
in := bytes.NewReader(data)
|
in := bytes.NewReader(data)
|
||||||
for {
|
for {
|
||||||
family := &dto.MetricFamily{}
|
family := &dto.MetricFamily{}
|
||||||
if _, err := ext.ReadDelimited(in, family); err != nil {
|
if _, err := pbutil.ReadDelimited(in, family); err != nil {
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
4 Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go (generated, vendored)
|
@ -18,7 +18,7 @@ import (
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"github.com/matttproud/golang_protobuf_extensions/ext"
|
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
@ -27,7 +27,7 @@ import (
|
||||||
// protobuf format and returns the number of bytes written and any error
|
// protobuf format and returns the number of bytes written and any error
|
||||||
// encountered.
|
// encountered.
|
||||||
func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) {
|
func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) {
|
||||||
return ext.WriteDelimited(w, p)
|
return pbutil.WriteDelimited(w, p)
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteProtoText writes the MetricFamily to the writer in text format and
|
// WriteProtoText writes the MetricFamily to the writer in text format and
|
||||||
|
|
5 Godeps/_workspace/src/github.com/prometheus/procfs/.travis.yml (generated, vendored, normal file)
|
@ -0,0 +1,5 @@
+language: go
+go:
+- 1.3
+- 1.4
+- tip
|
6 Godeps/_workspace/src/github.com/prometheus/procfs/README.md (generated, vendored)
|
@ -4,8 +4,4 @@ This procfs package provides functions to retrieve system, kernel and process
 metrics from the pseudo-filesystem proc.
 
 [](https://godoc.org/github.com/prometheus/procfs)
-[](https://circleci.com/gh/prometheus/procfs)
-
-# Testing
-
-$ go test
+[](https://travis-ci.org/prometheus/procfs)
|
|
4
Godeps/_workspace/src/github.com/prometheus/procfs/doc.go
generated
vendored
4
Godeps/_workspace/src/github.com/prometheus/procfs/doc.go
generated
vendored
|
@ -22,7 +22,7 @@
|
||||||
// "fmt"
|
// "fmt"
|
||||||
// "log"
|
// "log"
|
||||||
//
|
//
|
||||||
// "github.com/prometheus/client_golang/procfs"
|
// "github.com/prometheus/procfs"
|
||||||
// )
|
// )
|
||||||
//
|
//
|
||||||
// func main() {
|
// func main() {
|
||||||
|
@ -31,7 +31,7 @@
|
||||||
// log.Fatalf("could not get process: %s", err)
|
// log.Fatalf("could not get process: %s", err)
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// stat, err := p.Stat()
|
// stat, err := p.NewStat()
|
||||||
// if err != nil {
|
// if err != nil {
|
||||||
// log.Fatalf("could not get process stat: %s", err)
|
// log.Fatalf("could not get process stat: %s", err)
|
||||||
// }
|
// }
|
||||||
|
|
26 Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go (generated, vendored)
|
@ -7,8 +7,22 @@ import (
 	"os"
 )
 
-// #include <unistd.h>
-import "C"
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
+// required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic.
+// After much research it was determined that USER_HZ is actually hardcoded to
+// 100 on all Go-supported platforms as of the time of this writing. This is
+// why we decided to hardcode it here as well. It is not impossible that there
+// could be systems with exceptions, but they should be very exotic edge cases,
+// and in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
 
 // ProcStat provides status information about the process,
 // read from /proc/[pid]/stat.
@ -152,14 +166,10 @@ func (s ProcStat) StartTime() (float64, error) {
 	if err != nil {
 		return 0, err
 	}
-	return float64(stat.BootTime) + (float64(s.Starttime) / ticks()), nil
+	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
 }
 
 // CPUTime returns the total CPU user and system time in seconds.
 func (s ProcStat) CPUTime() float64 {
-	return float64(s.UTime+s.STime) / ticks()
-}
-
-func ticks() float64 {
-	return float64(C.sysconf(C._SC_CLK_TCK)) // most likely 100
+	return float64(s.UTime+s.STime) / userHZ
 }
|
||||||
|
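Since the clock fields in /proc/[pid]/stat are expressed in USER_HZ ticks, the conversion to seconds is now a plain division by the hardcoded constant. A small illustration with made-up tick counts:

	const userHZ = 100

	// utime and stime are read from /proc/[pid]/stat and counted in USER_HZ ticks.
	utime, stime := uint(1500), uint(300)
	cpuSeconds := float64(utime+stime) / userHZ // 18 seconds of CPU time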
|
14 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go (generated, vendored)
|
@ -347,12 +347,14 @@ func recoverTable(s *session, o *opt.Options) error {
 			return err
 		}
 		iter := tr.NewIterator(nil, nil)
-		iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
-			if errors.IsCorrupted(err) {
-				s.logf("table@recovery block corruption @%d %q", file.Num(), err)
-				tcorruptedBlock++
-			}
-		})
+		if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
+			itererr.SetErrorCallback(func(err error) {
+				if errors.IsCorrupted(err) {
+					s.logf("table@recovery block corruption @%d %q", file.Num(), err)
+					tcorruptedBlock++
+				}
+			})
+		}
 
 		// Scan the table.
 		for iter.Next() {
|
|
28 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go (generated, vendored)
|
@ -8,6 +8,7 @@ package leveldb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"math/rand"
|
||||||
"runtime"
|
"runtime"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
@ -80,6 +81,10 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
|
||||||
return iter
|
return iter
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (db *DB) iterSamplingRate() int {
|
||||||
|
return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
|
||||||
|
}
|
||||||
|
|
||||||
type dir int
|
type dir int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -98,11 +103,21 @@ type dbIter struct {
|
||||||
seq uint64
|
seq uint64
|
||||||
strict bool
|
strict bool
|
||||||
|
|
||||||
dir dir
|
smaplingGap int
|
||||||
key []byte
|
dir dir
|
||||||
value []byte
|
key []byte
|
||||||
err error
|
value []byte
|
||||||
releaser util.Releaser
|
err error
|
||||||
|
releaser util.Releaser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *dbIter) sampleSeek() {
|
||||||
|
ikey := i.iter.Key()
|
||||||
|
i.smaplingGap -= len(ikey) + len(i.iter.Value())
|
||||||
|
for i.smaplingGap < 0 {
|
||||||
|
i.smaplingGap += i.db.iterSamplingRate()
|
||||||
|
i.db.sampleSeek(ikey)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *dbIter) setErr(err error) {
|
func (i *dbIter) setErr(err error) {
|
||||||
|
@ -175,6 +190,7 @@ func (i *dbIter) Seek(key []byte) bool {
|
||||||
func (i *dbIter) next() bool {
|
func (i *dbIter) next() bool {
|
||||||
for {
|
for {
|
||||||
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
||||||
|
i.sampleSeek()
|
||||||
if seq <= i.seq {
|
if seq <= i.seq {
|
||||||
switch kt {
|
switch kt {
|
||||||
case ktDel:
|
case ktDel:
|
||||||
|
@ -225,6 +241,7 @@ func (i *dbIter) prev() bool {
|
||||||
if i.iter.Valid() {
|
if i.iter.Valid() {
|
||||||
for {
|
for {
|
||||||
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
||||||
|
i.sampleSeek()
|
||||||
if seq <= i.seq {
|
if seq <= i.seq {
|
||||||
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
|
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
|
||||||
return true
|
return true
|
||||||
|
@ -266,6 +283,7 @@ func (i *dbIter) Prev() bool {
|
||||||
case dirForward:
|
case dirForward:
|
||||||
for i.iter.Prev() {
|
for i.iter.Prev() {
|
||||||
if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
||||||
|
i.sampleSeek()
|
||||||
if i.icmp.uCompare(ukey, i.key) < 0 {
|
if i.icmp.uCompare(ukey, i.key) < 0 {
|
||||||
goto cont
|
goto cont
|
||||||
}
|
}
|
||||||
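The iterator additions above implement read sampling: every key/value pair the iterator visits shrinks a byte budget, and whenever the budget is exhausted a seek sample is reported so the DB can decide whether to trigger a table compaction. A simplified standalone sketch of that accounting (names other than the sampling rate are made up):

	// consumeSample decrements the remaining byte budget by the size of the
	// visited entry and reports one sample per exhausted budget window.
	func consumeSample(gap *int, key, value []byte, samplingRate int, report func()) {
		*gap -= len(key) + len(value)
		for *gap < 0 {
			*gap += samplingRate // roughly rand.Intn(2*IteratorSamplingRate) in the vendored code
			report()
		}
	}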
|
|
9 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go (generated, vendored)
|
@ -48,6 +48,15 @@ func (db *DB) addSeq(delta uint64) {
|
||||||
atomic.AddUint64(&db.seq, delta)
|
atomic.AddUint64(&db.seq, delta)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (db *DB) sampleSeek(ikey iKey) {
|
||||||
|
v := db.s.version()
|
||||||
|
if v.sampleSeek(ikey) {
|
||||||
|
// Trigger table compaction.
|
||||||
|
db.compSendTrigger(db.tcompCmdC)
|
||||||
|
}
|
||||||
|
v.release()
|
||||||
|
}
|
||||||
|
|
||||||
func (db *DB) mpoolPut(mem *memdb.DB) {
|
func (db *DB) mpoolPut(mem *memdb.DB) {
|
||||||
defer func() {
|
defer func() {
|
||||||
recover()
|
recover()
|
||||||
|
|
104 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go (generated, vendored)
|
@ -405,19 +405,21 @@ func (h *dbHarness) compactRange(min, max string) {
|
||||||
t.Log("DB range compaction done")
|
t.Log("DB range compaction done")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
|
func (h *dbHarness) sizeOf(start, limit string) uint64 {
|
||||||
t := h.t
|
sz, err := h.db.SizeOf([]util.Range{
|
||||||
db := h.db
|
|
||||||
|
|
||||||
s, err := db.SizeOf([]util.Range{
|
|
||||||
{[]byte(start), []byte(limit)},
|
{[]byte(start), []byte(limit)},
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error("SizeOf: got error: ", err)
|
h.t.Error("SizeOf: got error: ", err)
|
||||||
}
|
}
|
||||||
if s.Sum() < low || s.Sum() > hi {
|
return sz.Sum()
|
||||||
t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
|
}
|
||||||
shorten(start), shorten(limit), low, hi, s.Sum())
|
|
||||||
|
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
|
||||||
|
sz := h.sizeOf(start, limit)
|
||||||
|
if sz < low || sz > hi {
|
||||||
|
h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
|
||||||
|
shorten(start), shorten(limit), low, hi, sz)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2577,3 +2579,87 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
|
||||||
}
|
}
|
||||||
v.release()
|
v.release()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
|
||||||
|
const (
|
||||||
|
vSize = 200 * opt.KiB
|
||||||
|
tSize = 100 * opt.MiB
|
||||||
|
mIter = 100
|
||||||
|
n = tSize / vSize
|
||||||
|
)
|
||||||
|
|
||||||
|
h := newDbHarnessWopt(t, &opt.Options{
|
||||||
|
Compression: opt.NoCompression,
|
||||||
|
DisableBlockCache: true,
|
||||||
|
})
|
||||||
|
defer h.close()
|
||||||
|
|
||||||
|
key := func(x int) string {
|
||||||
|
return fmt.Sprintf("v%06d", x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fill.
|
||||||
|
value := strings.Repeat("x", vSize)
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
h.put(key(i), value)
|
||||||
|
}
|
||||||
|
h.compactMem()
|
||||||
|
|
||||||
|
// Delete all.
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
h.delete(key(i))
|
||||||
|
}
|
||||||
|
h.compactMem()
|
||||||
|
|
||||||
|
var (
|
||||||
|
limit = n / limitDiv
|
||||||
|
|
||||||
|
startKey = key(0)
|
||||||
|
limitKey = key(limit)
|
||||||
|
maxKey = key(n)
|
||||||
|
slice = &util.Range{Limit: []byte(limitKey)}
|
||||||
|
|
||||||
|
initialSize0 = h.sizeOf(startKey, limitKey)
|
||||||
|
initialSize1 = h.sizeOf(limitKey, maxKey)
|
||||||
|
)
|
||||||
|
|
||||||
|
t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
|
||||||
|
|
||||||
|
for r := 0; true; r++ {
|
||||||
|
if r >= mIter {
|
||||||
|
t.Fatal("taking too long to compact")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterates.
|
||||||
|
iter := h.db.NewIterator(slice, h.ro)
|
||||||
|
for iter.Next() {
|
||||||
|
}
|
||||||
|
if err := iter.Error(); err != nil {
|
||||||
|
t.Fatalf("Iter err: %v", err)
|
||||||
|
}
|
||||||
|
iter.Release()
|
||||||
|
|
||||||
|
// Wait compaction.
|
||||||
|
h.waitCompaction()
|
||||||
|
|
||||||
|
// Check size.
|
||||||
|
size0 := h.sizeOf(startKey, limitKey)
|
||||||
|
size1 := h.sizeOf(limitKey, maxKey)
|
||||||
|
t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
|
||||||
|
if size0 < initialSize0/10 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if initialSize1 > 0 {
|
||||||
|
h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDB_IterTriggeredCompaction(t *testing.T) {
|
||||||
|
testDB_IterTriggeredCompaction(t, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
|
||||||
|
testDB_IterTriggeredCompaction(t, 2)
|
||||||
|
}
|
||||||
|
|
33
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
generated
vendored
33
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
generated
vendored
|
@ -34,10 +34,11 @@ var (
|
||||||
DefaultCompactionTotalSize = 10 * MiB
|
DefaultCompactionTotalSize = 10 * MiB
|
||||||
DefaultCompactionTotalSizeMultiplier = 10.0
|
DefaultCompactionTotalSizeMultiplier = 10.0
|
||||||
DefaultCompressionType = SnappyCompression
|
DefaultCompressionType = SnappyCompression
|
||||||
DefaultOpenFilesCacher = LRUCacher
|
DefaultIteratorSamplingRate = 1 * MiB
|
||||||
DefaultOpenFilesCacheCapacity = 500
|
|
||||||
DefaultMaxMemCompationLevel = 2
|
DefaultMaxMemCompationLevel = 2
|
||||||
DefaultNumLevel = 7
|
DefaultNumLevel = 7
|
||||||
|
DefaultOpenFilesCacher = LRUCacher
|
||||||
|
DefaultOpenFilesCacheCapacity = 500
|
||||||
DefaultWriteBuffer = 4 * MiB
|
DefaultWriteBuffer = 4 * MiB
|
||||||
DefaultWriteL0PauseTrigger = 12
|
DefaultWriteL0PauseTrigger = 12
|
||||||
DefaultWriteL0SlowdownTrigger = 8
|
DefaultWriteL0SlowdownTrigger = 8
|
||||||
|
@ -153,7 +154,7 @@ type Options struct {
|
||||||
BlockCacher Cacher
|
BlockCacher Cacher
|
||||||
|
|
||||||
// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
|
// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
|
||||||
// Use -1 for zero, this has same effect with specifying NoCacher to BlockCacher.
|
// Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher.
|
||||||
//
|
//
|
||||||
// The default value is 8MiB.
|
// The default value is 8MiB.
|
||||||
BlockCacheCapacity int
|
BlockCacheCapacity int
|
||||||
|
@ -288,6 +289,13 @@ type Options struct {
|
||||||
// The default value is nil.
|
// The default value is nil.
|
||||||
Filter filter.Filter
|
Filter filter.Filter
|
||||||
|
|
||||||
|
// IteratorSamplingRate defines approximate gap (in bytes) between read
|
||||||
|
// sampling of an iterator. The samples will be used to determine when
|
||||||
|
// compaction should be triggered.
|
||||||
|
//
|
||||||
|
// The default is 1MiB.
|
||||||
|
IteratorSamplingRate int
|
||||||
|
|
||||||
// MaxMemCompationLevel defines maximum level a newly compacted 'memdb'
|
// MaxMemCompationLevel defines maximum level a newly compacted 'memdb'
|
||||||
// will be pushed into if doesn't creates overlap. This should less than
|
// will be pushed into if doesn't creates overlap. This should less than
|
||||||
// NumLevel. Use -1 for level-0.
|
// NumLevel. Use -1 for level-0.
|
||||||
|
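A usage sketch for the new option, assuming the usual goleveldb OpenFile call; the path and rate are illustrative:

	db, err := leveldb.OpenFile("/tmp/example.db", &opt.Options{
		// Sample roughly every 2 MiB of iterated data when deciding whether
		// reads should trigger a table compaction.
		IteratorSamplingRate: 2 * opt.MiB,
	})
	if err != nil {
		// handle error
	}
	defer db.Close()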
@ -308,7 +316,7 @@ type Options struct {
|
||||||
OpenFilesCacher Cacher
|
OpenFilesCacher Cacher
|
||||||
|
|
||||||
// OpenFilesCacheCapacity defines the capacity of the open files caching.
|
// OpenFilesCacheCapacity defines the capacity of the open files caching.
|
||||||
// Use -1 for zero, this has same effect with specifying NoCacher to OpenFilesCacher.
|
// Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher.
|
||||||
//
|
//
|
||||||
// The default value is 500.
|
// The default value is 500.
|
||||||
OpenFilesCacheCapacity int
|
OpenFilesCacheCapacity int
|
||||||
|
@ -355,9 +363,9 @@ func (o *Options) GetBlockCacher() Cacher {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Options) GetBlockCacheCapacity() int {
|
func (o *Options) GetBlockCacheCapacity() int {
|
||||||
if o == nil || o.BlockCacheCapacity <= 0 {
|
if o == nil || o.BlockCacheCapacity == 0 {
|
||||||
return DefaultBlockCacheCapacity
|
return DefaultBlockCacheCapacity
|
||||||
} else if o.BlockCacheCapacity == -1 {
|
} else if o.BlockCacheCapacity < 0 {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
return o.BlockCacheCapacity
|
return o.BlockCacheCapacity
|
||||||
|
@ -492,12 +500,19 @@ func (o *Options) GetFilter() filter.Filter {
|
||||||
return o.Filter
|
return o.Filter
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (o *Options) GetIteratorSamplingRate() int {
|
||||||
|
if o == nil || o.IteratorSamplingRate <= 0 {
|
||||||
|
return DefaultIteratorSamplingRate
|
||||||
|
}
|
||||||
|
return o.IteratorSamplingRate
|
||||||
|
}
|
||||||
|
|
||||||
func (o *Options) GetMaxMemCompationLevel() int {
|
func (o *Options) GetMaxMemCompationLevel() int {
|
||||||
level := DefaultMaxMemCompationLevel
|
level := DefaultMaxMemCompationLevel
|
||||||
if o != nil {
|
if o != nil {
|
||||||
if o.MaxMemCompationLevel > 0 {
|
if o.MaxMemCompationLevel > 0 {
|
||||||
level = o.MaxMemCompationLevel
|
level = o.MaxMemCompationLevel
|
||||||
} else if o.MaxMemCompationLevel == -1 {
|
} else if o.MaxMemCompationLevel < 0 {
|
||||||
level = 0
|
level = 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -525,9 +540,9 @@ func (o *Options) GetOpenFilesCacher() Cacher {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (o *Options) GetOpenFilesCacheCapacity() int {
|
func (o *Options) GetOpenFilesCacheCapacity() int {
|
||||||
if o == nil || o.OpenFilesCacheCapacity <= 0 {
|
if o == nil || o.OpenFilesCacheCapacity == 0 {
|
||||||
return DefaultOpenFilesCacheCapacity
|
return DefaultOpenFilesCacheCapacity
|
||||||
} else if o.OpenFilesCacheCapacity == -1 {
|
} else if o.OpenFilesCacheCapacity < 0 {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
return o.OpenFilesCacheCapacity
|
return o.OpenFilesCacheCapacity
|
||||||
|
|
25
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
generated
vendored
25
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
generated
vendored
|
@ -136,9 +136,8 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
|
||||||
if !tseek {
|
if !tseek {
|
||||||
if tset == nil {
|
if tset == nil {
|
||||||
tset = &tSet{level, t}
|
tset = &tSet{level, t}
|
||||||
} else if tset.table.consumeSeek() <= 0 {
|
} else {
|
||||||
tseek = true
|
tseek = true
|
||||||
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -203,6 +202,28 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
|
|
||||||
|
if tseek && tset.table.consumeSeek() <= 0 {
|
||||||
|
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
|
||||||
|
var tset *tSet
|
||||||
|
|
||||||
|
v.walkOverlapping(ikey, func(level int, t *tFile) bool {
|
||||||
|
if tset == nil {
|
||||||
|
tset = &tSet{level, t}
|
||||||
|
return true
|
||||||
|
} else {
|
||||||
|
if tset.table.consumeSeek() <= 0 {
|
||||||
|
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}, nil)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
172 Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go (generated, vendored)
|
@ -7,10 +7,15 @@ package snappy
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
|
"io"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrCorrupt reports that the input is invalid.
|
var (
|
||||||
var ErrCorrupt = errors.New("snappy: corrupt input")
|
// ErrCorrupt reports that the input is invalid.
|
||||||
|
ErrCorrupt = errors.New("snappy: corrupt input")
|
||||||
|
// ErrUnsupported reports that the input isn't supported.
|
||||||
|
ErrUnsupported = errors.New("snappy: unsupported input")
|
||||||
|
)
|
||||||
|
|
||||||
// DecodedLen returns the length of the decoded block.
|
// DecodedLen returns the length of the decoded block.
|
||||||
func DecodedLen(src []byte) (int, error) {
|
func DecodedLen(src []byte) (int, error) {
|
||||||
|
@ -122,3 +127,166 @@ func Decode(dst, src []byte) ([]byte, error) {
|
||||||
}
|
}
|
||||||
return dst[:d], nil
|
return dst[:d], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewReader returns a new Reader that decompresses from r, using the framing
|
||||||
|
// format described at
|
||||||
|
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
|
||||||
|
func NewReader(r io.Reader) *Reader {
|
||||||
|
return &Reader{
|
||||||
|
r: r,
|
||||||
|
decoded: make([]byte, maxUncompressedChunkLen),
|
||||||
|
buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reader is an io.Reader than can read Snappy-compressed bytes.
|
||||||
|
type Reader struct {
|
||||||
|
r io.Reader
|
||||||
|
err error
|
||||||
|
decoded []byte
|
||||||
|
buf []byte
|
||||||
|
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
||||||
|
i, j int
|
||||||
|
readHeader bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset discards any buffered data, resets all state, and switches the Snappy
|
||||||
|
// reader to read from r. This permits reusing a Reader rather than allocating
|
||||||
|
// a new one.
|
||||||
|
func (r *Reader) Reset(reader io.Reader) {
|
||||||
|
r.r = reader
|
||||||
|
r.err = nil
|
||||||
|
r.i = 0
|
||||||
|
r.j = 0
|
||||||
|
r.readHeader = false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reader) readFull(p []byte) (ok bool) {
|
||||||
|
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
|
||||||
|
if r.err == io.ErrUnexpectedEOF {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read satisfies the io.Reader interface.
|
||||||
|
func (r *Reader) Read(p []byte) (int, error) {
|
||||||
|
if r.err != nil {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
if r.i < r.j {
|
||||||
|
n := copy(p, r.decoded[r.i:r.j])
|
||||||
|
r.i += n
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
if !r.readFull(r.buf[:4]) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
chunkType := r.buf[0]
|
||||||
|
if !r.readHeader {
|
||||||
|
if chunkType != chunkTypeStreamIdentifier {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
r.readHeader = true
|
||||||
|
}
|
||||||
|
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
|
||||||
|
if chunkLen > len(r.buf) {
|
||||||
|
r.err = ErrUnsupported
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// The chunk types are specified at
|
||||||
|
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
|
||||||
|
switch chunkType {
|
||||||
|
case chunkTypeCompressedData:
|
||||||
|
// Section 4.2. Compressed data (chunk type 0x00).
|
||||||
|
if chunkLen < checksumSize {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
buf := r.buf[:chunkLen]
|
||||||
|
if !r.readFull(buf) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||||
|
buf = buf[checksumSize:]
|
||||||
|
|
||||||
|
n, err := DecodedLen(buf)
|
||||||
|
if err != nil {
|
||||||
|
r.err = err
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if n > len(r.decoded) {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if _, err := Decode(r.decoded, buf); err != nil {
|
||||||
|
r.err = err
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if crc(r.decoded[:n]) != checksum {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
r.i, r.j = 0, n
|
||||||
|
continue
|
||||||
|
|
||||||
|
case chunkTypeUncompressedData:
|
||||||
|
// Section 4.3. Uncompressed data (chunk type 0x01).
|
||||||
|
if chunkLen < checksumSize {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
buf := r.buf[:checksumSize]
|
||||||
|
if !r.readFull(buf) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||||
|
// Read directly into r.decoded instead of via r.buf.
|
||||||
|
n := chunkLen - checksumSize
|
||||||
|
if !r.readFull(r.decoded[:n]) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if crc(r.decoded[:n]) != checksum {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
r.i, r.j = 0, n
|
||||||
|
continue
|
||||||
|
|
||||||
|
case chunkTypeStreamIdentifier:
|
||||||
|
// Section 4.1. Stream identifier (chunk type 0xff).
|
||||||
|
if chunkLen != len(magicBody) {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
if !r.readFull(r.buf[:len(magicBody)]) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
for i := 0; i < len(magicBody); i++ {
|
||||||
|
if r.buf[i] != magicBody[i] {
|
||||||
|
r.err = ErrCorrupt
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if chunkType <= 0x7f {
|
||||||
|
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
|
||||||
|
r.err = ErrUnsupported
|
||||||
|
return 0, r.err
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Section 4.4 Padding (chunk type 0xfe).
|
||||||
|
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
|
||||||
|
if !r.readFull(r.buf[:chunkLen]) {
|
||||||
|
return 0, r.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
84
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
generated
vendored
84
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
generated
vendored
|
@ -6,6 +6,7 @@ package snappy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"io"
|
||||||
)
|
)
|
||||||
|
|
||||||
// We limit how far copy back-references can go, the same as the C++ code.
|
// We limit how far copy back-references can go, the same as the C++ code.
|
||||||
|
@ -172,3 +173,86 @@ func MaxEncodedLen(srcLen int) int {
|
||||||
// This last factor dominates the blowup, so the final estimate is:
|
// This last factor dominates the blowup, so the final estimate is:
|
||||||
return 32 + srcLen + srcLen/6
|
return 32 + srcLen + srcLen/6
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewWriter returns a new Writer that compresses to w, using the framing
|
||||||
|
// format described at
|
||||||
|
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
|
||||||
|
func NewWriter(w io.Writer) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
w: w,
|
||||||
|
enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writer is an io.Writer than can write Snappy-compressed bytes.
|
||||||
|
type Writer struct {
|
||||||
|
w io.Writer
|
||||||
|
err error
|
||||||
|
enc []byte
|
||||||
|
buf [checksumSize + chunkHeaderSize]byte
|
||||||
|
wroteHeader bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset discards the writer's state and switches the Snappy writer to write to
|
||||||
|
// w. This permits reusing a Writer rather than allocating a new one.
|
||||||
|
func (w *Writer) Reset(writer io.Writer) {
|
||||||
|
w.w = writer
|
||||||
|
w.err = nil
|
||||||
|
w.wroteHeader = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write satisfies the io.Writer interface.
|
||||||
|
func (w *Writer) Write(p []byte) (n int, errRet error) {
|
||||||
|
if w.err != nil {
|
||||||
|
return 0, w.err
|
||||||
|
}
|
||||||
|
if !w.wroteHeader {
|
||||||
|
copy(w.enc, magicChunk)
|
||||||
|
if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
|
||||||
|
w.err = err
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
w.wroteHeader = true
|
||||||
|
}
|
||||||
|
for len(p) > 0 {
|
||||||
|
var uncompressed []byte
|
||||||
|
if len(p) > maxUncompressedChunkLen {
|
||||||
|
uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
|
||||||
|
} else {
|
||||||
|
uncompressed, p = p, nil
|
||||||
|
}
|
||||||
|
checksum := crc(uncompressed)
|
||||||
|
|
||||||
|
// Compress the buffer, discarding the result if the improvement
|
||||||
|
// isn't at least 12.5%.
|
||||||
|
chunkType := uint8(chunkTypeCompressedData)
|
||||||
|
chunkBody, err := Encode(w.enc, uncompressed)
|
||||||
|
if err != nil {
|
||||||
|
w.err = err
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
|
||||||
|
chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
|
||||||
|
}
|
||||||
|
|
||||||
|
chunkLen := 4 + len(chunkBody)
|
||||||
|
w.buf[0] = chunkType
|
||||||
|
w.buf[1] = uint8(chunkLen >> 0)
|
||||||
|
w.buf[2] = uint8(chunkLen >> 8)
|
||||||
|
w.buf[3] = uint8(chunkLen >> 16)
|
||||||
|
w.buf[4] = uint8(checksum >> 0)
|
||||||
|
w.buf[5] = uint8(checksum >> 8)
|
||||||
|
w.buf[6] = uint8(checksum >> 16)
|
||||||
|
w.buf[7] = uint8(checksum >> 24)
|
||||||
|
if _, err = w.w.Write(w.buf[:]); err != nil {
|
||||||
|
w.err = err
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
if _, err = w.w.Write(chunkBody); err != nil {
|
||||||
|
w.err = err
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
n += len(uncompressed)
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
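Together, the added NewWriter and NewReader give the package a streaming API for the Snappy framing format. A round-trip usage sketch, assuming the vendored import path github.com/syndtr/gosnappy/snappy; the payload is illustrative:

	var buf bytes.Buffer
	w := snappy.NewWriter(&buf)
	if _, err := w.Write([]byte("hello, snappy framing")); err != nil {
		// handle error
	}
	decoded, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		// handle error
	}
	fmt.Println(string(decoded)) // "hello, snappy framing"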
30
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
generated
vendored
30
Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
generated
vendored
|
@ -8,6 +8,10 @@
|
||||||
// The C++ snappy implementation is at http://code.google.com/p/snappy/
|
// The C++ snappy implementation is at http://code.google.com/p/snappy/
|
||||||
package snappy
|
package snappy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"hash/crc32"
|
||||||
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Each encoded block begins with the varint-encoded length of the decoded data,
|
Each encoded block begins with the varint-encoded length of the decoded data,
|
||||||
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
|
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
|
||||||
|
@ -36,3 +40,29 @@ const (
|
||||||
tagCopy2 = 0x02
|
tagCopy2 = 0x02
|
||||||
tagCopy4 = 0x03
|
tagCopy4 = 0x03
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
checksumSize = 4
|
||||||
|
chunkHeaderSize = 4
|
||||||
|
magicChunk = "\xff\x06\x00\x00" + magicBody
|
||||||
|
magicBody = "sNaPpY"
|
||||||
|
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says
|
||||||
|
// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
|
||||||
|
maxUncompressedChunkLen = 65536
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
chunkTypeCompressedData = 0x00
|
||||||
|
chunkTypeUncompressedData = 0x01
|
||||||
|
chunkTypePadding = 0xfe
|
||||||
|
chunkTypeStreamIdentifier = 0xff
|
||||||
|
)
|
||||||
|
|
||||||
|
var crcTable = crc32.MakeTable(crc32.Castagnoli)
|
||||||
|
|
||||||
|
// crc implements the checksum specified in section 3 of
|
||||||
|
// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
|
||||||
|
func crc(b []byte) uint32 {
|
||||||
|
c := crc32.Update(0, crcTable, b)
|
||||||
|
return uint32(c>>15|c<<17) + 0xa282ead8
|
||||||
|
}
|
||||||
|
|
201 Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go (generated, vendored)
|
@ -18,7 +18,10 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
|
var (
|
||||||
|
download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
|
||||||
|
testdata = flag.String("testdata", "testdata", "Directory containing the test data")
|
||||||
|
)
|
||||||
|
|
||||||
func roundtrip(b, ebuf, dbuf []byte) error {
|
func roundtrip(b, ebuf, dbuf []byte) error {
|
||||||
e, err := Encode(ebuf, b)
|
e, err := Encode(ebuf, b)
|
||||||
|
@ -55,11 +58,11 @@ func TestSmallCopy(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSmallRand(t *testing.T) {
|
func TestSmallRand(t *testing.T) {
|
||||||
rand.Seed(27354294)
|
rng := rand.New(rand.NewSource(27354294))
|
||||||
for n := 1; n < 20000; n += 23 {
|
for n := 1; n < 20000; n += 23 {
|
||||||
b := make([]byte, n)
|
b := make([]byte, n)
|
||||||
for i, _ := range b {
|
for i := range b {
|
||||||
b[i] = uint8(rand.Uint32())
|
b[i] = uint8(rng.Uint32())
|
||||||
}
|
}
|
||||||
if err := roundtrip(b, nil, nil); err != nil {
|
if err := roundtrip(b, nil, nil); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -70,7 +73,7 @@ func TestSmallRand(t *testing.T) {
|
||||||
func TestSmallRegular(t *testing.T) {
|
func TestSmallRegular(t *testing.T) {
|
||||||
for n := 1; n < 20000; n += 23 {
|
for n := 1; n < 20000; n += 23 {
|
||||||
b := make([]byte, n)
|
b := make([]byte, n)
|
||||||
for i, _ := range b {
|
for i := range b {
|
||||||
b[i] = uint8(i%10 + 'a')
|
b[i] = uint8(i%10 + 'a')
|
||||||
}
|
}
|
||||||
if err := roundtrip(b, nil, nil); err != nil {
|
if err := roundtrip(b, nil, nil); err != nil {
|
||||||
|
@@ -79,6 +82,120 @@ func TestSmallRegular(t *testing.T) {
 	}
 }
 
+func cmp(a, b []byte) error {
+	if len(a) != len(b) {
+		return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
+	}
+	for i := range a {
+		if a[i] != b[i] {
+			return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
+		}
+	}
+	return nil
+}
+
+func TestFramingFormat(t *testing.T) {
+	// src is comprised of alternating 1e5-sized sequences of random
+	// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
+	// because it is larger than maxUncompressedChunkLen (64k).
+	src := make([]byte, 1e6)
+	rng := rand.New(rand.NewSource(1))
+	for i := 0; i < 10; i++ {
+		if i%2 == 0 {
+			for j := 0; j < 1e5; j++ {
+				src[1e5*i+j] = uint8(rng.Intn(256))
+			}
+		} else {
+			for j := 0; j < 1e5; j++ {
+				src[1e5*i+j] = uint8(i)
+			}
+		}
+	}
+
+	buf := new(bytes.Buffer)
+	if _, err := NewWriter(buf).Write(src); err != nil {
+		t.Fatalf("Write: encoding: %v", err)
+	}
+	dst, err := ioutil.ReadAll(NewReader(buf))
+	if err != nil {
+		t.Fatalf("ReadAll: decoding: %v", err)
+	}
+	if err := cmp(dst, src); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReaderReset(t *testing.T) {
+	gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
+	buf := new(bytes.Buffer)
+	if _, err := NewWriter(buf).Write(gold); err != nil {
+		t.Fatalf("Write: %v", err)
+	}
+	encoded, invalid, partial := buf.String(), "invalid", "partial"
+	r := NewReader(nil)
+	for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
+		if s == partial {
+			r.Reset(strings.NewReader(encoded))
+			if _, err := r.Read(make([]byte, 101)); err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+			continue
+		}
+		r.Reset(strings.NewReader(s))
+		got, err := ioutil.ReadAll(r)
+		switch s {
+		case encoded:
+			if err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+			if err := cmp(got, gold); err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+		case invalid:
+			if err == nil {
+				t.Errorf("#%d: got nil error, want non-nil", i)
+				continue
+			}
+		}
+	}
+}
+
+func TestWriterReset(t *testing.T) {
+	gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
+	var gots, wants [][]byte
+	const n = 20
+	w, failed := NewWriter(nil), false
+	for i := 0; i <= n; i++ {
+		buf := new(bytes.Buffer)
+		w.Reset(buf)
+		want := gold[:len(gold)*i/n]
+		if _, err := w.Write(want); err != nil {
+			t.Errorf("#%d: Write: %v", i, err)
+			failed = true
+			continue
+		}
+		got, err := ioutil.ReadAll(NewReader(buf))
+		if err != nil {
+			t.Errorf("#%d: ReadAll: %v", i, err)
+			failed = true
+			continue
+		}
+		gots = append(gots, got)
+		wants = append(wants, want)
+	}
+	if failed {
+		return
+	}
+	for i := range gots {
+		if err := cmp(gots[i], wants[i]); err != nil {
+			t.Errorf("#%d: %v", i, err)
+		}
+	}
+}
+
 func benchDecode(b *testing.B, src []byte) {
 	encoded, err := Encode(nil, src)
 	if err != nil {
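The new cmp, TestFramingFormat, TestReaderReset, and TestWriterReset cover the streaming side of the vendored package: NewWriter emits a framed stream of chunks (each carrying at most maxUncompressedChunkLen of uncompressed data plus a header and masked CRC) and NewReader decodes and verifies it, with Reset letting both ends be reused. A hedged round-trip sketch using the vendored import path; the program itself is illustrative and not taken from this commit.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/syndtr/gosnappy/snappy"
)

func main() {
	src := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 1000)

	// Encode: the framed Writer splits src into chunks and prefixes each with
	// a chunk header and its masked CRC-32C.
	var buf bytes.Buffer
	if _, err := snappy.NewWriter(&buf).Write(src); err != nil {
		log.Fatalf("encode: %v", err)
	}

	// Decode: the Reader checks each chunk's checksum while streaming.
	got, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		log.Fatalf("decode: %v", err)
	}
	fmt.Println("round trip ok:", bytes.Equal(got, src))
}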
@@ -102,7 +219,7 @@ func benchEncode(b *testing.B, src []byte) {
 	}
 }
 
-func readFile(b *testing.B, filename string) []byte {
+func readFile(b testing.TB, filename string) []byte {
 	src, err := ioutil.ReadFile(filename)
 	if err != nil {
 		b.Fatalf("failed reading %s: %s", filename, err)
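Switching readFile from *testing.B to testing.TB is what lets one helper serve both tests and benchmarks: testing.TB is the standard-library interface implemented by *testing.T and *testing.B. A small sketch of the pattern with a hypothetical helper name:

package example

import (
	"io/ioutil"
	"testing"
)

// mustRead is a hypothetical helper in the spirit of readFile above: because
// it accepts testing.TB, both tests (*testing.T) and benchmarks (*testing.B)
// can call it and abort via Fatalf on error.
func mustRead(tb testing.TB, filename string) []byte {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		tb.Fatalf("failed reading %s: %s", filename, err)
	}
	return data
}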
@@ -144,7 +261,7 @@ func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
 func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
 
 // testFiles' values are copied directly from
-// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
+// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
 // The label field is unused in snappy-go.
 var testFiles = []struct {
 	label string
@@ -152,29 +269,36 @@ var testFiles = []struct {
 }{
 	{"html", "html"},
 	{"urls", "urls.10K"},
-	{"jpg", "house.jpg"},
-	{"pdf", "mapreduce-osdi-1.pdf"},
+	{"jpg", "fireworks.jpeg"},
+	{"jpg_200", "fireworks.jpeg"},
+	{"pdf", "paper-100k.pdf"},
 	{"html4", "html_x_4"},
-	{"cp", "cp.html"},
-	{"c", "fields.c"},
-	{"lsp", "grammar.lsp"},
-	{"xls", "kennedy.xls"},
 	{"txt1", "alice29.txt"},
 	{"txt2", "asyoulik.txt"},
 	{"txt3", "lcet10.txt"},
 	{"txt4", "plrabn12.txt"},
-	{"bin", "ptt5"},
-	{"sum", "sum"},
-	{"man", "xargs.1"},
 	{"pb", "geo.protodata"},
 	{"gaviota", "kppkn.gtb"},
 }
 
 // The test data files are present at this canonical URL.
-const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"
+const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
 
 func downloadTestdata(basename string) (errRet error) {
-	filename := filepath.Join("testdata", basename)
+	filename := filepath.Join(*testdata, basename)
+	if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
+		return nil
+	}
+
+	if !*download {
+		return fmt.Errorf("test data not found; skipping benchmark without the -download flag")
+	}
+	// Download the official snappy C++ implementation reference test data
+	// files for benchmarking.
+	if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
+		return fmt.Errorf("failed to create testdata: %s", err)
+	}
+
 	f, err := os.Create(filename)
 	if err != nil {
 		return fmt.Errorf("failed to create %s: %s", filename, err)
@@ -185,36 +309,27 @@ func downloadTestdata(basename string) (errRet error) {
 			os.Remove(filename)
 		}
 	}()
-	resp, err := http.Get(baseURL + basename)
+	url := baseURL + basename
+	resp, err := http.Get(url)
 	if err != nil {
-		return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
+		return fmt.Errorf("failed to download %s: %s", url, err)
 	}
 	defer resp.Body.Close()
+	if s := resp.StatusCode; s != http.StatusOK {
+		return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
+	}
 	_, err = io.Copy(f, resp.Body)
 	if err != nil {
-		return fmt.Errorf("failed to write %s: %s", filename, err)
+		return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
 	}
 	return nil
 }
 
 func benchFile(b *testing.B, n int, decode bool) {
-	filename := filepath.Join("testdata", testFiles[n].filename)
-	if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
-		if !*download {
-			b.Fatal("test data not found; skipping benchmark without the -download flag")
-		}
-		// Download the official snappy C++ implementation reference test data
-		// files for benchmarking.
-		if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
-			b.Fatalf("failed to create testdata: %s", err)
-		}
-		for _, tf := range testFiles {
-			if err := downloadTestdata(tf.filename); err != nil {
-				b.Fatalf("failed to download testdata: %s", err)
-			}
-		}
-	}
-	data := readFile(b, filename)
+	if err := downloadTestdata(testFiles[n].filename); err != nil {
+		b.Fatalf("failed to download testdata: %s", err)
+	}
+	data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
 	if decode {
 		benchDecode(b, data)
 	} else {
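With downloadTestdata made idempotent in the hunk above, benchFile no longer pre-creates the testdata directory or loops over the whole corpus: it asks for just the one file it needs, which must already exist under the directory named by -testdata (default "testdata") or is fetched from baseURL only when the -download flag is given. As a usage assumption rather than anything documented in this commit, the benchmarks would then be invoked along the lines of go test -bench=Flat -download from the vendored package directory.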
@@ -235,12 +350,6 @@ func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
 func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
 func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
 func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
-func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
-func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
-func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
-func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
-func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
-func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
 func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
 func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
 func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
@@ -253,9 +362,3 @@ func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
 func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
 func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
 func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
-func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
-func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
-func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
-func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
-func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
-func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }